Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/exponential.go  11
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/retry.go  4
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md  5
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/jwe.go  19
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/jwk.go  2
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/jws.go  74
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/symmetric.go  12
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go  28
-rw-r--r--  vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go  29
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/driver/driver.go  41
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/embed/README.md  2
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/embed/build.sh  3
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm  bin  1398735 -> 1404349 bytes
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/func.go  14
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/internal/util/func.go  14
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm  bin  15960 -> 16007 bytes
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/util/vfsutil/slice.go  102
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/util/vfsutil/wrap.go  185
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/value.go  6
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/api.go  6
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go  99
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/file.go  3
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/filename.go  2
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/memdb/memdb.go  101
-rw-r--r--  vendor/github.com/ncruces/go-sqlite3/vfs/vfs.go  2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go  4
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/metric.go  25
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go  6
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go  2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go  8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go  2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/vec.go  10
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/wrap.go  36
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/README.md  122
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/doc.go  24
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/label_namer.go  90
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/metric_namer.go  152
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/normalize_label.go  57
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/strategy.go  86
-rw-r--r--  vendor/github.com/prometheus/otlptranslator/unit_namer.go  24
-rw-r--r--  vendor/github.com/spf13/cobra/.golangci.yml  28
-rw-r--r--  vendor/github.com/spf13/cobra/README.md  24
-rw-r--r--  vendor/github.com/spf13/cobra/SECURITY.md  105
-rw-r--r--  vendor/github.com/spf13/cobra/command.go  11
-rw-r--r--  vendor/github.com/spf13/cobra/completions.go  19
-rw-r--r--  vendor/github.com/spf13/pflag/flag.go  36
-rw-r--r--  vendor/github.com/spf13/pflag/golangflag.go  34
-rw-r--r--  vendor/github.com/spf13/pflag/string_to_string.go  10
-rw-r--r--  vendor/github.com/spf13/pflag/time.go  8
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_compare.go  22
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go  51
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go  102
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_order.go  2
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertions.go  367
-rw-r--r--  vendor/github.com/stretchr/testify/assert/doc.go  4
-rw-r--r--  vendor/github.com/stretchr/testify/assert/http_assertions.go  4
-rw-r--r--  vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go  1
-rw-r--r--  vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go  1
-rw-r--r--  vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go  1
-rw-r--r--  vendor/github.com/stretchr/testify/require/doc.go  2
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go  108
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go  102
-rw-r--r--  vendor/github.com/stretchr/testify/suite/stats.go  16
-rw-r--r--  vendor/github.com/stretchr/testify/suite/suite.go  110
-rw-r--r--  vendor/github.com/tdewolff/minify/v2/minify.go  14
-rw-r--r--  vendor/github.com/tdewolff/minify/v2/publish.sh  3
-rw-r--r--  vendor/github.com/tdewolff/parse/v2/binary.go  601
-rw-r--r--  vendor/github.com/tdewolff/parse/v2/binary_unix.go  28
68 files changed, 2125 insertions, 1101 deletions
diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go
index c1f3e442d..79d425e87 100644
--- a/vendor/github.com/cenkalti/backoff/v5/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go
@@ -1,7 +1,7 @@
package backoff
import (
- "math/rand"
+ "math/rand/v2"
"time"
)
@@ -28,13 +28,7 @@ multiplied by the exponential, that is, between 2 and 6 seconds.
Note: MaxInterval caps the RetryInterval and not the randomized interval.
-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
+Example: Given the following default arguments, for 9 tries the sequence will be:
Request # RetryInterval (seconds) Randomized Interval (seconds)
@@ -47,7 +41,6 @@ and assuming we go over the MaxElapsedTime on the 10th try:
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
- 10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go
index e43f47fb8..32a7f9883 100644
--- a/vendor/github.com/cenkalti/backoff/v5/retry.go
+++ b/vendor/github.com/cenkalti/backoff/v5/retry.go
@@ -47,7 +47,7 @@ func WithNotify(n Notify) RetryOption {
}
}
-// WithMaxTries limits the number of retry attempts.
+// WithMaxTries limits the total number of attempts.
func WithMaxTries(n uint) RetryOption {
return func(args *retryOptions) {
args.MaxTries = n
@@ -97,7 +97,7 @@ func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOpti
// Handle permanent errors without retrying.
var permanent *PermanentError
if errors.As(err, &permanent) {
- return res, err
+ return res, permanent.Unwrap()
}
// Stop retrying if context is cancelled.
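The retry.go change above means WithMaxTries now counts every attempt, and a *PermanentError is unwrapped before Retry returns. A minimal sketch of how calling code observes this, assuming backoff/v5's Operation[T] is func() (T, error) and that backoff.Permanent wraps an error in a *PermanentError:
```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

var errBadRequest = errors.New("bad request")

func main() {
	op := func() (string, error) {
		// backoff.Permanent is assumed to wrap errBadRequest in a *PermanentError;
		// with the change above, Retry returns the unwrapped error, not the wrapper.
		return "", backoff.Permanent(errBadRequest)
	}
	// WithMaxTries(3) now bounds all attempts, not just the retries after the first call.
	_, err := backoff.Retry(context.Background(), op, backoff.WithMaxTries(3))
	fmt.Println(errors.Is(err, errBadRequest)) // true
}
```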
diff --git a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
index 6f717dbd8..66a8a0f89 100644
--- a/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
+++ b/vendor/github.com/go-jose/go-jose/v4/CHANGELOG.md
@@ -1,3 +1,8 @@
+## Changed
+
+ - Defined a custom error, ErrUnexpectedSignatureAlgorithm, returned when a JWS
+ header contains an unsupported signature algorithm.
+
# v4.0.4
## Fixed
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwe.go b/vendor/github.com/go-jose/go-jose/v4/jwe.go
index 9f1322dcc..6102f9100 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jwe.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jwe.go
@@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr
if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) {
return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms)
}
- if alg != "" && !containsContentEncryption(contentEncryption, enc) {
+ if enc != "" && !containsContentEncryption(contentEncryption, enc) {
return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption)
}
return nil
@@ -288,11 +288,20 @@ func ParseEncryptedCompact(
keyAlgorithms []KeyAlgorithm,
contentEncryption []ContentEncryption,
) (*JSONWebEncryption, error) {
- // Five parts is four separators
- if strings.Count(input, ".") != 4 {
- return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
+ var parts [5]string
+ var ok bool
+
+ for i := range 4 {
+ parts[i], input, ok = strings.Cut(input, ".")
+ if !ok {
+ return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
+ }
+ }
+ // Validate that the last part does not contain more dots
+ if strings.ContainsRune(input, '.') {
+ return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
}
- parts := strings.SplitN(input, ".", 5)
+ parts[4] = input
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
if err != nil {
diff --git a/vendor/github.com/go-jose/go-jose/v4/jwk.go b/vendor/github.com/go-jose/go-jose/v4/jwk.go
index 9e57e93ba..9700f8906 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jwk.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jwk.go
@@ -239,7 +239,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
keyPub = key
}
} else {
- return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
+ return fmt.Errorf("go-jose/go-jose: unknown curve '%s'", raw.Crv)
}
default:
return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
diff --git a/vendor/github.com/go-jose/go-jose/v4/jws.go b/vendor/github.com/go-jose/go-jose/v4/jws.go
index d09d8ba50..c40bd3ec1 100644
--- a/vendor/github.com/go-jose/go-jose/v4/jws.go
+++ b/vendor/github.com/go-jose/go-jose/v4/jws.go
@@ -75,7 +75,14 @@ type Signature struct {
original *rawSignatureInfo
}
-// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization.
+// ParseSigned parses a signed message in JWS Compact or JWS JSON Serialization. Validation fails if
+// the JWS is signed with an algorithm that isn't in the provided list of signature algorithms.
+// Applications should decide for themselves which signature algorithms are acceptable. If you're
+// not sure which signature algorithms your application might receive, consult the documentation of
+// the program which provides them or the protocol that you are implementing. You can also try
+// getting an example JWS and decoding it with a tool like https://jwt.io to see what its "alg"
+// header parameter indicates. The signature on the JWS does not get validated during parsing. Call
+// Verify() after parsing to validate the signature and obtain the payload.
//
// https://datatracker.ietf.org/doc/html/rfc7515#section-7
func ParseSigned(
@@ -90,7 +97,14 @@ func ParseSigned(
return parseSignedCompact(signature, nil, signatureAlgorithms)
}
-// ParseSignedCompact parses a message in JWS Compact Serialization.
+// ParseSignedCompact parses a message in JWS Compact Serialization. Validation fails if the JWS is
+// signed with an algorithm that isn't in the provided list of signature algorithms. Applications
+// should decide for themselves which signature algorithms are acceptable. If you're not sure which
+// signature algorithms your application might receive, consult the documentation of the program
+// which provides them or the protocol that you are implementing. You can also try getting an
+// example JWS and decoding it with a tool like https://jwt.io to see what its "alg" header
+// parameter indicates. The signature on the JWS does not get validated during parsing. Call
+// Verify() after parsing to validate the signature and obtain the payload.
//
// https://datatracker.ietf.org/doc/html/rfc7515#section-7.1
func ParseSignedCompact(
@@ -101,6 +115,15 @@ func ParseSignedCompact(
}
// ParseDetached parses a signed message in compact serialization format with detached payload.
+// Validation fails if the JWS is signed with an algorithm that isn't in the provided list of
+// signature algorithms. Applications should decide for themselves which signature algorithms are
+// acceptable. If you're not sure which signature algorithms your application might receive, consult
+// the documentation of the program which provides them or the protocol that you are implementing.
+// You can also try getting an example JWS and decoding it with a tool like https://jwt.io to see
+// what its "alg" header parameter indicates. The signature on the JWS does not get validated during
+// parsing. Call Verify() after parsing to validate the signature and obtain the payload.
+//
+// https://datatracker.ietf.org/doc/html/rfc7515#appendix-F
func ParseDetached(
signature string,
payload []byte,
@@ -181,6 +204,25 @@ func containsSignatureAlgorithm(haystack []SignatureAlgorithm, needle SignatureA
return false
}
+// ErrUnexpectedSignatureAlgorithm is returned when the signature algorithm in
+// the JWS header does not match one of the expected algorithms.
+type ErrUnexpectedSignatureAlgorithm struct {
+ // Got is the signature algorithm found in the JWS header.
+ Got SignatureAlgorithm
+ expected []SignatureAlgorithm
+}
+
+func (e *ErrUnexpectedSignatureAlgorithm) Error() string {
+ return fmt.Sprintf("unexpected signature algorithm %q; expected %q", e.Got, e.expected)
+}
+
+func newErrUnexpectedSignatureAlgorithm(got SignatureAlgorithm, expected []SignatureAlgorithm) error {
+ return &ErrUnexpectedSignatureAlgorithm{
+ Got: got,
+ expected: expected,
+ }
+}
+
// sanitized produces a cleaned-up JWS object from the raw JSON.
func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgorithm) (*JSONWebSignature, error) {
if len(signatureAlgorithms) == 0 {
@@ -236,8 +278,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
alg := SignatureAlgorithm(signature.Header.Algorithm)
if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
- return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q",
- alg, signatureAlgorithms)
+ return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms)
}
if signature.header != nil {
@@ -285,8 +326,7 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
alg := SignatureAlgorithm(obj.Signatures[i].Header.Algorithm)
if !containsSignatureAlgorithm(signatureAlgorithms, alg) {
- return nil, fmt.Errorf("go-jose/go-jose: unexpected signature algorithm %q; expected %q",
- alg, signatureAlgorithms)
+ return nil, newErrUnexpectedSignatureAlgorithm(alg, signatureAlgorithms)
}
if obj.Signatures[i].header != nil {
@@ -321,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
return obj, nil
}
+const tokenDelim = "."
+
// parseSignedCompact parses a message in compact format.
func parseSignedCompact(
input string,
payload []byte,
signatureAlgorithms []SignatureAlgorithm,
) (*JSONWebSignature, error) {
- // Three parts is two separators
- if strings.Count(input, ".") != 2 {
+ protected, s, ok := strings.Cut(input, tokenDelim)
+ if !ok { // no period found
+ return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
+ }
+ claims, sig, ok := strings.Cut(s, tokenDelim)
+ if !ok { // only one period found
+ return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
+ }
+ if strings.ContainsRune(sig, '.') { // too many periods found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
- parts := strings.SplitN(input, ".", 3)
- if parts[1] != "" && payload != nil {
+ if claims != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
}
- rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
+ rawProtected, err := base64.RawURLEncoding.DecodeString(protected)
if err != nil {
return nil, err
}
if payload == nil {
- payload, err = base64.RawURLEncoding.DecodeString(parts[1])
+ payload, err = base64.RawURLEncoding.DecodeString(claims)
if err != nil {
return nil, err
}
}
- signature, err := base64.RawURLEncoding.DecodeString(parts[2])
+ signature, err := base64.RawURLEncoding.DecodeString(sig)
if err != nil {
return nil, err
}
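A minimal sketch of how a caller can detect the new typed error, using only the API shown above (the helper name and its inputs are hypothetical):
```go
import (
	"errors"
	"log"

	jose "github.com/go-jose/go-jose/v4"
)

// verifyToken is a hypothetical helper; token and publicKey are assumed inputs.
func verifyToken(token string, publicKey any) ([]byte, error) {
	obj, err := jose.ParseSigned(token, []jose.SignatureAlgorithm{jose.ES256, jose.RS256})
	if err != nil {
		var algErr *jose.ErrUnexpectedSignatureAlgorithm
		if errors.As(err, &algErr) {
			// algErr.Got carries the "alg" value found in the JWS header.
			log.Printf("rejected JWS signed with %s", algErr.Got)
		}
		return nil, err
	}
	// The signature is only checked here, not during parsing.
	return obj.Verify(publicKey)
}
```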
diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric.go b/vendor/github.com/go-jose/go-jose/v4/symmetric.go
index a69103b08..6176e0607 100644
--- a/vendor/github.com/go-jose/go-jose/v4/symmetric.go
+++ b/vendor/github.com/go-jose/go-jose/v4/symmetric.go
@@ -30,8 +30,6 @@ import (
"hash"
"io"
- "golang.org/x/crypto/pbkdf2"
-
josecipher "github.com/go-jose/go-jose/v4/cipher"
)
@@ -330,7 +328,10 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
// derive key
keyLen, h := getPbkdf2Params(alg)
- key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
+ key, err := pbkdf2Key(h, string(ctx.key), salt, ctx.p2c, keyLen)
+ if err != nil {
+ return recipientInfo{}, err
+ }
// use AES cipher with derived key
block, err := aes.NewCipher(key)
@@ -432,7 +433,10 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
// derive key
keyLen, h := getPbkdf2Params(alg)
- key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
+ key, err := pbkdf2Key(h, string(ctx.key), salt, p2c, keyLen)
+ if err != nil {
+ return nil, err
+ }
// use AES cipher with derived key
block, err := aes.NewCipher(key)
diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go b/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go
new file mode 100644
index 000000000..6c5a4e7f2
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/symmetric_go124.go
@@ -0,0 +1,28 @@
+//go:build go1.24
+
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "crypto/pbkdf2"
+ "hash"
+)
+
+func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) {
+ return pbkdf2.Key(h, password, salt, iter, keyLen)
+}
diff --git a/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go b/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go
new file mode 100644
index 000000000..bdfc3d766
--- /dev/null
+++ b/vendor/github.com/go-jose/go-jose/v4/symmetric_legacy.go
@@ -0,0 +1,29 @@
+//go:build !go1.24
+
+/*-
+ * Copyright 2014 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jose
+
+import (
+ "hash"
+
+ "golang.org/x/crypto/pbkdf2"
+)
+
+func pbkdf2Key(h func() hash.Hash, password string, salt []byte, iter, keyLen int) ([]byte, error) {
+ return pbkdf2.Key([]byte(password), salt, iter, keyLen, h), nil
+}
diff --git a/vendor/github.com/ncruces/go-sqlite3/driver/driver.go b/vendor/github.com/ncruces/go-sqlite3/driver/driver.go
index 280611f71..27496f6cb 100644
--- a/vendor/github.com/ncruces/go-sqlite3/driver/driver.go
+++ b/vendor/github.com/ncruces/go-sqlite3/driver/driver.go
@@ -604,15 +604,6 @@ func (r resultRowsAffected) RowsAffected() (int64, error) {
return int64(r), nil
}
-type rows struct {
- ctx context.Context
- *stmt
- names []string
- types []string
- nulls []bool
- scans []scantype
-}
-
type scantype byte
const (
@@ -648,10 +639,20 @@ func scanFromDecl(decl string) scantype {
return _ANY
}
+type rows struct {
+ ctx context.Context
+ *stmt
+ names []string
+ types []string
+ nulls []bool
+ scans []scantype
+}
+
var (
// Ensure these interfaces are implemented:
_ driver.RowsColumnTypeDatabaseTypeName = &rows{}
_ driver.RowsColumnTypeNullable = &rows{}
+ // _ driver.RowsColumnScanner = &rows{}
)
func (r *rows) Close() error {
@@ -740,7 +741,7 @@ func (r *rows) ColumnTypeScanType(index int) (typ reflect.Type) {
switch {
case scan == _TIME && val != _BLOB && val != _NULL:
t := r.Stmt.ColumnTime(index, r.tmRead)
- useValType = t == time.Time{}
+ useValType = t.IsZero()
case scan == _BOOL && val == _INT:
i := r.Stmt.ColumnInt64(index)
useValType = i != 0 && i != 1
@@ -830,3 +831,23 @@ func (r *rows) Next(dest []driver.Value) error {
}
return nil
}
+
+func (r *rows) ScanColumn(dest any, index int) error {
+ // notest // Go 1.26
+ var ptr *time.Time
+ switch d := dest.(type) {
+ case *time.Time:
+ ptr = d
+ case *sql.NullTime:
+ ptr = &d.Time
+ case *sql.Null[time.Time]:
+ ptr = &d.V
+ default:
+ return driver.ErrSkip
+ }
+ if t := r.Stmt.ColumnTime(index, r.tmRead); !t.IsZero() {
+ *ptr = t
+ return nil
+ }
+ return driver.ErrSkip
+}
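The new ScanColumn method targets Go 1.26's driver.RowsColumnScanner (still commented out in the interface checks above). A hedged sketch of what it enables through database/sql, assuming the ncruces driver registers itself under the name "sqlite3" and using a hypothetical table and column:
```go
import (
	"database/sql"
	"time"

	_ "github.com/ncruces/go-sqlite3/driver" // assumed to register the "sqlite3" driver
	_ "github.com/ncruces/go-sqlite3/embed"  // Wasm build of SQLite
)

// lastSeen is a hypothetical query; table and column names are assumptions.
func lastSeen(db *sql.DB, id int) (time.Time, error) {
	// Once RowsColumnScanner lands, time-typed columns scan straight into
	// *time.Time, *sql.NullTime or *sql.Null[time.Time].
	var t time.Time
	err := db.QueryRow(`SELECT last_seen FROM users WHERE id = ?`, id).Scan(&t)
	return t, err
}
```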
diff --git a/vendor/github.com/ncruces/go-sqlite3/embed/README.md b/vendor/github.com/ncruces/go-sqlite3/embed/README.md
index 346a4e308..67af2977b 100644
--- a/vendor/github.com/ncruces/go-sqlite3/embed/README.md
+++ b/vendor/github.com/ncruces/go-sqlite3/embed/README.md
@@ -1,6 +1,6 @@
# Embeddable Wasm build of SQLite
-This folder includes an embeddable Wasm build of SQLite 3.50.3 for use with
+This folder includes an embeddable Wasm build of SQLite 3.50.4 for use with
[`github.com/ncruces/go-sqlite3`](https://pkg.go.dev/github.com/ncruces/go-sqlite3).
The following optional features are compiled in:
diff --git a/vendor/github.com/ncruces/go-sqlite3/embed/build.sh b/vendor/github.com/ncruces/go-sqlite3/embed/build.sh
index c96a18653..e078c1065 100644
--- a/vendor/github.com/ncruces/go-sqlite3/embed/build.sh
+++ b/vendor/github.com/ncruces/go-sqlite3/embed/build.sh
@@ -17,7 +17,8 @@ trap 'rm -f sqlite3.tmp' EXIT
-mmutable-globals -mnontrapping-fptoint \
-msimd128 -mbulk-memory -msign-ext \
-mreference-types -mmultivalue \
- -fno-stack-protector -fno-stack-clash-protection \
+ -mno-extended-const \
+ -fno-stack-protector \
-Wl,--stack-first \
-Wl,--import-undefined \
-Wl,--initial-memory=327680 \
diff --git a/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm b/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm
index dac29da3d..f8c8f5f86 100644
--- a/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm
+++ b/vendor/github.com/ncruces/go-sqlite3/embed/sqlite3.wasm
Binary files differ
diff --git a/vendor/github.com/ncruces/go-sqlite3/func.go b/vendor/github.com/ncruces/go-sqlite3/func.go
index 16b43056d..556d89ebc 100644
--- a/vendor/github.com/ncruces/go-sqlite3/func.go
+++ b/vendor/github.com/ncruces/go-sqlite3/func.go
@@ -59,7 +59,7 @@ func (c *Conn) CreateCollation(name string, fn CollatingFunction) error {
return c.error(rc)
}
-// Collating function is the type of a collation callback.
+// CollatingFunction is the type of a collation callback.
// Implementations must not retain a or b.
type CollatingFunction func(a, b []byte) int
@@ -132,7 +132,7 @@ func (c *Conn) CreateWindowFunction(name string, nArg int, flag FunctionFlag, fn
if win, ok := agg.(WindowFunction); ok {
return win
}
- return windowFunc{agg, name}
+ return agg
}))
}
rc := res_t(c.call("sqlite3_create_window_function_go",
@@ -307,13 +307,3 @@ func (a *aggregateFunc) Close() error {
a.stop()
return nil
}
-
-type windowFunc struct {
- AggregateFunction
- name string
-}
-
-func (w windowFunc) Inverse(ctx Context, arg ...Value) {
- // Implementing inverse allows certain queries that don't really need it to succeed.
- ctx.ResultError(util.ErrorString(w.name + ": may not be used as a window function"))
-}
diff --git a/vendor/github.com/ncruces/go-sqlite3/internal/util/func.go b/vendor/github.com/ncruces/go-sqlite3/internal/util/func.go
index e705f3181..8e89b118a 100644
--- a/vendor/github.com/ncruces/go-sqlite3/internal/util/func.go
+++ b/vendor/github.com/ncruces/go-sqlite3/internal/util/func.go
@@ -20,20 +20,6 @@ func ExportFuncVI[T0 i32](mod wazero.HostModuleBuilder, name string, fn func(con
Export(name)
}
-type funcVII[T0, T1 i32] func(context.Context, api.Module, T0, T1)
-
-func (fn funcVII[T0, T1]) Call(ctx context.Context, mod api.Module, stack []uint64) {
- _ = stack[1] // prevent bounds check on every slice access
- fn(ctx, mod, T0(stack[0]), T1(stack[1]))
-}
-
-func ExportFuncVII[T0, T1 i32](mod wazero.HostModuleBuilder, name string, fn func(context.Context, api.Module, T0, T1)) {
- mod.NewFunctionBuilder().
- WithGoModuleFunction(funcVII[T0, T1](fn),
- []api.ValueType{api.ValueTypeI32, api.ValueTypeI32}, nil).
- Export(name)
-}
-
type funcVIII[T0, T1, T2 i32] func(context.Context, api.Module, T0, T1, T2)
func (fn funcVIII[T0, T1, T2]) Call(ctx context.Context, mod api.Module, stack []uint64) {
diff --git a/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm b/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm
index f7dc0cdf4..996541e76 100644
--- a/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm
+++ b/vendor/github.com/ncruces/go-sqlite3/util/sql3util/wasm/sql3parse_table.wasm
Binary files differ
diff --git a/vendor/github.com/ncruces/go-sqlite3/util/vfsutil/slice.go b/vendor/github.com/ncruces/go-sqlite3/util/vfsutil/slice.go
new file mode 100644
index 000000000..b0fbe993c
--- /dev/null
+++ b/vendor/github.com/ncruces/go-sqlite3/util/vfsutil/slice.go
@@ -0,0 +1,102 @@
+package vfsutil
+
+import (
+ "io"
+
+ "github.com/ncruces/go-sqlite3"
+ "github.com/ncruces/go-sqlite3/vfs"
+)
+
+// SliceFile implements [vfs.File] with a byte slice.
+// It is suitable for temporary files (such as [vfs.OPEN_TEMP_JOURNAL]),
+// but not concurrency safe.
+type SliceFile []byte
+
+var (
+ // Ensure these interfaces are implemented:
+ _ vfs.FileSizeHint = &SliceFile{}
+)
+
+// ReadAt implements [io.ReaderAt].
+func (f *SliceFile) ReadAt(b []byte, off int64) (n int, err error) {
+ if d := *f; off < int64(len(d)) {
+ n = copy(b, d[off:])
+ }
+ if n < len(b) {
+ err = io.EOF
+ }
+ return
+}
+
+// WriteAt implements [io.WriterAt].
+func (f *SliceFile) WriteAt(b []byte, off int64) (n int, err error) {
+ d := *f
+ if off > int64(len(d)) {
+ d = append(d, make([]byte, off-int64(len(d)))...)
+ }
+ d = append(d[:off], b...)
+ if len(d) > len(*f) {
+ *f = d
+ }
+ return len(b), nil
+}
+
+// Size implements [vfs.File].
+func (f *SliceFile) Size() (int64, error) {
+ return int64(len(*f)), nil
+}
+
+// Truncate implements [vfs.File].
+func (f *SliceFile) Truncate(size int64) error {
+ if d := *f; size < int64(len(d)) {
+ *f = d[:size]
+ }
+ return nil
+}
+
+// SizeHint implements [vfs.FileSizeHint].
+func (f *SliceFile) SizeHint(size int64) error {
+ if d := *f; size > int64(len(d)) {
+ *f = append(d, make([]byte, size-int64(len(d)))...)
+ }
+ return nil
+}
+
+// Close implements [io.Closer].
+func (*SliceFile) Close() error { return nil }
+
+// Sync implements [vfs.File].
+func (*SliceFile) Sync(flags vfs.SyncFlag) error { return nil }
+
+// Lock implements [vfs.File].
+func (*SliceFile) Lock(lock vfs.LockLevel) error {
+ // notest // not concurrency safe
+ return sqlite3.IOERR_LOCK
+}
+
+// Unlock implements [vfs.File].
+func (*SliceFile) Unlock(lock vfs.LockLevel) error {
+ // notest // not concurrency safe
+ return sqlite3.IOERR_UNLOCK
+}
+
+// CheckReservedLock implements [vfs.File].
+func (*SliceFile) CheckReservedLock() (bool, error) {
+ // notest // not concurrency safe
+ return false, sqlite3.IOERR_CHECKRESERVEDLOCK
+}
+
+// SectorSize implements [vfs.File].
+func (*SliceFile) SectorSize() int {
+ // notest // safe default
+ return 0
+}
+
+// DeviceCharacteristics implements [vfs.File].
+func (*SliceFile) DeviceCharacteristics() vfs.DeviceCharacteristic {
+ return vfs.IOCAP_ATOMIC |
+ vfs.IOCAP_SEQUENTIAL |
+ vfs.IOCAP_SAFE_APPEND |
+ vfs.IOCAP_POWERSAFE_OVERWRITE |
+ vfs.IOCAP_SUBPAGE_READ
+}
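A short sketch of SliceFile's read/write semantics as implemented above (locking intentionally fails, since the type is not concurrency safe):
```go
import "github.com/ncruces/go-sqlite3/util/vfsutil"

func sliceFileDemo() {
	var f vfsutil.SliceFile
	_, _ = f.WriteAt([]byte("hello"), 0) // grows the slice as needed
	_ = f.SizeHint(16)                   // pre-extends to 16 zero-padded bytes

	buf := make([]byte, 5)
	n, _ := f.ReadAt(buf, 0) // n == 5, buf holds "hello"
	size, _ := f.Size()      // size == 16
	_, _ = n, size
}
```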
diff --git a/vendor/github.com/ncruces/go-sqlite3/util/vfsutil/wrap.go b/vendor/github.com/ncruces/go-sqlite3/util/vfsutil/wrap.go
new file mode 100644
index 000000000..ad96547fa
--- /dev/null
+++ b/vendor/github.com/ncruces/go-sqlite3/util/vfsutil/wrap.go
@@ -0,0 +1,185 @@
+// Package vfsutil implements virtual filesystem utilities.
+package vfsutil
+
+import (
+ "github.com/ncruces/go-sqlite3"
+ "github.com/ncruces/go-sqlite3/vfs"
+)
+
+// UnwrapFile unwraps a [vfs.File],
+// possibly implementing [vfs.FileUnwrap],
+// to a concrete type.
+func UnwrapFile[T vfs.File](f vfs.File) (_ T, _ bool) {
+ for {
+ switch t := f.(type) {
+ default:
+ return
+ case T:
+ return t, true
+ case vfs.FileUnwrap:
+ f = t.Unwrap()
+ }
+ }
+}
+
+// WrapOpen helps wrap [vfs.VFS].
+func WrapOpen(f vfs.VFS, name string, flags vfs.OpenFlag) (file vfs.File, _ vfs.OpenFlag, err error) {
+ if f, ok := f.(vfs.VFSFilename); name == "" && ok {
+ return f.OpenFilename(nil, flags)
+ }
+ return f.Open(name, flags)
+}
+
+// WrapOpenFilename helps wrap [vfs.VFSFilename].
+func WrapOpenFilename(f vfs.VFS, name *vfs.Filename, flags vfs.OpenFlag) (file vfs.File, _ vfs.OpenFlag, err error) {
+ if f, ok := f.(vfs.VFSFilename); ok {
+ return f.OpenFilename(name, flags)
+ }
+ return f.Open(name.String(), flags)
+}
+
+// WrapLockState helps wrap [vfs.FileLockState].
+func WrapLockState(f vfs.File) vfs.LockLevel {
+ if f, ok := f.(vfs.FileLockState); ok {
+ return f.LockState()
+ }
+ return vfs.LOCK_EXCLUSIVE + 1 // UNKNOWN_LOCK
+}
+
+// WrapPersistWAL helps wrap [vfs.FilePersistWAL].
+func WrapPersistWAL(f vfs.File) bool {
+ if f, ok := f.(vfs.FilePersistWAL); ok {
+ return f.PersistWAL()
+ }
+ return false
+}
+
+// WrapSetPersistWAL helps wrap [vfs.FilePersistWAL].
+func WrapSetPersistWAL(f vfs.File, keepWAL bool) {
+ if f, ok := f.(vfs.FilePersistWAL); ok {
+ f.SetPersistWAL(keepWAL)
+ }
+}
+
+// WrapPowersafeOverwrite helps wrap [vfs.FilePowersafeOverwrite].
+func WrapPowersafeOverwrite(f vfs.File) bool {
+ if f, ok := f.(vfs.FilePowersafeOverwrite); ok {
+ return f.PowersafeOverwrite()
+ }
+ return false
+}
+
+// WrapSetPowersafeOverwrite helps wrap [vfs.FilePowersafeOverwrite].
+func WrapSetPowersafeOverwrite(f vfs.File, psow bool) {
+ if f, ok := f.(vfs.FilePowersafeOverwrite); ok {
+ f.SetPowersafeOverwrite(psow)
+ }
+}
+
+// WrapChunkSize helps wrap [vfs.FileChunkSize].
+func WrapChunkSize(f vfs.File, size int) {
+ if f, ok := f.(vfs.FileChunkSize); ok {
+ f.ChunkSize(size)
+ }
+}
+
+// WrapSizeHint helps wrap [vfs.FileSizeHint].
+func WrapSizeHint(f vfs.File, size int64) error {
+ if f, ok := f.(vfs.FileSizeHint); ok {
+ return f.SizeHint(size)
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapHasMoved helps wrap [vfs.FileHasMoved].
+func WrapHasMoved(f vfs.File) (bool, error) {
+ if f, ok := f.(vfs.FileHasMoved); ok {
+ return f.HasMoved()
+ }
+ return false, sqlite3.NOTFOUND
+}
+
+// WrapOverwrite helps wrap [vfs.FileOverwrite].
+func WrapOverwrite(f vfs.File) error {
+ if f, ok := f.(vfs.FileOverwrite); ok {
+ return f.Overwrite()
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapSyncSuper helps wrap [vfs.FileSync].
+func WrapSyncSuper(f vfs.File, super string) error {
+ if f, ok := f.(vfs.FileSync); ok {
+ return f.SyncSuper(super)
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapCommitPhaseTwo helps wrap [vfs.FileCommitPhaseTwo].
+func WrapCommitPhaseTwo(f vfs.File) error {
+ if f, ok := f.(vfs.FileCommitPhaseTwo); ok {
+ return f.CommitPhaseTwo()
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapBeginAtomicWrite helps wrap [vfs.FileBatchAtomicWrite].
+func WrapBeginAtomicWrite(f vfs.File) error {
+ if f, ok := f.(vfs.FileBatchAtomicWrite); ok {
+ return f.BeginAtomicWrite()
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapCommitAtomicWrite helps wrap [vfs.FileBatchAtomicWrite].
+func WrapCommitAtomicWrite(f vfs.File) error {
+ if f, ok := f.(vfs.FileBatchAtomicWrite); ok {
+ return f.CommitAtomicWrite()
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapRollbackAtomicWrite helps wrap [vfs.FileBatchAtomicWrite].
+func WrapRollbackAtomicWrite(f vfs.File) error {
+ if f, ok := f.(vfs.FileBatchAtomicWrite); ok {
+ return f.RollbackAtomicWrite()
+ }
+ return sqlite3.NOTFOUND
+}
+
+// WrapCheckpointStart helps wrap [vfs.FileCheckpoint].
+func WrapCheckpointStart(f vfs.File) {
+ if f, ok := f.(vfs.FileCheckpoint); ok {
+ f.CheckpointStart()
+ }
+}
+
+// WrapCheckpointDone helps wrap [vfs.FileCheckpoint].
+func WrapCheckpointDone(f vfs.File) {
+ if f, ok := f.(vfs.FileCheckpoint); ok {
+ f.CheckpointDone()
+ }
+}
+
+// WrapPragma helps wrap [vfs.FilePragma].
+func WrapPragma(f vfs.File, name, value string) (string, error) {
+ if f, ok := f.(vfs.FilePragma); ok {
+ return f.Pragma(name, value)
+ }
+ return "", sqlite3.NOTFOUND
+}
+
+// WrapBusyHandler helps wrap [vfs.FilePragma].
+func WrapBusyHandler(f vfs.File, handler func() bool) {
+ if f, ok := f.(vfs.FileBusyHandler); ok {
+ f.BusyHandler(handler)
+ }
+}
+
+// WrapSharedMemory helps wrap [vfs.FileSharedMemory].
+func WrapSharedMemory(f vfs.File) vfs.SharedMemory {
+ if f, ok := f.(vfs.FileSharedMemory); ok {
+ return f.SharedMemory()
+ }
+ return nil
+}
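A minimal sketch of how these helpers are meant to be used from a wrapping file; loggingFile is a hypothetical wrapper shown only for illustration:
```go
import (
	"github.com/ncruces/go-sqlite3/util/vfsutil"
	"github.com/ncruces/go-sqlite3/vfs"
)

// loggingFile embeds the wrapped vfs.File and forwards optional interfaces.
type loggingFile struct {
	vfs.File
}

// Unwrap lets vfsutil.UnwrapFile reach the underlying file.
func (f loggingFile) Unwrap() vfs.File { return f.File }

// SizeHint forwards vfs.FileSizeHint without loggingFile having to know
// whether the wrapped file implements it.
func (f loggingFile) SizeHint(size int64) error {
	return vfsutil.WrapSizeHint(f.File, size)
}

// SharedMemory forwards vfs.FileSharedMemory the same way.
func (f loggingFile) SharedMemory() vfs.SharedMemory {
	return vfsutil.WrapSharedMemory(f.File)
}
```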
diff --git a/vendor/github.com/ncruces/go-sqlite3/value.go b/vendor/github.com/ncruces/go-sqlite3/value.go
index 54d8f21eb..6806e9a79 100644
--- a/vendor/github.com/ncruces/go-sqlite3/value.go
+++ b/vendor/github.com/ncruces/go-sqlite3/value.go
@@ -31,9 +31,9 @@ func (v Value) Dup() *Value {
// Close frees an SQL value previously obtained by [Value.Dup].
//
// https://sqlite.org/c3ref/value_dup.html
-func (dup *Value) Close() error {
- dup.c.call("sqlite3_value_free", stk_t(dup.handle))
- dup.handle = 0
+func (v *Value) Close() error {
+ v.c.call("sqlite3_value_free", stk_t(v.handle))
+ v.handle = 0
return nil
}
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/api.go b/vendor/github.com/ncruces/go-sqlite3/vfs/api.go
index d5bb3a7ae..a0d36b214 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/api.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/api.go
@@ -36,9 +36,9 @@ type VFSFilename interface {
//
// https://sqlite.org/c3ref/io_methods.html
type File interface {
- Close() error
- ReadAt(p []byte, off int64) (n int, err error)
- WriteAt(p []byte, off int64) (n int, err error)
+ io.Closer
+ io.ReaderAt
+ io.WriterAt
Truncate(size int64) error
Sync(flags SyncFlag) error
Size() (int64, error)
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go b/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go
index 0ff7b6f18..0123cc634 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/cksm.go
@@ -5,7 +5,6 @@ import (
"context"
_ "embed"
"encoding/binary"
- "strconv"
"github.com/tetratelabs/wazero/api"
@@ -13,48 +12,30 @@ import (
"github.com/ncruces/go-sqlite3/util/sql3util"
)
-func cksmWrapFile(name *Filename, flags OpenFlag, file File) File {
- // Checksum only main databases and WALs.
- if flags&(OPEN_MAIN_DB|OPEN_WAL) == 0 {
+func cksmWrapFile(file File, flags OpenFlag) File {
+ // Checksum only main databases.
+ if flags&OPEN_MAIN_DB == 0 {
return file
}
-
- cksm := cksmFile{File: file}
-
- if flags&OPEN_WAL != 0 {
- main, _ := name.DatabaseFile().(cksmFile)
- cksm.cksmFlags = main.cksmFlags
- } else {
- cksm.cksmFlags = new(cksmFlags)
- cksm.isDB = true
- }
-
- return cksm
+ return &cksmFile{File: file}
}
type cksmFile struct {
File
- *cksmFlags
- isDB bool
-}
-
-type cksmFlags struct {
- computeCksm bool
verifyCksm bool
- inCkpt bool
- pageSize int
+ computeCksm bool
}
-func (c cksmFile) ReadAt(p []byte, off int64) (n int, err error) {
+func (c *cksmFile) ReadAt(p []byte, off int64) (n int, err error) {
n, err = c.File.ReadAt(p, off)
p = p[:n]
- if isHeader(c.isDB, p, off) {
+ if isHeader(p, off) {
c.init((*[100]byte)(p))
}
// Verify checksums.
- if c.verifyCksm && !c.inCkpt && len(p) == c.pageSize {
+ if c.verifyCksm && sql3util.ValidPageSize(len(p)) {
cksm1 := cksmCompute(p[:len(p)-8])
cksm2 := *(*[8]byte)(p[len(p)-8:])
if cksm1 != cksm2 {
@@ -64,20 +45,20 @@ func (c cksmFile) ReadAt(p []byte, off int64) (n int, err error) {
return n, err
}
-func (c cksmFile) WriteAt(p []byte, off int64) (n int, err error) {
- if isHeader(c.isDB, p, off) {
+func (c *cksmFile) WriteAt(p []byte, off int64) (n int, err error) {
+ if isHeader(p, off) {
c.init((*[100]byte)(p))
}
// Compute checksums.
- if c.computeCksm && !c.inCkpt && len(p) == c.pageSize {
+ if c.computeCksm && sql3util.ValidPageSize(len(p)) {
*(*[8]byte)(p[len(p)-8:]) = cksmCompute(p[:len(p)-8])
}
return c.File.WriteAt(p, off)
}
-func (c cksmFile) Pragma(name string, value string) (string, error) {
+func (c *cksmFile) Pragma(name string, value string) (string, error) {
switch name {
case "checksum_verification":
b, ok := sql3util.ParseBool(value)
@@ -90,15 +71,15 @@ func (c cksmFile) Pragma(name string, value string) (string, error) {
return "1", nil
case "page_size":
- if c.computeCksm {
+ if c.computeCksm && value != "" {
// Do not allow page size changes on a checksum database.
- return strconv.Itoa(c.pageSize), nil
+ return "", nil
}
}
return "", _NOTFOUND
}
-func (c cksmFile) DeviceCharacteristics() DeviceCharacteristic {
+func (c *cksmFile) DeviceCharacteristics() DeviceCharacteristic {
ret := c.File.DeviceCharacteristics()
if c.verifyCksm {
ret &^= IOCAP_SUBPAGE_READ
@@ -106,13 +87,8 @@ func (c cksmFile) DeviceCharacteristics() DeviceCharacteristic {
return ret
}
-func (c cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpcode, pArg ptr_t) _ErrorCode {
- switch op {
- case _FCNTL_CKPT_START:
- c.inCkpt = true
- case _FCNTL_CKPT_DONE:
- c.inCkpt = false
- case _FCNTL_PRAGMA:
+func (c *cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpcode, pArg ptr_t) _ErrorCode {
+ if op == _FCNTL_PRAGMA {
rc := vfsFileControlImpl(ctx, mod, c, op, pArg)
if rc != _NOTFOUND {
return rc
@@ -121,24 +97,26 @@ func (c cksmFile) fileControl(ctx context.Context, mod api.Module, op _FcntlOpco
return vfsFileControlImpl(ctx, mod, c.File, op, pArg)
}
-func (f *cksmFlags) init(header *[100]byte) {
- f.pageSize = 256 * int(binary.LittleEndian.Uint16(header[16:18]))
- if r := header[20] == 8; r != f.computeCksm {
- f.computeCksm = r
- f.verifyCksm = r
- }
- if !sql3util.ValidPageSize(f.pageSize) {
- f.computeCksm = false
- f.verifyCksm = false
+func (c *cksmFile) init(header *[100]byte) {
+ if r := header[20] == 8; r != c.computeCksm {
+ c.computeCksm = r
+ c.verifyCksm = r
}
}
-func isHeader(isDB bool, p []byte, off int64) bool {
- check := sql3util.ValidPageSize(len(p))
- if isDB {
- check = off == 0 && len(p) >= 100
+func (c *cksmFile) SharedMemory() SharedMemory {
+ if f, ok := c.File.(FileSharedMemory); ok {
+ return f.SharedMemory()
}
- return check && bytes.HasPrefix(p, []byte("SQLite format 3\000"))
+ return nil
+}
+
+func (c *cksmFile) Unwrap() File {
+ return c.File
+}
+
+func isHeader(p []byte, off int64) bool {
+ return off == 0 && len(p) >= 100 && bytes.HasPrefix(p, []byte("SQLite format 3\000"))
}
func cksmCompute(a []byte) (cksm [8]byte) {
@@ -155,14 +133,3 @@ func cksmCompute(a []byte) (cksm [8]byte) {
binary.LittleEndian.PutUint32(cksm[4:8], s2)
return
}
-
-func (c cksmFile) SharedMemory() SharedMemory {
- if f, ok := c.File.(FileSharedMemory); ok {
- return f.SharedMemory()
- }
- return nil
-}
-
-func (c cksmFile) Unwrap() File {
- return c.File
-}
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/file.go b/vendor/github.com/ncruces/go-sqlite3/vfs/file.go
index 8e65ca660..06906c961 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/file.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/file.go
@@ -75,6 +75,9 @@ func (vfsOS) Access(name string, flags AccessFlag) (bool, error) {
func (vfsOS) Open(name string, flags OpenFlag) (File, OpenFlag, error) {
// notest // OpenFilename is called instead
+ if name == "" {
+ return vfsOS{}.OpenFilename(nil, flags)
+ }
return nil, 0, _CANTOPEN
}
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/filename.go b/vendor/github.com/ncruces/go-sqlite3/vfs/filename.go
index 965c3b1a6..be16b3dde 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/filename.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/filename.go
@@ -56,7 +56,7 @@ func (n *Filename) Journal() string {
return n.path("sqlite3_filename_journal")
}
-// Journal returns the name of the corresponding WAL file.
+// WAL returns the name of the corresponding WAL file.
//
// https://sqlite.org/c3ref/filename_database.html
func (n *Filename) WAL() string {
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/memdb/memdb.go b/vendor/github.com/ncruces/go-sqlite3/vfs/memdb/memdb.go
index b99070496..e304f6d58 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/memdb/memdb.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/memdb/memdb.go
@@ -2,40 +2,39 @@ package memdb
import (
"io"
+ "strings"
"sync"
"time"
"github.com/ncruces/go-sqlite3"
+ "github.com/ncruces/go-sqlite3/util/vfsutil"
"github.com/ncruces/go-sqlite3/vfs"
)
const sectorSize = 65536
-// Ensure sectorSize is a multiple of 64K (the largest page size).
-var _ [0]struct{} = [sectorSize & 65535]struct{}{}
-
type memVFS struct{}
func (memVFS) Open(name string, flags vfs.OpenFlag) (vfs.File, vfs.OpenFlag, error) {
// For simplicity, we do not support reading or writing data
// across "sector" boundaries.
- //
- // This is not a problem for most SQLite file types:
- // - databases, which only do page aligned reads/writes;
- // - temp journals, as used by the sorter, which does the same:
- // https://github.com/sqlite/sqlite/blob/b74eb0/src/vdbesort.c#L409-L412
- //
- // We refuse to open all other file types,
- // but returning OPEN_MEMORY means SQLite won't ask us to.
- const types = vfs.OPEN_MAIN_DB | vfs.OPEN_TEMP_DB |
- vfs.OPEN_TRANSIENT_DB | vfs.OPEN_TEMP_JOURNAL
- if flags&types == 0 {
+ // This is not a problem for SQLite database files.
+ const databases = vfs.OPEN_MAIN_DB | vfs.OPEN_TEMP_DB | vfs.OPEN_TRANSIENT_DB
+
+ // Temp journals, as used by the sorter, use SliceFile.
+ if flags&vfs.OPEN_TEMP_JOURNAL != 0 {
+ return &vfsutil.SliceFile{}, flags | vfs.OPEN_MEMORY, nil
+ }
+
+ // Refuse to open all other file types.
+ // Returning OPEN_MEMORY means SQLite won't ask us to.
+ if flags&databases == 0 {
// notest // OPEN_MEMORY
return nil, flags, sqlite3.CANTOPEN
}
// A shared database has a name that begins with "/".
- shared := len(name) > 1 && name[0] == '/'
+ shared := strings.HasPrefix(name, "/")
var db *memDB
if shared {
@@ -76,18 +75,16 @@ func (memVFS) FullPathname(name string) (string, error) {
type memDB struct {
name string
+ // +checklocks:lockMtx
+ waiter *sync.Cond
// +checklocks:dataMtx
data []*[sectorSize]byte
- // +checklocks:dataMtx
- size int64
-
- // +checklocks:memoryMtx
- refs int32
- shared int32 // +checklocks:lockMtx
- pending bool // +checklocks:lockMtx
- reserved bool // +checklocks:lockMtx
- waiter *sync.Cond // +checklocks:lockMtx
+ size int64 // +checklocks:dataMtx
+ refs int32 // +checklocks:memoryMtx
+ shared int32 // +checklocks:lockMtx
+ pending bool // +checklocks:lockMtx
+ reserved bool // +checklocks:lockMtx
lockMtx sync.Mutex
dataMtx sync.RWMutex
@@ -129,7 +126,7 @@ func (m *memFile) ReadAt(b []byte, off int64) (n int, err error) {
base := off / sectorSize
rest := off % sectorSize
have := int64(sectorSize)
- if base == int64(len(m.data))-1 {
+ if m.size < off+int64(len(b)) {
have = modRoundUp(m.size, sectorSize)
}
n = copy(b, (*m.data[base])[rest:have])
@@ -150,22 +147,37 @@ func (m *memFile) WriteAt(b []byte, off int64) (n int, err error) {
m.data = append(m.data, new([sectorSize]byte))
}
n = copy((*m.data[base])[rest:], b)
+ if size := off + int64(n); size > m.size {
+ m.size = size
+ }
if n < len(b) {
// notest // assume writes are page aligned
return n, io.ErrShortWrite
}
- if size := off + int64(len(b)); size > m.size {
- m.size = size
- }
return n, nil
}
+func (m *memFile) Size() (int64, error) {
+ m.dataMtx.RLock()
+ defer m.dataMtx.RUnlock()
+ return m.size, nil
+}
+
func (m *memFile) Truncate(size int64) error {
m.dataMtx.Lock()
defer m.dataMtx.Unlock()
return m.truncate(size)
}
+func (m *memFile) SizeHint(size int64) error {
+ m.dataMtx.Lock()
+ defer m.dataMtx.Unlock()
+ if size > m.size {
+ return m.truncate(size)
+ }
+ return nil
+}
+
// +checklocks:m.dataMtx
func (m *memFile) truncate(size int64) error {
if size < m.size {
@@ -185,16 +197,6 @@ func (m *memFile) truncate(size int64) error {
return nil
}
-func (m *memFile) Sync(flag vfs.SyncFlag) error {
- return nil
-}
-
-func (m *memFile) Size() (int64, error) {
- m.dataMtx.RLock()
- defer m.dataMtx.RUnlock()
- return m.size, nil
-}
-
func (m *memFile) Lock(lock vfs.LockLevel) error {
if m.lock >= lock {
return nil
@@ -278,31 +280,24 @@ func (m *memFile) CheckReservedLock() (bool, error) {
return m.reserved, nil
}
-func (m *memFile) SectorSize() int {
+func (m *memFile) LockState() vfs.LockLevel {
+ return m.lock
+}
+
+func (*memFile) Sync(flag vfs.SyncFlag) error { return nil }
+
+func (*memFile) SectorSize() int {
// notest // IOCAP_POWERSAFE_OVERWRITE
return sectorSize
}
-func (m *memFile) DeviceCharacteristics() vfs.DeviceCharacteristic {
+func (*memFile) DeviceCharacteristics() vfs.DeviceCharacteristic {
return vfs.IOCAP_ATOMIC |
vfs.IOCAP_SEQUENTIAL |
vfs.IOCAP_SAFE_APPEND |
vfs.IOCAP_POWERSAFE_OVERWRITE
}
-func (m *memFile) SizeHint(size int64) error {
- m.dataMtx.Lock()
- defer m.dataMtx.Unlock()
- if size > m.size {
- return m.truncate(size)
- }
- return nil
-}
-
-func (m *memFile) LockState() vfs.LockLevel {
- return m.lock
-}
-
func divRoundUp(a, b int64) int64 {
return (a + b - 1) / b
}
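A hedged sketch of opening a shared in-memory database through this VFS; memdb.Create and the "memdb" VFS name are assumptions about the package's public API, and per the Open logic above a shared database name begins with "/":
```go
import (
	"database/sql"

	_ "github.com/ncruces/go-sqlite3/driver"
	_ "github.com/ncruces/go-sqlite3/embed"
	"github.com/ncruces/go-sqlite3/vfs/memdb"
)

func openShared() (*sql.DB, error) {
	memdb.Create("demo", nil) // assumed helper: registers a shared, initially empty database
	return sql.Open("sqlite3", "file:/demo?vfs=memdb")
}
```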
diff --git a/vendor/github.com/ncruces/go-sqlite3/vfs/vfs.go b/vendor/github.com/ncruces/go-sqlite3/vfs/vfs.go
index 2656ddb49..aef81c3f2 100644
--- a/vendor/github.com/ncruces/go-sqlite3/vfs/vfs.go
+++ b/vendor/github.com/ncruces/go-sqlite3/vfs/vfs.go
@@ -148,7 +148,7 @@ func vfsOpen(ctx context.Context, mod api.Module, pVfs, zPath, pFile ptr_t, flag
if pOutFlags != 0 {
util.Write32(mod, pOutFlags, flags)
}
- file = cksmWrapFile(name, flags, file)
+ file = cksmWrapFile(file, flags)
vfsFileRegister(ctx, mod, pFile, file)
return _OK
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
index 8b016355a..7bac0da33 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
- if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
groups = append(groups, group)
}
return groups
@@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ _, err := fmt.Fprintf(buf, format, args...)
return err
}
ws := func(s string) error {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index 592eec3e2..76e59f128 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
case pb.Counter != nil:
pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
case pb.Histogram != nil:
+ h := pb.Histogram
for _, e := range m.exemplars {
- // pb.Histogram.Bucket are sorted by UpperBound.
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
- return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+ if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+ len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+ e.GetTimestamp() != nil {
+ h.Exemplars = append(h.Exemplars, e)
+ if len(h.Bucket) == 0 {
+ // Don't proceed to classic buckets if there are none.
+ continue
+ }
+ }
+ // h.Bucket are sorted by UpperBound.
+ i := sort.Search(len(h.Bucket), func(i int) bool {
+ return h.Bucket[i].GetUpperBound() >= e.GetValue()
})
- if i < len(pb.Histogram.Bucket) {
- pb.Histogram.Bucket[i].Exemplar = e
+ if i < len(h.Bucket) {
+ h.Bucket[i].Exemplar = e
} else {
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
b := &dto.Bucket{
- CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
+ CumulativeCount: proto.Uint64(h.GetSampleCount()),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e,
}
- pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+ h.Bucket = append(h.Bucket, b)
}
}
default:
@@ -227,6 +237,7 @@ type Exemplar struct {
// Only last applicable exemplar is injected from the list.
// For example for Counter it means last exemplar is injected.
// For Histogram, it means last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
//
// NewMetricWithExemplars works best with MustNewConstMetric and
// MustNewConstHistogram, see example.
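A hedged sketch of the injection path described above, assuming MustNewConstHistogram and MustNewMetricWithExemplars keep their documented signatures:
```go
import "github.com/prometheus/client_golang/prometheus"

func constHistogramWithExemplar() prometheus.Metric {
	desc := prometheus.NewDesc("http_request_duration_seconds", "Request latency.", nil, nil)

	// A const classic histogram: 3 observations summing to 1.2s.
	h := prometheus.MustNewConstHistogram(desc, 3, 1.2, map[float64]uint64{0.1: 1, 0.5: 2})

	// The exemplar lands on the first bucket whose upper bound is >= 0.4;
	// for native histograms, every timestamped exemplar is now kept as well.
	return prometheus.MustNewMetricWithExemplars(h, prometheus.Exemplar{
		Value:  0.4,
		Labels: prometheus.Labels{"trace_id": "abc123"},
	})
}
```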
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
index 0a61b9846..b32c95fa3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -25,9 +25,9 @@ import (
"golang.org/x/sys/unix"
)
-// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
+// errNotImplemented is returned by stub functions that replace cgo functions, when cgo
// isn't available.
-var notImplementedErr = errors.New("not implemented")
+var errNotImplemented = errors.New("not implemented")
type memoryInfo struct {
vsize uint64 // Virtual memory size in bytes
@@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if memInfo, err := getMemory(); err == nil {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
- } else if !errors.Is(err, notImplementedErr) {
+ } else if !errors.Is(err, errNotImplemented) {
// Don't report an error when support is not compiled in.
c.reportError(ch, c.rss, err)
c.reportError(ch, c.vsize, err)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
index 8ddb0995d..378865129 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -16,7 +16,7 @@
package prometheus
func getMemory() (*memoryInfo, error) {
- return nil, notImplementedErr
+ return nil, errNotImplemented
}
// describe returns all descriptions of the collector for Darwin.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
index 9f4b130be..8074f70f5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if netstat, err := p.Netstat(); err == nil {
var inOctets, outOctets float64
- if netstat.IpExt.InOctets != nil {
- inOctets = *netstat.IpExt.InOctets
+ if netstat.InOctets != nil {
+ inOctets = *netstat.InOctets
}
- if netstat.IpExt.OutOctets != nil {
- outOctets = *netstat.IpExt.OutOctets
+ if netstat.OutOctets != nil {
+ outOctets = *netstat.OutOctets
}
ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index 356edb786..9332b0249 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
labels := prometheus.Labels{}
- if !(code || method) {
+ if !code && !method {
return labels
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 2c808eece..487b46656 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
return false
}
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+ return m.deleteByHashWithLabelValues(h, lvs, m.curry)
}
// Delete deletes the metric where the variable labels are the same as those
@@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
return false
}
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+ return m.deleteByHashWithLabels(h, labels, m.curry)
}
// DeletePartialMatch deletes all metrics where the variable labels contain all of those
@@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
- return m.metricMap.deleteByLabels(labels, m.curry)
+ return m.deleteByLabels(labels, m.curry)
}
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
@@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+ return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+ return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 25da157f1..2ed128506 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
+// fact, those metrics are already prefixed with "go_" or "process_",
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
}
}
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that, is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector, effectively un-registering
+// the metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ labels: labels,
+ }
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ prefix: prefix,
+ }
+}
+
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string
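
A minimal sketch of how the WrapCollectorWith helper added above can be used; the collector, metric name, and label here are illustrative only, and per the doc comment the same pattern applies to wrapping a whole sub-Registry owned by a third-party library (the hypothetical foo.New).

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	mainReg := prometheus.NewRegistry()

	// Any Collector can be wrapped; per the doc comment above, the pattern is
	// most useful for wrapping a whole sub-Registry owned by a third-party
	// library (hypothetical foo.New) that never unregisters its metrics.
	reqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_requests_total", Help: "Demo requests."},
		[]string{"path"},
	)

	// Metrics collected through the wrapper carry instance="a" as a const
	// label; the underlying collector is left untouched.
	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "a"}, reqs)
	mainReg.MustRegister(wrapped)

	reqs.WithLabelValues("/healthz").Inc()

	// Unregistering the wrapper later removes these metrics from mainReg.
	mainReg.Unregister(wrapped)
}
```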
diff --git a/vendor/github.com/prometheus/otlptranslator/README.md b/vendor/github.com/prometheus/otlptranslator/README.md
index 3b31a448e..b09484e27 100644
--- a/vendor/github.com/prometheus/otlptranslator/README.md
+++ b/vendor/github.com/prometheus/otlptranslator/README.md
@@ -1,2 +1,120 @@
-# otlp-prometheus-translator
-Library providing API to convert OTLP metric and attribute names to respectively Prometheus metric and label names.
+# OTLP Prometheus Translator
+
+A Go library for converting [OpenTelemetry Protocol (OTLP)](https://opentelemetry.io/docs/specs/otlp/) metric and attribute names to [Prometheus](https://prometheus.io/)-compliant formats.
+
+Part of the [Prometheus](https://prometheus.io/) ecosystem, following the [OpenTelemetry to Prometheus compatibility specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md).
+
+## Features
+
+- **Metric Name and Label Translation**: Convert OTLP metric names and attributes to Prometheus-compliant format
+- **Unit Handling**: Translate OTLP units to Prometheus unit conventions
+- **Type-Aware Suffixes**: Optionally append `_total`, `_ratio` based on metric type
+- **Namespace Support**: Add configurable namespace prefixes
+- **UTF-8 Support**: Choose between Prometheus legacy scheme compliant metric/label names (`[a-zA-Z0-9:_]`) or untranslated metric/label names
+- **Translation Strategy Configuration**: Select a translation strategy from a standard set of strategy names.
+
+## Installation
+
+```bash
+go get github.com/prometheus/otlptranslator
+```
+
+## Quick Start
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/prometheus/otlptranslator"
+)
+
+func main() {
+ // Create a metric namer using traditional Prometheus name translation, with suffixes added and UTF-8 disallowed.
+ strategy := otlptranslator.UnderscoreEscapingWithSuffixes
+ namer := otlptranslator.NewMetricNamer("myapp", strategy)
+
+ // Translate OTLP metric to Prometheus format
+ metric := otlptranslator.Metric{
+ Name: "http.server.request.duration",
+ Unit: "s",
+ Type: otlptranslator.MetricTypeHistogram,
+ }
+ fmt.Println(namer.Build(metric)) // Output: myapp_http_server_request_duration_seconds
+
+ // Translate label names
+ labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false}
+ fmt.Println(labelNamer.Build("http.method")) // Output: http_method
+}
+```
+
+## Usage Examples
+
+### Metric Name Translation
+
+```go
+namer := otlptranslator.MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
+
+// Counter gets _total suffix
+counter := otlptranslator.Metric{
+ Name: "requests.count", Unit: "1", Type: otlptranslator.MetricTypeMonotonicCounter,
+}
+fmt.Println(namer.Build(counter)) // requests_count_total
+
+// Gauge with unit conversion
+gauge := otlptranslator.Metric{
+ Name: "memory.usage", Unit: "By", Type: otlptranslator.MetricTypeGauge,
+}
+fmt.Println(namer.Build(gauge)) // memory_usage_bytes
+
+// Dimensionless gauge gets _ratio suffix
+ratio := otlptranslator.Metric{
+ Name: "cpu.utilization", Unit: "1", Type: otlptranslator.MetricTypeGauge,
+}
+fmt.Println(namer.Build(ratio)) // cpu_utilization_ratio
+```
+
+### Label Translation
+
+```go
+labelNamer := otlptranslator.LabelNamer{UTF8Allowed: false}
+
+labelNamer.Build("http.method") // http_method
+labelNamer.Build("123invalid") // key_123invalid
+labelNamer.Build("_private") // key_private
+labelNamer.Build("__reserved__") // __reserved__ (preserved)
+labelNamer.Build("label@with$symbols") // label_with_symbols
+```
+
+### Unit Translation
+
+```go
+unitNamer := otlptranslator.UnitNamer{UTF8Allowed: false}
+
+unitNamer.Build("s") // seconds
+unitNamer.Build("By") // bytes
+unitNamer.Build("requests/s") // requests_per_second
+unitNamer.Build("1") // "" (dimensionless)
+```
+
+### Configuration Options
+
+```go
+// Prometheus-compliant mode - supports [a-zA-Z0-9:_]
+compliantNamer := otlptranslator.MetricNamer{UTF8Allowed: false, WithMetricSuffixes: true}
+
+// Transparent pass-through mode, aka "NoTranslation"
+utf8Namer := otlptranslator.MetricNamer{UTF8Allowed: true, WithMetricSuffixes: false}
+utf8Namer = otlptranslator.NewMetricNamer("", otlptranslator.NoTranslation)
+
+// With namespace and suffixes
+productionNamer := otlptranslator.MetricNamer{
+ Namespace: "myservice",
+ WithMetricSuffixes: true,
+ UTF8Allowed: false,
+}
+```
+
+## License
+
+Licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/prometheus/otlptranslator/doc.go b/vendor/github.com/prometheus/otlptranslator/doc.go
new file mode 100644
index 000000000..a704d8190
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/doc.go
@@ -0,0 +1,24 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otlptranslator provides utilities for converting OpenTelemetry Protocol (OTLP)
+// metric and attribute names to Prometheus-compliant formats.
+//
+// This package is designed to help users translate OpenTelemetry metrics to Prometheus
+// metrics while following the official OpenTelemetry to Prometheus compatibility specification.
+//
+// Main components:
+// - MetricNamer: Translates OTLP metric names to Prometheus metric names
+// - LabelNamer: Translates OTLP attribute names to Prometheus label names
+// - UnitNamer: Translates OTLP units to Prometheus unit conventions
+package otlptranslator
diff --git a/vendor/github.com/prometheus/otlptranslator/label_namer.go b/vendor/github.com/prometheus/otlptranslator/label_namer.go
new file mode 100644
index 000000000..00072a39e
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/label_namer.go
@@ -0,0 +1,90 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The Prometheus Authors
+// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
+
+package otlptranslator
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+// LabelNamer is a helper struct to build label names.
+// It translates OpenTelemetry Protocol (OTLP) attribute names to Prometheus-compliant label names.
+//
+// Example usage:
+//
+// namer := LabelNamer{UTF8Allowed: false}
+// result := namer.Build("http.method") // "http_method"
+type LabelNamer struct {
+ UTF8Allowed bool
+}
+
+// Build normalizes the specified label to follow Prometheus label names standard.
+//
+// Translation rules:
+// - Replaces invalid characters with underscores
+// - Prefixes labels with invalid start characters (numbers or `_`) with "key"
+// - Preserves double underscore labels (reserved names)
+// - If UTF8Allowed is true, returns label as-is
+//
+// Examples:
+//
+// namer := LabelNamer{UTF8Allowed: false}
+// namer.Build("http.method") // "http_method"
+// namer.Build("123invalid") // "key_123invalid"
+// namer.Build("__reserved__") // "__reserved__" (preserved)
+func (ln *LabelNamer) Build(label string) (normalizedName string, err error) {
+ defer func() {
+ if len(normalizedName) == 0 {
+ err = fmt.Errorf("normalization for label name %q resulted in empty name", label)
+ return
+ }
+
+ if ln.UTF8Allowed || normalizedName == label {
+ return
+ }
+
+ // Check that the resulting normalized name contains at least one non-underscore character
+ for _, c := range normalizedName {
+ if c != '_' {
+ return
+ }
+ }
+ err = fmt.Errorf("normalization for label name %q resulted in invalid name %q", label, normalizedName)
+ normalizedName = ""
+ }()
+
+ // Trivial case.
+ if len(label) == 0 || ln.UTF8Allowed {
+ normalizedName = label
+ return
+ }
+
+ normalizedName = sanitizeLabelName(label)
+
+ // If label starts with a number, prepend with "key_".
+ if unicode.IsDigit(rune(normalizedName[0])) {
+ normalizedName = "key_" + normalizedName
+ } else if strings.HasPrefix(normalizedName, "_") && !strings.HasPrefix(normalizedName, "__") {
+ normalizedName = "key" + normalizedName
+ }
+
+ return
+}
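
Since LabelNamer.Build now returns an error alongside the name, here is a small sketch of the new call pattern, assuming sanitizeLabelName replaces every invalid character with an underscore as the doc comment above states; the input strings are illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	namer := otlptranslator.LabelNamer{UTF8Allowed: false}

	name, err := namer.Build("http.method")
	fmt.Println(name, err) // http_method <nil>

	// A label consisting only of invalid characters normalizes to underscores
	// and is now rejected: Build returns an empty name and a non-nil error.
	name, err = namer.Build("@@@")
	fmt.Println(name, err)
}
```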
diff --git a/vendor/github.com/prometheus/otlptranslator/metric_namer.go b/vendor/github.com/prometheus/otlptranslator/metric_namer.go
index 58d68ba98..79e005f68 100644
--- a/vendor/github.com/prometheus/otlptranslator/metric_namer.go
+++ b/vendor/github.com/prometheus/otlptranslator/metric_namer.go
@@ -20,6 +20,7 @@
package otlptranslator
import (
+ "fmt"
"slices"
"strings"
"unicode"
@@ -81,13 +82,48 @@ var perUnitMap = map[string]string{
}
// MetricNamer is a helper struct to build metric names.
+// It converts OpenTelemetry Protocol (OTLP) metric names to Prometheus-compliant metric names.
+//
+// Example usage:
+//
+// namer := MetricNamer{
+// WithMetricSuffixes: true,
+// UTF8Allowed: false,
+// }
+//
+// metric := Metric{
+// Name: "http.server.duration",
+// Unit: "s",
+// Type: MetricTypeHistogram,
+// }
+//
+// result := namer.Build(metric) // "http_server_duration_seconds"
type MetricNamer struct {
Namespace string
WithMetricSuffixes bool
UTF8Allowed bool
}
+// NewMetricNamer creates a MetricNamer with the specified namespace (can be
+// blank) and the requested Translation Strategy.
+func NewMetricNamer(namespace string, strategy TranslationStrategyOption) MetricNamer {
+ return MetricNamer{
+ Namespace: namespace,
+ WithMetricSuffixes: strategy.ShouldAddSuffixes(),
+ UTF8Allowed: !strategy.ShouldEscape(),
+ }
+}
+
// Metric is a helper struct that holds information about a metric.
+// It represents an OpenTelemetry metric with its name, unit, and type.
+//
+// Example:
+//
+// metric := Metric{
+// Name: "http.server.request.duration",
+// Unit: "s",
+// Type: MetricTypeHistogram,
+// }
type Metric struct {
Name string
Unit string
@@ -96,34 +132,70 @@ type Metric struct {
// Build builds a metric name for the specified metric.
//
-// If UTF8Allowed is true, the metric name is returned as is, only with the addition of type/unit suffixes and namespace preffix if required.
-// Otherwise the metric name is normalized to be Prometheus-compliant.
-// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
-// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
-func (mn *MetricNamer) Build(metric Metric) string {
+// The method applies different transformations based on the MetricNamer configuration:
+// - If UTF8Allowed is true, doesn't translate names - all characters must be valid UTF-8, however.
+// - If UTF8Allowed is false, translates metric names to comply with legacy Prometheus name scheme by escaping invalid characters to `_`.
+// - If WithMetricSuffixes is true, adds appropriate suffixes based on type and unit.
+//
+// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
+//
+// Examples:
+//
+// namer := MetricNamer{WithMetricSuffixes: true, UTF8Allowed: false}
+//
+// // Counter gets _total suffix
+// counter := Metric{Name: "requests.count", Unit: "1", Type: MetricTypeMonotonicCounter}
+// result := namer.Build(counter) // "requests_count_total"
+//
+// // Gauge with unit suffix
+// gauge := Metric{Name: "memory.usage", Unit: "By", Type: MetricTypeGauge}
+// result = namer.Build(gauge) // "memory_usage_bytes"
+func (mn *MetricNamer) Build(metric Metric) (string, error) {
if mn.UTF8Allowed {
return mn.buildMetricName(metric.Name, metric.Unit, metric.Type)
}
return mn.buildCompliantMetricName(metric.Name, metric.Unit, metric.Type)
}
-func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) string {
+func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType MetricType) (normalizedName string, err error) {
+ defer func() {
+ if len(normalizedName) == 0 {
+ err = fmt.Errorf("normalization for metric %q resulted in empty name", name)
+ return
+ }
+
+ if normalizedName == name {
+ return
+ }
+
+ // Check that the resulting normalized name contains at least one non-underscore character
+ for _, c := range normalizedName {
+ if c != '_' {
+ return
+ }
+ }
+ err = fmt.Errorf("normalization for metric %q resulted in invalid name %q", name, normalizedName)
+ normalizedName = ""
+ }()
+
// Full normalization following standard Prometheus naming conventions
if mn.WithMetricSuffixes {
- return normalizeName(name, unit, metricType, mn.Namespace)
+ normalizedName = normalizeName(name, unit, metricType, mn.Namespace)
+ return
}
// Simple case (no full normalization, no units, etc.).
metricName := strings.Join(strings.FieldsFunc(name, func(r rune) bool {
- return invalidMetricCharRE.MatchString(string(r))
+ return !isValidCompliantMetricChar(r) && r != '_'
}), "_")
// Namespace?
if mn.Namespace != "" {
namespace := strings.Join(strings.FieldsFunc(mn.Namespace, func(r rune) bool {
- return invalidMetricCharRE.MatchString(string(r))
+ return !isValidCompliantMetricChar(r) && r != '_'
}), "_")
- return namespace + "_" + metricName
+ normalizedName = namespace + "_" + metricName
+ return
}
// Metric name starts with a digit? Prefix it with an underscore.
@@ -131,14 +203,11 @@ func (mn *MetricNamer) buildCompliantMetricName(name, unit string, metricType Me
metricName = "_" + metricName
}
- return metricName
+ normalizedName = metricName
+ return
}
-var (
- // Regexp for metric name characters that should be replaced with _.
- invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`)
- multipleUnderscoresRE = regexp.MustCompile(`__+`)
-)
+var multipleUnderscoresRE = regexp.MustCompile(`__+`)
// isValidCompliantMetricChar checks if a rune is a valid metric name character (a-z, A-Z, 0-9, :).
func isValidCompliantMetricChar(r rune) bool {
@@ -243,33 +312,54 @@ func removeItem(slice []string, value string) []string {
return newSlice
}
-func (mn *MetricNamer) buildMetricName(name, unit string, metricType MetricType) string {
+func (mn *MetricNamer) buildMetricName(inputName, unit string, metricType MetricType) (name string, err error) {
+ name = inputName
if mn.Namespace != "" {
name = mn.Namespace + "_" + name
}
if mn.WithMetricSuffixes {
- mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
- if mainUnitSuffix != "" {
- name = name + "_" + mainUnitSuffix
- }
- if perUnitSuffix != "" {
- name = name + "_" + perUnitSuffix
- }
-
- // Append _total for Counters
- if metricType == MetricTypeMonotonicCounter {
- name += "_total"
- }
-
// Append _ratio for metrics with unit "1"
// Some OTel receivers improperly use unit "1" for counters of objects
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
if unit == "1" && metricType == MetricTypeGauge {
- name += "_ratio"
+ name = trimSuffixAndDelimiter(name, "ratio")
+ defer func() {
+ name += "_ratio"
+ }()
}
+
+ // Append _total for Counters.
+ if metricType == MetricTypeMonotonicCounter {
+ name = trimSuffixAndDelimiter(name, "total")
+ defer func() {
+ name += "_total"
+ }()
+ }
+
+ mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(unit)
+ if perUnitSuffix != "" {
+ name = trimSuffixAndDelimiter(name, perUnitSuffix)
+ defer func() {
+ name = name + "_" + perUnitSuffix
+ }()
+ }
+ // We don't need to trim and re-append the suffix here because this is
+ // the inner-most suffix.
+ if mainUnitSuffix != "" && !strings.HasSuffix(name, mainUnitSuffix) {
+ name = name + "_" + mainUnitSuffix
+ }
+ }
+ return
+}
+
+// trimSuffixAndDelimiter trims a suffix, plus one extra character which is
+// assumed to be a delimiter.
+func trimSuffixAndDelimiter(name, suffix string) string {
+ if strings.HasSuffix(name, suffix) && len(name) > len(suffix)+1 {
+ return name[:len(name)-(len(suffix)+1)]
}
return name
}
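
A brief sketch of the updated MetricNamer.Build signature and the new suffix de-duplication, assuming the NoUTF8EscapingWithSuffixes strategy; the metric name and unit are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/otlptranslator"
)

func main() {
	// NoUTF8EscapingWithSuffixes keeps the original characters but still adds
	// unit/type suffixes where they are missing.
	namer := otlptranslator.NewMetricNamer("", otlptranslator.NoUTF8EscapingWithSuffixes)

	metric := otlptranslator.Metric{
		Name: "http.client.duration.seconds",
		Unit: "s",
		Type: otlptranslator.MetricTypeHistogram,
	}

	name, err := namer.Build(metric)
	if err != nil {
		log.Fatal(err)
	}
	// The existing "seconds" suffix is detected, so it is not appended twice.
	fmt.Println(name) // http.client.duration.seconds
}
```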
diff --git a/vendor/github.com/prometheus/otlptranslator/normalize_label.go b/vendor/github.com/prometheus/otlptranslator/normalize_label.go
deleted file mode 100644
index aa771f784..000000000
--- a/vendor/github.com/prometheus/otlptranslator/normalize_label.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2025 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/93e991ef7ed19cc997a9360c8016cac3767b8057/storage/remote/otlptranslator/prometheus/normalize_label.go
-// Provenance-includes-license: Apache-2.0
-// Provenance-includes-copyright: Copyright The Prometheus Authors
-// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_label.go
-// Provenance-includes-license: Apache-2.0
-// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
-
-package otlptranslator
-
-import (
- "strings"
- "unicode"
-)
-
-// LabelNamer is a helper struct to build label names.
-type LabelNamer struct {
- UTF8Allowed bool
-}
-
-// Build normalizes the specified label to follow Prometheus label names standard.
-//
-// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels.
-//
-// Labels that start with non-letter rune will be prefixed with "key_".
-// An exception is made for double-underscores which are allowed.
-//
-// If UTF8Allowed is true, the label is returned as is. This option is provided just to
-// keep a consistent interface with the MetricNamer.
-func (ln *LabelNamer) Build(label string) string {
- // Trivial case.
- if len(label) == 0 || ln.UTF8Allowed {
- return label
- }
-
- label = sanitizeLabelName(label)
-
- // If label starts with a number, prepend with "key_".
- if unicode.IsDigit(rune(label[0])) {
- label = "key_" + label
- } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") {
- label = "key" + label
- }
-
- return label
-}
diff --git a/vendor/github.com/prometheus/otlptranslator/strategy.go b/vendor/github.com/prometheus/otlptranslator/strategy.go
new file mode 100644
index 000000000..20fe01975
--- /dev/null
+++ b/vendor/github.com/prometheus/otlptranslator/strategy.go
@@ -0,0 +1,86 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Provenance-includes-location: https://github.com/prometheus/prometheus/blob/3602785a89162ccc99a940fb9d862219a2d02241/config/config.go
+// Provenance-includes-license: Apache-2.0
+// Provenance-includes-copyright: Copyright The Prometheus Authors
+
+package otlptranslator
+
+// TranslationStrategyOption is a constant that defines how metric and label
+// names should be handled during translation. The recommended approach is to
+// use either UnderscoreEscapingWithSuffixes for full Prometheus-style
+// compatibility, or NoTranslation for OTel-style names.
+type TranslationStrategyOption string
+
+var (
+ // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. Unit
+ // and type suffixes may be added to metric names, according to certain rules.
+ NoUTF8EscapingWithSuffixes TranslationStrategyOption = "NoUTF8EscapingWithSuffixes"
+ // UnderscoreEscapingWithSuffixes is the default option for translating OTLP
+ // to Prometheus. This option will translate metric name characters that are
+ // not alphanumerics/underscores/colons to underscores, and label name
+ // characters that are not alphanumerics/underscores to underscores. Unit and
+ // type suffixes may be appended to metric names, according to certain rules.
+ UnderscoreEscapingWithSuffixes TranslationStrategyOption = "UnderscoreEscapingWithSuffixes"
+ // UnderscoreEscapingWithoutSuffixes translates metric name characters that
+ // are not alphanumerics/underscores/colons to underscores, and label name
+ // characters that are not alphanumerics/underscores to underscores, but
+ // unlike UnderscoreEscapingWithSuffixes it does not append any suffixes to
+ // the names.
+ UnderscoreEscapingWithoutSuffixes TranslationStrategyOption = "UnderscoreEscapingWithoutSuffixes"
+ // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric
+ // and label names. This offers a way for the OTLP users to use native metric
+ // names, reducing confusion.
+ //
+ // WARNING: This setting has significant known risks and limitations (see
+ // https://prometheus.io/docs/practices/naming/ for details): * Impaired UX
+ // when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling
+ // configuration). * Series collisions which in the best case may result in
+ // OOO errors, in the worst case a silently malformed time series. For
+ // instance, you may end up ingesting a `foo.bar` series with
+ // unit `seconds` and a separate series `foo.bar` with unit `milliseconds`.
+ //
+ // As a result, this setting is experimental and currently, should not be used
+ // in production systems.
+ //
+ // TODO(ArthurSens): Mention `type-and-unit-labels` feature
+ // (https://github.com/prometheus/proposals/pull/39) once released, as
+ // potential mitigation of the above risks.
+ NoTranslation TranslationStrategyOption = "NoTranslation"
+)
+
+// ShouldEscape returns true if the translation strategy requires that metric
+// names be escaped.
+func (o TranslationStrategyOption) ShouldEscape() bool {
+ switch o {
+ case UnderscoreEscapingWithSuffixes, UnderscoreEscapingWithoutSuffixes:
+ return true
+ case NoTranslation, NoUTF8EscapingWithSuffixes:
+ return false
+ default:
+ return false
+ }
+}
+
+// ShouldAddSuffixes returns a bool deciding whether the given translation
+// strategy should have suffixes added.
+func (o TranslationStrategyOption) ShouldAddSuffixes() bool {
+ switch o {
+ case UnderscoreEscapingWithSuffixes, NoUTF8EscapingWithSuffixes:
+ return true
+ case UnderscoreEscapingWithoutSuffixes, NoTranslation:
+ return false
+ default:
+ return false
+ }
+}
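
For reference, a small sketch showing how each TranslationStrategyOption reduces to the two booleans consumed by NewMetricNamer; the output formatting is illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/otlptranslator"
)

func main() {
	strategies := []otlptranslator.TranslationStrategyOption{
		otlptranslator.UnderscoreEscapingWithSuffixes,
		otlptranslator.UnderscoreEscapingWithoutSuffixes,
		otlptranslator.NoUTF8EscapingWithSuffixes,
		otlptranslator.NoTranslation,
	}
	for _, s := range strategies {
		// Each strategy reduces to the two booleans consumed by NewMetricNamer.
		fmt.Printf("%-34s escape=%-5t suffixes=%t\n", s, s.ShouldEscape(), s.ShouldAddSuffixes())
	}
}
```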
diff --git a/vendor/github.com/prometheus/otlptranslator/unit_namer.go b/vendor/github.com/prometheus/otlptranslator/unit_namer.go
index 4bbf93ef9..bb41fa89e 100644
--- a/vendor/github.com/prometheus/otlptranslator/unit_namer.go
+++ b/vendor/github.com/prometheus/otlptranslator/unit_namer.go
@@ -15,14 +15,34 @@ package otlptranslator
import "strings"
// UnitNamer is a helper for building compliant unit names.
+// It processes OpenTelemetry Protocol (OTLP) unit strings and converts them
+// to Prometheus-compliant unit names.
+//
+// Example usage:
+//
+// namer := UnitNamer{UTF8Allowed: false}
+// result := namer.Build("s") // "seconds"
+// result = namer.Build("By/s") // "bytes_per_second"
type UnitNamer struct {
UTF8Allowed bool
}
// Build builds a unit name for the specified unit string.
// It processes the unit by splitting it into main and per components,
-// applying appropriate unit mappings, and cleaning up invalid characters
-// when the whole UTF-8 character set is not allowed.
+// applying unit mappings, and cleaning up invalid characters when UTF8Allowed is false.
+//
+// Unit mappings include:
+// - Time: s→seconds, ms→milliseconds, h→hours
+// - Bytes: By→bytes, KBy→kilobytes, MBy→megabytes
+// - SI: m→meters, V→volts, W→watts
+// - Special: 1→"" (empty), %→percent
+//
+// Examples:
+//
+// namer := UnitNamer{UTF8Allowed: false}
+// namer.Build("s") // "seconds"
+// namer.Build("requests/s") // "requests_per_second"
+// namer.Build("1") // "" (dimensionless)
func (un *UnitNamer) Build(unit string) string {
mainUnit, perUnit := buildUnitSuffixes(unit)
if !un.UTF8Allowed {
diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml
index 2c8f4808c..6acf8ab1e 100644
--- a/vendor/github.com/spf13/cobra/.golangci.yml
+++ b/vendor/github.com/spf13/cobra/.golangci.yml
@@ -12,14 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+version: "2"
+
run:
- deadline: 5m
+ timeout: 5m
+
+formatters:
+ enable:
+ - gofmt
+ - goimports
linters:
- disable-all: true
+ default: none
enable:
#- bodyclose
- # - deadcode ! deprecated since v1.49.0; replaced by 'unused'
#- depguard
#- dogsled
#- dupl
@@ -30,28 +36,24 @@ linters:
- goconst
- gocritic
#- gocyclo
- - gofmt
- - goimports
- #- gomnd
#- goprintffuncname
- gosec
- - gosimple
- govet
- ineffassign
#- lll
- misspell
+ #- mnd
#- nakedret
#- noctx
- nolintlint
#- rowserrcheck
- #- scopelint
- staticcheck
- #- structcheck ! deprecated since v1.49.0; replaced by 'unused'
- - stylecheck
- #- typecheck
- unconvert
#- unparam
- unused
- # - varcheck ! deprecated since v1.49.0; replaced by 'unused'
#- whitespace
- fast: false
+ exclusions:
+ presets:
+ - common-false-positives
+ - legacy
+ - std-error-handling
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
index 71757151c..8416275f4 100644
--- a/vendor/github.com/spf13/cobra/README.md
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -1,8 +1,14 @@
-
-![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e)
+<div align="center">
+<a href="https://cobra.dev">
+<img width="512" height="535" alt="cobra-logo" src="https://github.com/user-attachments/assets/c8bf9aad-b5ae-41d3-8899-d83baec10af8" />
+</a>
+</div>
Cobra is a library for creating powerful modern CLI applications.
+<a href="https://cobra.dev">Visit Cobra.dev for extensive documentation</a>
+
+
Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
@@ -11,6 +17,20 @@ name a few. [This list](site/content/projects_using_cobra.md) contains a more ex
[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
+<hr>
+<div align="center" markdown="1">
+ <sup>Supported by:</sup>
+ <br>
+ <br>
+ <a href="https://www.warp.dev/cobra">
+ <img alt="Warp sponsorship" width="400" src="https://github.com/user-attachments/assets/ab8dd143-b0fd-4904-bdc5-dd7ecac94eae">
+ </a>
+
+### [Warp, the AI terminal for devs](https://www.warp.dev/cobra)
+[Try Cobra in Warp today](https://www.warp.dev/cobra)<br>
+
+</div>
+<hr>
# Overview
diff --git a/vendor/github.com/spf13/cobra/SECURITY.md b/vendor/github.com/spf13/cobra/SECURITY.md
new file mode 100644
index 000000000..54e60c28c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/SECURITY.md
@@ -0,0 +1,105 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+The `cobra` maintainers take security issues seriously and
+we appreciate your efforts to _**responsibly**_ disclose your findings.
+We will make every effort to swiftly respond and address concerns.
+
+To report a security vulnerability:
+
+1. **DO NOT** create a public GitHub issue for the vulnerability!
+2. **DO NOT** create a public GitHub Pull Request with a fix for the vulnerability!
+3. Send an email to `cobra-security@googlegroups.com`.
+4. Include the following details in your report:
+ - Description of the vulnerability
+ - Steps to reproduce
+ - Potential impact of the vulnerability (to your downstream project, to the Go ecosystem, etc.)
+ - Any potential mitigations you've already identified
+5. Allow up to 7 days for an initial response.
+ You should receive an acknowledgment of your report and an estimated timeline for a fix.
+6. (Optional) If you have a fix and would like to contribute your patch, please work
+ directly with the maintainers via `cobra-security@googlegroups.com` to
+ coordinate pushing the patch to GitHub, cutting a new release, and disclosing the change.
+
+## Response Process
+
+When a security vulnerability report is received, the `cobra` maintainers will:
+
+1. Confirm receipt of the vulnerability report within 7 days.
+2. Assess the report to determine if it constitutes a security vulnerability.
+3. If confirmed, assign the vulnerability a severity level and create a timeline for addressing it.
+4. Develop and test a fix.
+5. Patch the vulnerability and make a new GitHub release: the maintainers will coordinate disclosure with the reporter.
+6. Create a new GitHub Security Advisory to inform the broader Go ecosystem
+
+## Disclosure Policy
+
+The `cobra` maintainers follow a coordinated disclosure process:
+
+1. Security vulnerabilities will be addressed as quickly as possible.
+2. A CVE (Common Vulnerabilities and Exposures) identifier will be requested for significant vulnerabilities
+ that are within `cobra` itself.
+3. Once a fix is ready, the maintainers will:
+ - Release a new version containing the fix.
+ - Update the security advisory with details about the vulnerability.
+ - Credit the reporter (unless they wish to remain anonymous).
+ - Credit the fixer (unless they wish to remain anonymous, this may be the same as the reporter).
+ - Announce the vulnerability through appropriate channels
+ (GitHub Security Advisory, mailing lists, GitHub Releases, etc.)
+
+## Supported Versions
+
+Security fixes will typically only be released for the most recent major release.
+
+## Upstream Security Issues
+
+`cobra` generally will not accept vulnerability reports that originate in upstream
+dependencies. I.e., if there is a problem in Go code that `cobra` depends on,
+it is best to engage that project's maintainers and owners.
+
+This security policy primarily pertains only to `cobra` itself but if you believe you've
+identified a problem that originates in an upstream dependency and is being widely
+distributed by `cobra`, please follow the disclosure procedure above: the `cobra`
+maintainers will work with you to determine the severity and ecosystem impact.
+
+## Security Updates and CVEs
+
+Information about known security vulnerabilities and CVEs affecting `cobra` will
+be published as GitHub Security Advisories at
+https://github.com/spf13/cobra/security/advisories.
+
+All users are encouraged to watch the repository and upgrade promptly when
+security releases are published.
+
+## `cobra` Security Best Practices for Users
+
+When using `cobra` in your CLIs, the `cobra` maintainers recommend the following:
+
+1. Always use the latest version of `cobra`.
+2. [Use Go modules](https://go.dev/blog/using-go-modules) for dependency management.
+3. Always use the latest possible version of Go.
+
+## Security Best Practices for Contributors
+
+When contributing to `cobra`:
+
+1. Be mindful of security implications when adding new features or modifying existing ones.
+2. Be aware of `cobra`'s extremely large reach: it is used in nearly every Go CLI
+ (like Kubernetes, Docker, Prometheus, etc. etc.)
+3. Write tests that explicitly cover edge cases and potential issues.
+4. If you discover a security issue while working on `cobra`, please report it
+ following the process above rather than opening a public pull request or issue that
+ addresses the vulnerability.
+5. Take personal sec-ops seriously and secure your GitHub account: use [two-factor authentication](https://docs.github.com/en/authentication/securing-your-account-with-two-factor-authentication-2fa),
+ [sign your commits with a GPG or SSH key](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification),
+ etc.
+
+## Acknowledgments
+
+The `cobra` maintainers would like to thank all security researchers and
+community members who help keep cobra, its users, and the entire Go ecosystem secure through responsible disclosures!!
+
+---
+
+*This security policy is inspired by the [Open Web Application Security Project (OWASP)](https://owasp.org/) guidelines and security best practices.*
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
index dbb2c298b..78088db69 100644
--- a/vendor/github.com/spf13/cobra/command.go
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -39,7 +39,7 @@ const (
)
// FParseErrWhitelist configures Flag parse errors to be ignored
-type FParseErrWhitelist flag.ParseErrorsWhitelist
+type FParseErrWhitelist flag.ParseErrorsAllowlist
// Group Structure to manage groups for commands
type Group struct {
@@ -1296,6 +1296,11 @@ Simply type ` + c.DisplayName() + ` help [path to command] for full details.`,
c.Printf("Unknown help topic %#q\n", args)
CheckErr(c.Root().Usage())
} else {
+ // Flow the context down to be used in help text
+ if cmd.ctx == nil {
+ cmd.ctx = c.ctx
+ }
+
cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown
CheckErr(cmd.Help())
@@ -1872,7 +1877,7 @@ func (c *Command) ParseFlags(args []string) error {
c.mergePersistentFlags()
// do it here after merging all flags and just before parse
- c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+ c.Flags().ParseErrorsAllowlist = flag.ParseErrorsAllowlist(c.FParseErrWhitelist)
err := c.Flags().Parse(args)
// Print warnings if they occurred (e.g. deprecated flag messages).
@@ -2020,7 +2025,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error {
fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages()))
}
if c.HasHelpSubCommands() {
- fmt.Fprintf(w, "\n\nAdditional help topcis:")
+ fmt.Fprintf(w, "\n\nAdditional help topics:")
for _, subcmd := range c.Commands() {
if subcmd.IsAdditionalHelpTopicCommand() {
fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short)
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
index a1752f763..d3607c2d2 100644
--- a/vendor/github.com/spf13/cobra/completions.go
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -115,6 +115,13 @@ type CompletionOptions struct {
DisableDescriptions bool
// HiddenDefaultCmd makes the default 'completion' command hidden
HiddenDefaultCmd bool
+ // DefaultShellCompDirective sets the ShellCompDirective that is returned
+ // if no special directive can be determined
+ DefaultShellCompDirective *ShellCompDirective
+}
+
+func (receiver *CompletionOptions) SetDefaultShellCompDirective(directive ShellCompDirective) {
+ receiver.DefaultShellCompDirective = &directive
}
// Completion is a string that can be used for completions
@@ -375,7 +382,7 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo
// Error while attempting to parse flags
if flagErr != nil {
// If error type is flagCompError and we don't want flagCompletion we should ignore the error
- if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
+ if _, ok := flagErr.(*flagCompError); !ok || flagCompletion {
return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr
}
}
@@ -480,6 +487,14 @@ func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCo
}
} else {
directive = ShellCompDirectiveDefault
+ // check current and parent commands for a custom DefaultShellCompDirective
+ for cmd := finalCmd; cmd != nil; cmd = cmd.parent {
+ if cmd.CompletionOptions.DefaultShellCompDirective != nil {
+ directive = *cmd.CompletionOptions.DefaultShellCompDirective
+ break
+ }
+ }
+
if flag == nil {
foundLocalNonPersistentFlag := false
// If TraverseChildren is true on the root command we don't check for
@@ -773,7 +788,7 @@ See each sub-command's help for details on how to use the generated script.
// shell completion for it (prog __complete completion '')
subCmd, cmdArgs, err := c.Find(args)
if err != nil || subCmd.Name() != compCmdName &&
- !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) {
+ (subCmd.Name() != ShellCompRequestCmd || len(cmdArgs) <= 1 || cmdArgs[0] != compCmdName) {
// The completion command is not being called or being completed so we remove it.
c.RemoveCommand(completionCmd)
return
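
A minimal sketch of the new DefaultShellCompDirective option, assuming a trivial root command; because getCompletions walks parent commands, setting it on the root applies to subcommands that do not override it.

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// When no more specific directive applies, completions now fall back to
	// this default instead of ShellCompDirectiveDefault (file completion).
	rootCmd.CompletionOptions.SetDefaultShellCompDirective(cobra.ShellCompDirectiveNoFileComp)

	_ = rootCmd.Execute()
}
```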
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index d4dfbc5ea..2fd3c5759 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -137,12 +137,17 @@ const (
PanicOnError
)
-// ParseErrorsWhitelist defines the parsing errors that can be ignored
-type ParseErrorsWhitelist struct {
+// ParseErrorsAllowlist defines the parsing errors that can be ignored
+type ParseErrorsAllowlist struct {
// UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags
UnknownFlags bool
}
+// ParseErrorsWhitelist defines the parsing errors that can be ignored.
+//
+// Deprecated: use [ParseErrorsAllowlist] instead. This type will be removed in a future release.
+type ParseErrorsWhitelist = ParseErrorsAllowlist
+
// NormalizedName is a flag name that has been normalized according to rules
// for the FlagSet (e.g. making '-' and '_' equivalent).
type NormalizedName string
@@ -158,8 +163,13 @@ type FlagSet struct {
// help/usage messages.
SortFlags bool
- // ParseErrorsWhitelist is used to configure a whitelist of errors
- ParseErrorsWhitelist ParseErrorsWhitelist
+ // ParseErrorsAllowlist is used to configure an allowlist of errors
+ ParseErrorsAllowlist ParseErrorsAllowlist
+
+ // ParseErrorsAllowlist is used to configure an allowlist of errors.
+ //
+ // Deprecated: use [FlagSet.ParseErrorsAllowlist] instead. This field will be removed in a future release.
+ ParseErrorsWhitelist ParseErrorsAllowlist
name string
parsed bool
@@ -928,7 +938,6 @@ func VarP(value Value, name, shorthand, usage string) {
// returns the error.
func (f *FlagSet) fail(err error) error {
if f.errorHandling != ContinueOnError {
- fmt.Fprintln(f.Output(), err)
f.usage()
}
return err
@@ -986,6 +995,8 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
f.usage()
return a, ErrHelp
case f.ParseErrorsWhitelist.UnknownFlags:
+ fallthrough
+ case f.ParseErrorsAllowlist.UnknownFlags:
// --unknown=unknownval arg ...
// we do not want to lose arg in this case
if len(split) >= 2 {
@@ -1044,6 +1055,8 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
err = ErrHelp
return
case f.ParseErrorsWhitelist.UnknownFlags:
+ fallthrough
+ case f.ParseErrorsAllowlist.UnknownFlags:
// '-f=arg arg ...'
// we do not want to lose arg in this case
if len(shorthands) > 2 && shorthands[1] == '=' {
@@ -1158,12 +1171,12 @@ func (f *FlagSet) Parse(arguments []string) error {
}
f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
if len(arguments) == 0 {
return nil
}
- f.args = make([]string, 0, len(arguments))
-
set := func(flag *Flag, value string) error {
return f.Set(flag.Name, value)
}
@@ -1174,7 +1187,10 @@ func (f *FlagSet) Parse(arguments []string) error {
case ContinueOnError:
return err
case ExitOnError:
- fmt.Println(err)
+ if err == ErrHelp {
+ os.Exit(0)
+ }
+ fmt.Fprintln(f.Output(), err)
os.Exit(2)
case PanicOnError:
panic(err)
@@ -1200,6 +1216,10 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string)
case ContinueOnError:
return err
case ExitOnError:
+ if err == ErrHelp {
+ os.Exit(0)
+ }
+ fmt.Fprintln(f.Output(), err)
os.Exit(2)
case PanicOnError:
panic(err)
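
A short sketch of the renamed pflag allowlist field, assuming ContinueOnError error handling; the flag names and arguments are illustrative.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("app", pflag.ContinueOnError)
	verbose := fs.Bool("verbose", false, "enable verbose output")

	// Renamed field: unknown flags are skipped instead of failing the parse.
	fs.ParseErrorsAllowlist = pflag.ParseErrorsAllowlist{UnknownFlags: true}

	if err := fs.Parse([]string{"--color=always", "--verbose"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("verbose:", *verbose) // verbose: true
}
```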
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
index f563907e2..e62eab538 100644
--- a/vendor/github.com/spf13/pflag/golangflag.go
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -8,6 +8,7 @@ import (
goflag "flag"
"reflect"
"strings"
+ "time"
)
// go test flags prefixes
@@ -113,6 +114,38 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
}
+// CopyToGoFlagSet will add all current flags to the given Go flag set.
+// Deprecation remarks get copied into the usage description.
+// Whenever possible, a flag gets added for which Go flags shows
+// a proper type in the help message.
+func (f *FlagSet) CopyToGoFlagSet(newSet *goflag.FlagSet) {
+ f.VisitAll(func(flag *Flag) {
+ usage := flag.Usage
+ if flag.Deprecated != "" {
+ usage += " (DEPRECATED: " + flag.Deprecated + ")"
+ }
+
+ switch value := flag.Value.(type) {
+ case *stringValue:
+ newSet.StringVar((*string)(value), flag.Name, flag.DefValue, usage)
+ case *intValue:
+ newSet.IntVar((*int)(value), flag.Name, *(*int)(value), usage)
+ case *int64Value:
+ newSet.Int64Var((*int64)(value), flag.Name, *(*int64)(value), usage)
+ case *uintValue:
+ newSet.UintVar((*uint)(value), flag.Name, *(*uint)(value), usage)
+ case *uint64Value:
+ newSet.Uint64Var((*uint64)(value), flag.Name, *(*uint64)(value), usage)
+ case *durationValue:
+ newSet.DurationVar((*time.Duration)(value), flag.Name, *(*time.Duration)(value), usage)
+ case *float64Value:
+ newSet.Float64Var((*float64)(value), flag.Name, *(*float64)(value), usage)
+ default:
+ newSet.Var(flag.Value, flag.Name, usage)
+ }
+ })
+}
+
// ParseSkippedFlags explicitly Parses go test flags (i.e. the one starting with '-test.') with goflag.Parse(),
// since by default those are skipped by pflag.Parse().
// Typical usage example: `ParseGoTestFlags(os.Args[1:], goflag.CommandLine)`
@@ -125,3 +158,4 @@ func ParseSkippedFlags(osArgs []string, goFlagSet *goflag.FlagSet) error {
}
return goFlagSet.Parse(skippedFlags)
}
+
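
A sketch of the new CopyToGoFlagSet helper, assuming a standalone pflag FlagSet; the flag names are illustrative, and the deprecation remark is copied into the Go flag's usage string as described above.

```go
package main

import (
	goflag "flag"
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	pfs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	pfs.Int("port", 8080, "port to listen on")
	pfs.String("mode", "dev", "run mode")
	_ = pfs.MarkDeprecated("mode", "use --profile instead")

	// Mirror the pflag definitions into a standard library FlagSet; deprecation
	// remarks are appended to the usage text, and typed flags keep their types.
	gfs := goflag.NewFlagSet("demo", goflag.ContinueOnError)
	pfs.CopyToGoFlagSet(gfs)

	gfs.VisitAll(func(f *goflag.Flag) {
		fmt.Printf("--%s (default %q): %s\n", f.Name, f.DefValue, f.Usage)
	})
}
```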
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
index 890a01afc..1d1e3bf91 100644
--- a/vendor/github.com/spf13/pflag/string_to_string.go
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/csv"
"fmt"
+ "sort"
"strings"
)
@@ -62,8 +63,15 @@ func (s *stringToStringValue) Type() string {
}
func (s *stringToStringValue) String() string {
+ keys := make([]string, 0, len(*s.value))
+ for k := range *s.value {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
records := make([]string, 0, len(*s.value)>>1)
- for k, v := range *s.value {
+ for _, k := range keys {
+ v := (*s.value)[k]
records = append(records, k+"="+v)
}
diff --git a/vendor/github.com/spf13/pflag/time.go b/vendor/github.com/spf13/pflag/time.go
index dc024807e..3dee42479 100644
--- a/vendor/github.com/spf13/pflag/time.go
+++ b/vendor/github.com/spf13/pflag/time.go
@@ -48,7 +48,13 @@ func (d *timeValue) Type() string {
return "time"
}
-func (d *timeValue) String() string { return d.Time.Format(time.RFC3339Nano) }
+func (d *timeValue) String() string {
+ if d.Time.IsZero() {
+ return ""
+ } else {
+ return d.Time.Format(time.RFC3339Nano)
+ }
+}
// GetTime return the time value of a flag with the given name
func (f *FlagSet) GetTime(name string) (time.Time, error) {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 7e19eba09..ffb24e8e3 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not positive", e)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not negative", e)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
@@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
- return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ return Fail(t, failMessage, msgAndArgs...)
}
return true
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 190634165..c592f6ad5 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// assert.Errorf(t, err, "error message %s", "formatted")
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
+}
+
// IsTypef asserts that the specified objects are of the same type.
+//
+// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
@@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 21629087b..58db92845 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
return ElementsMatchf(a.t, listA, listB, msg, args...)
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// a.Empty(obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// a.Emptyf(obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Error(err)
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Errorf(err, "error message %s", "formatted")
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
return IsNonIncreasingf(a.t, object, msg, args...)
}
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// a.IsNotType(&NotMyStruct{}, &MyStruct{})
+func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotType(a.t, theType, object, msgAndArgs...)
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotTypef(a.t, theType, object, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
+//
+// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
}
// IsTypef asserts that the specified objects are of the same type.
+//
+// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
return NotElementsMatchf(a.t, listA, listB, msg, args...)
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
@@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
return NotEmpty(a.t, object, msgAndArgs...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
// if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
@@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...)
}
-// NotSubset asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
+// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...)
}
-// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...)
}
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
+// a.Subset([1, 2, 3], {1: "one", 2: "two"})
+// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...)
}
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1d2f71824..2fdf80fdd 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 4e91332bb..de8de0cb6 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/
// of each stack frame leading from the current test to the assert call that
// failed.
func CallerInfo() []string {
-
var pc uintptr
- var ok bool
var file string
var line int
var name string
+ const stackFrameBufferSize = 10
+ pcs := make([]uintptr, stackFrameBufferSize)
+
callers := []string{}
- for i := 0; ; i++ {
- pc, file, line, ok = runtime.Caller(i)
- if !ok {
- // The breaks below failed to terminate the loop, and we ran off the
- // end of the call stack.
- break
- }
+ offset := 1
- // This is a huge edge case, but it will panic if this is the case, see #180
- if file == "<autogenerated>" {
- break
- }
+ for {
+ n := runtime.Callers(offset, pcs)
- f := runtime.FuncForPC(pc)
- if f == nil {
- break
- }
- name = f.Name()
-
- // testing.tRunner is the standard library function that calls
- // tests. Subtests are called directly by tRunner, without going through
- // the Test/Benchmark/Example function that contains the t.Run calls, so
- // with subtests we should break when we hit tRunner, without adding it
- // to the list of callers.
- if name == "testing.tRunner" {
+ if n == 0 {
break
}
- parts := strings.Split(file, "/")
- if len(parts) > 1 {
- filename := parts[len(parts)-1]
- dir := parts[len(parts)-2]
- if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ frames := runtime.CallersFrames(pcs[:n])
+
+ for {
+ frame, more := frames.Next()
+ pc = frame.PC
+ file = frame.File
+ line = frame.Line
+
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "<autogenerated>" {
+ break
}
- }
- // Drop the package
- segments := strings.Split(name, ".")
- name = segments[len(segments)-1]
- if isTest(name, "Test") ||
- isTest(name, "Benchmark") ||
- isTest(name, "Example") {
- break
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ if len(parts) > 1 {
+ filename := parts[len(parts)-1]
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ dotPos := strings.LastIndexByte(name, '.')
+ name = name[dotPos+1:]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+
+ if !more {
+ break
+ }
}
+
+ // Next batch
+ offset += cap(pcs)
}
return callers
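
The rewritten CallerInfo above swaps per-frame runtime.Caller lookups for batched runtime.Callers / runtime.CallersFrames calls. A minimal sketch of that batching pattern, independent of testify's frame filtering (buffer size and output format are illustrative only):

    package main

    import (
        "fmt"
        "runtime"
    )

    // printStack walks the current goroutine's stack in fixed-size batches of
    // program counters instead of resolving one frame per runtime call.
    func printStack() {
        const bufSize = 10
        pcs := make([]uintptr, bufSize)
        skip := 1 // skip runtime.Callers itself
        for {
            n := runtime.Callers(skip, pcs)
            if n == 0 {
                return // ran off the end of the call stack
            }
            frames := runtime.CallersFrames(pcs[:n])
            for {
                frame, more := frames.Next()
                fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
                if !more {
                    break
                }
            }
            skip += n // next batch starts after the frames already seen
        }
    }

    func main() {
        printStack()
    }
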
@@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{},
return true
}
+func isType(expectedType, object interface{}) bool {
+ return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
+}
+
// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+//
+// assert.IsType(t, &MyStruct{}, &MyStruct{})
+func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
+ if isType(expectedType, object) {
+ return true
+ }
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
+}
- if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
- return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
+func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isType(theType, object) {
+ return true
}
-
- return true
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
}
// Equal asserts that two objects are equal.
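
The new IsNotType/IsNotTypef assertions are the negated counterparts of IsType, and both now report failures with %T. A small usage sketch (the struct types are illustrative):

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    type MyStruct struct{}
    type NotMyStruct struct{}

    func TestTypeAssertions(t *testing.T) {
        assert.IsType(t, &MyStruct{}, &MyStruct{})       // same concrete type: passes
        assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) // different types: passes
        assert.IsNotType(t, MyStruct{}, &MyStruct{})     // MyStruct and *MyStruct also differ
    }
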
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
}
return true
-
}
// validateEqualArgs checks whether provided arguments can be safely used in the
@@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
if !same {
// both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+
- "expected: %p %#v\n"+
- "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ "expected: %p %#[1]v\n"+
+ "actual : %p %#[2]v",
+ expected, actual), msgAndArgs...)
}
return true
@@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
same, ok := samePointers(expected, actual)
if !ok {
- //fails when the arguments are not pointers
+ // fails when the arguments are not pointers
return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
}
if same {
return Fail(t, fmt.Sprintf(
- "Expected and actual point to the same object: %p %#v",
- expected, expected), msgAndArgs...)
+ "Expected and actual point to the same object: %p %#[1]v",
+ expected), msgAndArgs...)
}
return true
}
@@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
- return false, false //not both are pointers
+ return false, false // not both are pointers
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
@@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
}
return true
-
}
// EqualExportedValues asserts that the types of two objects are equal and their public
@@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
}
return Equal(t, expected, actual, msgAndArgs...)
-
}
// NotNil asserts that the specified object is not nil.
@@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// isEmpty gets whether the specified object is considered empty or not.
func isEmpty(object interface{}) bool {
-
// get nil case out of the way
if object == nil {
return true
}
- objValue := reflect.ValueOf(object)
+ return isEmptyValue(reflect.ValueOf(object))
+}
+// isEmptyValue gets whether the specified reflect.Value is considered empty or not.
+func isEmptyValue(objValue reflect.Value) bool {
+ if objValue.IsZero() {
+ return true
+ }
+ // Special cases of non-zero values that we consider empty
switch objValue.Kind() {
// collection types are empty when they have no element
+ // Note: array types are empty when they match their zero-initialized state.
case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
+ // non-nil pointers are empty if the value they point to is empty
case reflect.Ptr:
- if objValue.IsNil() {
- return true
- }
- deref := objValue.Elem().Interface()
- return isEmpty(deref)
- // for all other types, compare against the zero value
- // array types are empty when they match their zero-initialized state
- default:
- zero := reflect.Zero(objValue.Type())
- return reflect.DeepEqual(object, zero.Interface())
+ return isEmptyValue(objValue.Elem())
}
+ return false
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// assert.Empty(t, obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object)
if !pass {
@@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
}
return pass
-
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
// if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1])
@@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
}
return pass
-
}
// getLen tries to get the length of an object.
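
Putting the reworked isEmptyValue and the updated Empty/NotEmpty documentation together, the less obvious cases behave roughly as follows (an illustrative test, not part of the library):

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestEmptySemantics(t *testing.T) {
        type point struct{ X, Y int }

        assert.Empty(t, "")               // zero value
        assert.Empty(t, []int{})          // zero-length slice
        assert.Empty(t, map[string]int{}) // zero-length map
        assert.Empty(t, [2]int{0, 0})     // array of zero values
        assert.Empty(t, &point{})         // non-nil pointer to an "empty" value

        assert.NotEmpty(t, [2]int{0, 1})  // array with a non-zero element
        assert.NotEmpty(t, &point{X: 1})  // pointer to a non-zero value
        assert.NotEmpty(t, []int{0})      // non-empty slice, even of zero values
    }
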
@@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
}
return true
-
}
// False asserts that the specified value is false.
@@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
}
return true
-
}
// NotEqual asserts that the specified values are NOT equal.
@@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
}
return true
-
}
// NotEqualValues asserts that two objects are not equal even when converted to the same type
@@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (true, false) if element was not found.
// return (true, true) if element was found.
func containsElement(list interface{}, element interface{}) (ok, found bool) {
-
listValue := reflect.ValueOf(list)
listType := reflect.TypeOf(list)
if listType == nil {
@@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) {
}
}
return true, false
-
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
@@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
}
return true
-
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
@@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
}
return true
-
}
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
+// assert.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
}
subsetKind := reflect.TypeOf(subset).Kind()
- if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
@@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
}
subsetList := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map {
+ keys := make([]interface{}, subsetList.Len())
+ for idx, key := range subsetList.MapKeys() {
+ keys[idx] = key.Interface()
+ }
+ subsetList = reflect.ValueOf(keys)
+ }
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
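
With the corrected kind check (it previously tested listKind rather than subsetKind) and the map-key extraction just added, a map can now appear on either side of Subset/NotSubset; when only one side is a map, membership is decided by its keys. An illustrative test:

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestSubsetWithMaps(t *testing.T) {
        // Map subset against a slice list: only the keys 1 and 2 must appear.
        assert.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"})

        // Slice subset against a map list: elements are matched against map keys.
        assert.Subset(t, map[string]int{"x": 1, "y": 2}, []string{"x"})

        // Map against map: full key-value pairs are compared, as before.
        assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})

        // Key 2 is missing from the list, so this is not a subset.
        assert.NotSubset(t, []int{1, 3, 4}, map[int]string{1: "one", 2: "two"})
    }
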
@@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true
}
-// NotSubset asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
+// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
}
subsetKind := reflect.TypeOf(subset).Kind()
- if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
@@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
}
subsetList := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map {
+ keys := make([]interface{}, subsetList.Len())
+ for idx, key := range subsetList.MapKeys() {
+ keys[idx] = key.Interface()
+ }
+ subsetList = reflect.ValueOf(keys)
+ }
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
return true
@@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.Error(t, err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// assert.Error(t, err)
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil {
if h, ok := t.(tHelper); ok {
@@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool {
default:
return r.MatchString(fmt.Sprint(v))
}
-
}
// Regexp asserts that a specified regexp matches a string.
@@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
}
return !match
-
}
// Zero asserts that i is the zero value for its type.
@@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
+ // Shortcut if same bytes
+ if actual == expected {
+ return true
+ }
+
if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
}
@@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
+ // Shortcut if same bytes
+ if actual == expected {
+ return true
+ }
+
if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
}
@@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
}
ch := make(chan bool, 1)
+ checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case v := <-ch:
if v {
return true
}
- tick = ticker.C
+ tickC = ticker.C
}
}
}
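
Eventually now fires the condition immediately on entry (via the shared checkCond closure) instead of waiting for the first tick, so an already-true condition no longer pays the initial tick delay. Usage is unchanged; for example:

    package example_test

    import (
        "sync/atomic"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestEventuallyUsage(t *testing.T) {
        var done atomic.Bool
        go func() {
            time.Sleep(50 * time.Millisecond)
            done.Store(true)
        }()

        // Checked once immediately, then on every 10ms tick until the 1s timeout.
        assert.Eventually(t, done.Load, time.Second, 10*time.Millisecond)
    }
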
@@ -1964,6 +2028,9 @@ type CollectT struct {
errors []error
}
+// Helper is like [testing.T.Helper] but does nothing.
+func (CollectT) Helper() {}
+
// Errorf collects the error.
func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...))
@@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
var lastFinishedTickErrs []error
ch := make(chan *CollectT, 1)
+ checkCond := func() {
+ collect := new(CollectT)
+ defer func() {
+ ch <- collect
+ }()
+ condition(collect)
+ }
+
timer := time.NewTimer(waitFor)
defer timer.Stop()
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
for _, err := range lastFinishedTickErrs {
t.Errorf("%v", err)
}
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case <-tick:
- tick = nil
- go func() {
- collect := new(CollectT)
- defer func() {
- ch <- collect
- }()
- condition(collect)
- }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case collect := <-ch:
if !collect.failed() {
return true
}
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = collect.errors
- tick = ticker.C
+ tickC = ticker.C
}
}
}
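
EventuallyWithT gets the same immediate first check, and CollectT now exposes a no-op Helper so it satisfies the tHelper interface. A sketch of typical use:

    package example_test

    import (
        "sync/atomic"
        "testing"
        "time"

        "github.com/stretchr/testify/assert"
    )

    func TestEventuallyWithTUsage(t *testing.T) {
        var count atomic.Int64
        go func() {
            for i := 0; i < 5; i++ {
                count.Add(1)
                time.Sleep(10 * time.Millisecond)
            }
        }()

        // Each attempt gets a fresh *assert.CollectT; if the timeout is hit, the
        // errors collected by the last completed attempt are reported on t.
        assert.EventuallyWithT(t, func(c *assert.CollectT) {
            assert.GreaterOrEqual(c, count.Load(), int64(5))
        }, time.Second, 20*time.Millisecond)
    }
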
@@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
}
ch := make(chan bool, 1)
+ checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
return true
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case v := <-ch:
if v {
return Fail(t, "Condition satisfied", msgAndArgs...)
}
- tick = ticker.C
+ tickC = ticker.C
}
}
}
@@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
var expectedText string
if target != nil {
expectedText = target.Error()
+ if err == nil {
+ return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
+ }
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+
@@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
expectedText = target.Error()
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+
@@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
return true
}
- chain := buildErrorChainString(err)
+ expectedType := reflect.TypeOf(target).Elem().String()
+ if err == nil {
+ return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
+ "expected: %s", expectedType), msgAndArgs...)
+ }
+
+ chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
- "expected: %q\n"+
- "in chain: %s", target, chain,
+ "expected: %s\n"+
+ "in chain: %s", expectedType, chain,
), msgAndArgs...)
}
@@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa
return true
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
- "found: %q\n"+
- "in chain: %s", target, chain,
+ "found: %s\n"+
+ "in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
), msgAndArgs...)
}
-func buildErrorChainString(err error) string {
+func unwrapAll(err error) (errs []error) {
+ errs = append(errs, err)
+ switch x := err.(type) {
+ case interface{ Unwrap() error }:
+ err = x.Unwrap()
+ if err == nil {
+ return
+ }
+ errs = append(errs, unwrapAll(err)...)
+ case interface{ Unwrap() []error }:
+ for _, err := range x.Unwrap() {
+ errs = append(errs, unwrapAll(err)...)
+ }
+ }
+ return
+}
+
+func buildErrorChainString(err error, withType bool) string {
if err == nil {
return ""
}
- e := errors.Unwrap(err)
- chain := fmt.Sprintf("%q", err.Error())
- for e != nil {
- chain += fmt.Sprintf("\n\t%q", e.Error())
- e = errors.Unwrap(e)
+ var chain string
+ errs := unwrapAll(err)
+ for i := range errs {
+ if i != 0 {
+ chain += "\n\t"
+ }
+ chain += fmt.Sprintf("%q", errs[i].Error())
+ if withType {
+ chain += fmt.Sprintf(" (%T)", errs[i])
+ }
}
return chain
}
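
buildErrorChainString now flattens the whole error tree, following both Unwrap() error and Unwrap() []error (errors.Join, multi-%w fmt.Errorf), and can annotate each entry with its dynamic type for the ErrorAs/NotErrorAs messages. A standalone sketch of the same traversal:

    package main

    import (
        "errors"
        "fmt"
    )

    // flatten collects err and every error reachable from it, handling both the
    // single-error and the multi-error unwrap interfaces.
    func flatten(err error) []error {
        errs := []error{err}
        switch x := err.(type) {
        case interface{ Unwrap() error }:
            if inner := x.Unwrap(); inner != nil {
                errs = append(errs, flatten(inner)...)
            }
        case interface{ Unwrap() []error }:
            for _, inner := range x.Unwrap() {
                errs = append(errs, flatten(inner)...)
            }
        }
        return errs
    }

    func main() {
        base := errors.New("base")
        joined := errors.Join(fmt.Errorf("wrap: %w", base), errors.New("other"))
        for _, e := range flatten(joined) {
            fmt.Printf("%q (%T)\n", e.Error(), e)
        }
    }
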
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
index 4953981d3..a0b953aa5 100644
--- a/vendor/github.com/stretchr/testify/assert/doc.go
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -1,5 +1,9 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
+// # Note
+//
+// All functions in this package return a bool value indicating whether the assertion has passed.
+//
// # Example Usage
//
// The following is a complete example using assert in a standard test function:
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 861ed4b7c..5a6bb75f2 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
+ Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return contains
@@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
+ Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return !contains
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
index baa0cc7d7..5a74c4f4d 100644
--- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -1,5 +1,4 @@
//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
-// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
//
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
index b83c6cf64..0bae80e34 100644
--- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -1,5 +1,4 @@
//go:build !testify_yaml_fail && !testify_yaml_custom
-// +build !testify_yaml_fail,!testify_yaml_custom
// Package yaml is just an indirection to handle YAML deserialization.
//
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
index e78f7dfe6..8041803fd 100644
--- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -1,5 +1,4 @@
//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
-// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
// Package yaml is an implementation of YAML functions that always fail.
//
diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go
index 968434724..c8e3f94a8 100644
--- a/vendor/github.com/stretchr/testify/require/doc.go
+++ b/vendor/github.com/stretchr/testify/require/doc.go
@@ -23,6 +23,8 @@
//
// The `require` package has the same global functions as the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`.
+// A consequence of this is that it must be called from the goroutine running
+// the test function, not from other goroutines created during the test.
//
// Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs.
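
Because every require function ends in t.FailNow, it must run on the goroutine executing the test function; from other goroutines, fall back to assert or hand the result back over a channel. A hedged sketch of that split (doWork is a hypothetical stand-in for the code under test):

    package example_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
    )

    // doWork stands in for the code under test.
    func doWork() error { return nil }

    func TestRequireOnTestGoroutine(t *testing.T) {
        errCh := make(chan error, 1)

        go func() {
            err := doWork()
            // Inside a spawned goroutine use assert, which only records the
            // failure; require would call t.FailNow from the wrong goroutine.
            assert.NoError(t, err)
            errCh <- err
        }()

        // Back on the test goroutine, require (and its FailNow) is safe.
        require.NoError(t, <-errCh)
    }
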
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
index d8921950d..2d02f9bce 100644
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ b/vendor/github.com/stretchr/testify/require/require.go
@@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
t.FailNow()
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// require.Empty(t, obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
t.FailNow()
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// require.Emptyf(t, obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if require.Error(t, err) {
-// require.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// require.Error(t, err)
func Error(t TestingT, err error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if require.Errorf(t, err, "error message %s", "formatted") {
-// require.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// require.Errorf(t, err, "error message %s", "formatted")
func Errorf(t TestingT, err error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
t.FailNow()
}
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// require.IsNotType(t, &NotMyStruct{}, &MyStruct{})
+func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsNotType(t, theType, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.IsNotTypef(t, theType, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
// IsType asserts that the specified objects are of the same type.
+//
+// require.IsType(t, &MyStruct{}, &MyStruct{})
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
}
// IsTypef asserts that the specified objects are of the same type.
+//
+// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
t.FailNow()
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
// if require.NotEmpty(t, obj) {
// require.Equal(t, "two", obj[1])
@@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
t.FailNow()
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
// if require.NotEmptyf(t, obj, "error message %s", "formatted") {
// require.Equal(t, "two", obj[1])
@@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
-// NotSubset asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// require.NotSubset(t, [1, 3, 4], [1, 2])
// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
+// require.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
t.FailNow()
}
-// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
t.FailNow()
}
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// require.Subset(t, [1, 2, 3], [1, 2])
// require.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
+// require.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
t.FailNow()
}
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
index 1bd87304f..e6f7e9446 100644
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
ElementsMatchf(a.t, listA, listB, msg, args...)
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// a.Empty(obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...)
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// a.Emptyf(obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Error(err)
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Errorf(err, "error message %s", "formatted")
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
IsNonIncreasingf(a.t, object, msg, args...)
}
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// a.IsNotType(&NotMyStruct{}, &MyStruct{})
+func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsNotType(a.t, theType, object, msgAndArgs...)
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ IsNotTypef(a.t, theType, object, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
+//
+// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
}
// IsTypef asserts that the specified objects are of the same type.
+//
+// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
NotElementsMatchf(a.t, listA, listB, msg, args...)
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
@@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
NotEmpty(a.t, object, msgAndArgs...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
// if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
@@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
NotSamef(a.t, expected, actual, msg, args...)
}
-// NotSubset asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
+// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
NotSubset(a.t, list, subset, msgAndArgs...)
}
-// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
Samef(a.t, expected, actual, msg, args...)
}
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
+// a.Subset([1, 2, 3], {1: "one", 2: "two"})
+// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
Subset(a.t, list, subset, msgAndArgs...)
}
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/suite/stats.go b/vendor/github.com/stretchr/testify/suite/stats.go
index 261da37f7..be4ccd679 100644
--- a/vendor/github.com/stretchr/testify/suite/stats.go
+++ b/vendor/github.com/stretchr/testify/suite/stats.go
@@ -16,26 +16,30 @@ type TestInformation struct {
}
func newSuiteInformation() *SuiteInformation {
- testStats := make(map[string]*TestInformation)
-
return &SuiteInformation{
- TestStats: testStats,
+ TestStats: make(map[string]*TestInformation),
}
}
-func (s SuiteInformation) start(testName string) {
+func (s *SuiteInformation) start(testName string) {
+ if s == nil {
+ return
+ }
s.TestStats[testName] = &TestInformation{
TestName: testName,
Start: time.Now(),
}
}
-func (s SuiteInformation) end(testName string, passed bool) {
+func (s *SuiteInformation) end(testName string, passed bool) {
+ if s == nil {
+ return
+ }
s.TestStats[testName].End = time.Now()
s.TestStats[testName].Passed = passed
}
-func (s SuiteInformation) Passed() bool {
+func (s *SuiteInformation) Passed() bool {
for _, stats := range s.TestStats {
if !stats.Passed {
return false
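
Switching SuiteInformation to pointer receivers with an early nil return lets the suite runner call stats.start and stats.end unconditionally, because Go permits method calls on a nil pointer receiver as long as the method does not dereference it. A tiny standalone illustration of the pattern (names are made up):

    package main

    import "fmt"

    type tracker struct {
        counts map[string]int
    }

    // bump is safe to call on a nil *tracker: the early return keeps the map
    // access from panicking, so callers need no "if t != nil" guard.
    func (t *tracker) bump(name string) {
        if t == nil {
            return
        }
        t.counts[name]++
    }

    func main() {
        var stats *tracker // nil: stats collection disabled
        stats.bump("TestSomething")
        fmt.Println("no panic with a nil tracker")

        stats = &tracker{counts: map[string]int{}}
        stats.bump("TestSomething")
        fmt.Println(stats.counts)
    }
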
diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go
index 18443a91c..1b19be3bc 100644
--- a/vendor/github.com/stretchr/testify/suite/suite.go
+++ b/vendor/github.com/stretchr/testify/suite/suite.go
@@ -7,6 +7,7 @@ import (
"reflect"
"regexp"
"runtime/debug"
+ "strings"
"sync"
"testing"
"time"
@@ -15,7 +16,6 @@ import (
"github.com/stretchr/testify/require"
)
-var allTestsFilter = func(_, _ string) (bool, error) { return true, nil }
var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run")
// Suite is a basic testing suite with methods for storing and
@@ -116,6 +116,11 @@ func (suite *Suite) Run(name string, subtest func()) bool {
})
}
+type test = struct {
+ name string
+ run func(t *testing.T)
+}
+
// Run takes a testing suite and runs all of the tests attached
// to it.
func Run(t *testing.T, suite TestingSuite) {
@@ -124,45 +129,39 @@ func Run(t *testing.T, suite TestingSuite) {
suite.SetT(t)
suite.SetS(suite)
- var suiteSetupDone bool
-
var stats *SuiteInformation
if _, ok := suite.(WithStats); ok {
stats = newSuiteInformation()
}
- tests := []testing.InternalTest{}
+ var tests []test
methodFinder := reflect.TypeOf(suite)
suiteName := methodFinder.Elem().Name()
- for i := 0; i < methodFinder.NumMethod(); i++ {
- method := methodFinder.Method(i)
-
- ok, err := methodFilter(method.Name)
+ var matchMethodRE *regexp.Regexp
+ if *matchMethod != "" {
+ var err error
+ matchMethodRE, err = regexp.Compile(*matchMethod)
if err != nil {
fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err)
os.Exit(1)
}
+ }
- if !ok {
+ for i := 0; i < methodFinder.NumMethod(); i++ {
+ method := methodFinder.Method(i)
+
+ if !strings.HasPrefix(method.Name, "Test") {
continue
}
-
- if !suiteSetupDone {
- if stats != nil {
- stats.Start = time.Now()
- }
-
- if setupAllSuite, ok := suite.(SetupAllSuite); ok {
- setupAllSuite.SetupSuite()
- }
-
- suiteSetupDone = true
+ // Apply -testify.m filter
+ if matchMethodRE != nil && !matchMethodRE.MatchString(method.Name) {
+ continue
}
- test := testing.InternalTest{
- Name: method.Name,
- F: func(t *testing.T) {
+ test := test{
+ name: method.Name,
+ run: func(t *testing.T) {
parentT := suite.T()
suite.SetT(t)
defer recoverAndFailOnPanic(t)
@@ -171,10 +170,7 @@ func Run(t *testing.T, suite TestingSuite) {
r := recover()
- if stats != nil {
- passed := !t.Failed() && r == nil
- stats.end(method.Name, passed)
- }
+ stats.end(method.Name, !t.Failed() && r == nil)
if afterTestSuite, ok := suite.(AfterTest); ok {
afterTestSuite.AfterTest(suiteName, method.Name)
@@ -195,59 +191,47 @@ func Run(t *testing.T, suite TestingSuite) {
beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name)
}
- if stats != nil {
- stats.start(method.Name)
- }
+ stats.start(method.Name)
method.Func.Call([]reflect.Value{reflect.ValueOf(suite)})
},
}
tests = append(tests, test)
}
- if suiteSetupDone {
- defer func() {
- if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
- tearDownAllSuite.TearDownSuite()
- }
-
- if suiteWithStats, measureStats := suite.(WithStats); measureStats {
- stats.End = time.Now()
- suiteWithStats.HandleStats(suiteName, stats)
- }
- }()
+
+ if len(tests) == 0 {
+ return
}
- runTests(t, tests)
-}
+ if stats != nil {
+ stats.Start = time.Now()
+ }
-// Filtering method according to set regular expression
-// specified command-line argument -m
-func methodFilter(name string) (bool, error) {
- if ok, _ := regexp.MatchString("^Test", name); !ok {
- return false, nil
+ if setupAllSuite, ok := suite.(SetupAllSuite); ok {
+ setupAllSuite.SetupSuite()
}
- return regexp.MatchString(*matchMethod, name)
+
+ defer func() {
+ if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok {
+ tearDownAllSuite.TearDownSuite()
+ }
+
+ if suiteWithStats, measureStats := suite.(WithStats); measureStats {
+ stats.End = time.Now()
+ suiteWithStats.HandleStats(suiteName, stats)
+ }
+ }()
+
+ runTests(t, tests)
}
-func runTests(t testing.TB, tests []testing.InternalTest) {
+func runTests(t *testing.T, tests []test) {
if len(tests) == 0 {
t.Log("warning: no tests to run")
return
}
- r, ok := t.(runner)
- if !ok { // backwards compatibility with Go 1.6 and below
- if !testing.RunTests(allTestsFilter, tests) {
- t.Fail()
- }
- return
- }
-
for _, test := range tests {
- r.Run(test.Name, test.F)
+ t.Run(test.name, test.run)
}
}
-
-type runner interface {
- Run(name string, f func(t *testing.T)) bool
-}
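The runner no longer builds testing.InternalTest values or falls back to the pre-Go 1.7 testing.RunTests path: it filters exported Test methods with a regexp compiled once from -testify.m and hands each match to t.Run. A condensed sketch of that filter-and-dispatch loop (hypothetical helper, trimmed of the suite setup/teardown hooks):

package suitesketch

import (
	"regexp"
	"strings"
	"testing"
)

// runFiltered mirrors the shape of the new suite.Run loop: compile the
// pattern once, skip methods that do not start with "Test", filter the
// rest against the pattern, and run each selected method as a subtest.
func runFiltered(t *testing.T, pattern string, tests map[string]func(*testing.T)) {
	var re *regexp.Regexp
	if pattern != "" {
		var err error
		if re, err = regexp.Compile(pattern); err != nil {
			t.Fatalf("invalid -m regexp: %v", err)
		}
	}
	for name, fn := range tests {
		if !strings.HasPrefix(name, "Test") {
			continue
		}
		if re != nil && !re.MatchString(name) {
			continue
		}
		t.Run(name, fn)
	}
}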
diff --git a/vendor/github.com/tdewolff/minify/v2/minify.go b/vendor/github.com/tdewolff/minify/v2/minify.go
index a40ec504f..e3a1232be 100644
--- a/vendor/github.com/tdewolff/minify/v2/minify.go
+++ b/vendor/github.com/tdewolff/minify/v2/minify.go
@@ -68,16 +68,26 @@ func (c *cmdMinifier) Minify(_ *M, w io.Writer, r io.Reader, _ map[string]string
if j := strings.Index(arg, "$in"); j != -1 {
var err error
ext := cmdArgExtension.FindString(arg[j+3:])
- if in, err = os.CreateTemp("", "minify-in-*"+ext); err != nil {
+ if in != nil {
+ return fmt.Errorf("more than one input arguments")
+ } else if in, err = os.CreateTemp("", "minify-in-*"+ext); err != nil {
return err
}
+ defer func() {
+ os.Remove(in.Name())
+ }()
cmd.Args[i] = arg[:j] + in.Name() + arg[j+3+len(ext):]
} else if j := strings.Index(arg, "$out"); j != -1 {
var err error
ext := cmdArgExtension.FindString(arg[j+4:])
- if out, err = os.CreateTemp("", "minify-out-*"+ext); err != nil {
+ if out != nil {
+ return fmt.Errorf("more than one output arguments")
+ } else if out, err = os.CreateTemp("", "minify-out-*"+ext); err != nil {
return err
}
+ defer func() {
+ os.Remove(out.Name())
+ }()
cmd.Args[i] = arg[:j] + out.Name() + arg[j+4+len(ext):]
}
}
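The cmdMinifier hunk above now rejects a second $in or $out placeholder and removes the temporary files it substitutes for them via deferred os.Remove. The same create-substitute-cleanup pattern in isolation (hypothetical helper, not the vendored API):

package main

import (
	"fmt"
	"os"
	"strings"
)

// substTemp replaces the first placeholder in arg with the name of a fresh
// temporary file and returns the rewritten argument plus a cleanup func.
func substTemp(arg, placeholder, pattern string) (string, func(), error) {
	j := strings.Index(arg, placeholder)
	if j == -1 {
		return arg, func() {}, nil
	}
	f, err := os.CreateTemp("", pattern)
	if err != nil {
		return "", nil, err
	}
	f.Close()
	cleanup := func() { os.Remove(f.Name()) }
	return arg[:j] + f.Name() + arg[j+len(placeholder):], cleanup, nil
}

func main() {
	arg, cleanup, err := substTemp("--output=$out", "$out", "minify-out-*")
	if err != nil {
		fmt.Println("temp file:", err)
		return
	}
	defer cleanup()
	fmt.Println("rewritten argument:", arg)
}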
diff --git a/vendor/github.com/tdewolff/minify/v2/publish.sh b/vendor/github.com/tdewolff/minify/v2/publish.sh
index 64ea672a2..476657d8f 100644
--- a/vendor/github.com/tdewolff/minify/v2/publish.sh
+++ b/vendor/github.com/tdewolff/minify/v2/publish.sh
@@ -10,7 +10,8 @@ SHA256=`sha256sum v$VERSION.tar.gz`
SHA256=( $SHA256 )
GOMODCACHE="$PWD"/go-mod go mod download -modcacherw -x
-tar -caf minify-v$VERSION-deps.tar.xz go-mod
+tar -caf minify-deps.tar.xz go-mod
+rm -rf go-mod
echo ""
echo "Releasing for AUR..."
diff --git a/vendor/github.com/tdewolff/parse/v2/binary.go b/vendor/github.com/tdewolff/parse/v2/binary.go
index 7247e6e10..cf4f91d4a 100644
--- a/vendor/github.com/tdewolff/parse/v2/binary.go
+++ b/vendor/github.com/tdewolff/parse/v2/binary.go
@@ -5,360 +5,15 @@ import (
"errors"
"fmt"
"io"
- "math"
"os"
)
const PageSize = 4096
-// BinaryReader is a binary big endian file format reader.
-type BinaryReader struct {
- Endianness binary.ByteOrder
- buf []byte
- pos uint32
- eof bool
-}
-
-// NewBinaryReader returns a big endian binary file format reader.
-func NewBinaryReader(buf []byte) *BinaryReader {
- if math.MaxUint32 < uint(len(buf)) {
- return &BinaryReader{binary.BigEndian, nil, 0, true}
- }
- return &BinaryReader{binary.BigEndian, buf, 0, false}
-}
-
-// NewBinaryReaderLE returns a little endian binary file format reader.
-func NewBinaryReaderLE(buf []byte) *BinaryReader {
- r := NewBinaryReader(buf)
- r.Endianness = binary.LittleEndian
- return r
-}
-
-// Seek set the reader position in the buffer.
-func (r *BinaryReader) Seek(pos uint32) error {
- if uint32(len(r.buf)) < pos {
- r.eof = true
- return io.EOF
- }
- r.pos = pos
- r.eof = false
- return nil
-}
-
-// Pos returns the reader's position.
-func (r *BinaryReader) Pos() uint32 {
- return r.pos
-}
-
-// Len returns the remaining length of the buffer.
-func (r *BinaryReader) Len() uint32 {
- return uint32(len(r.buf)) - r.pos
-}
-
-// SetLen sets the remaining length of the underlying buffer.
-func (r *BinaryReader) SetLen(n uint32) {
- r.buf = r.buf[: r.pos+n : r.pos+n]
-}
-
-// EOF returns true if we reached the end-of-file.
-func (r *BinaryReader) EOF() bool {
- return r.eof
-}
-
-// Read complies with io.Reader.
-func (r *BinaryReader) Read(b []byte) (int, error) {
- n := copy(b, r.buf[r.pos:])
- r.pos += uint32(n)
- if r.pos == uint32(len(r.buf)) {
- r.eof = true
- return n, io.EOF
- }
- return n, nil
-}
-
-// ReadBytes reads n bytes.
-func (r *BinaryReader) ReadBytes(n uint32) []byte {
- if r.eof || uint32(len(r.buf))-r.pos < n {
- r.eof = true
- return nil
- }
- buf := r.buf[r.pos : r.pos+n : r.pos+n]
- r.pos += n
- return buf
-}
-
-// ReadString reads a string of length n.
-func (r *BinaryReader) ReadString(n uint32) string {
- return string(r.ReadBytes(n))
-}
-
-// ReadByte reads a single byte.
-func (r *BinaryReader) ReadByte() byte {
- b := r.ReadBytes(1)
- if b == nil {
- return 0
- }
- return b[0]
-}
-
-// ReadUint8 reads a uint8.
-func (r *BinaryReader) ReadUint8() uint8 {
- return r.ReadByte()
-}
-
-// ReadUint16 reads a uint16.
-func (r *BinaryReader) ReadUint16() uint16 {
- b := r.ReadBytes(2)
- if b == nil {
- return 0
- }
- return r.Endianness.Uint16(b)
-}
-
-// ReadUint24 reads a uint24 into a uint32.
-func (r *BinaryReader) ReadUint24() uint32 {
- b := r.ReadBytes(3)
- if b == nil {
- return 0
- } else if r.Endianness == binary.LittleEndian {
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
- } else {
- return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
- }
-}
-
-// ReadUint32 reads a uint32.
-func (r *BinaryReader) ReadUint32() uint32 {
- b := r.ReadBytes(4)
- if b == nil {
- return 0
- }
- return r.Endianness.Uint32(b)
-}
-
-// ReadUint64 reads a uint64.
-func (r *BinaryReader) ReadUint64() uint64 {
- b := r.ReadBytes(8)
- if b == nil {
- return 0
- }
- return r.Endianness.Uint64(b)
-}
-
-// ReadInt8 reads an int8.
-func (r *BinaryReader) ReadInt8() int8 {
- return int8(r.ReadByte())
-}
-
-// ReadInt16 reads an int16.
-func (r *BinaryReader) ReadInt16() int16 {
- return int16(r.ReadUint16())
-}
-
-// ReadInt24 reads a int24 into an int32.
-func (r *BinaryReader) ReadInt24() int32 {
- return int32(r.ReadUint24())
-}
-
-// ReadInt32 reads an int32.
-func (r *BinaryReader) ReadInt32() int32 {
- return int32(r.ReadUint32())
-}
-
-// ReadInt64 reads an int64.
-func (r *BinaryReader) ReadInt64() int64 {
- return int64(r.ReadUint64())
-}
-
-type BinaryFileReader struct {
- f *os.File
- size uint64
- offset uint64
-
- Endianness binary.ByteOrder
- buf []byte
- pos int
-}
-
-func NewBinaryFileReader(f *os.File, chunk int) (*BinaryFileReader, error) {
- var buf []byte
- var size uint64
- if chunk == 0 {
- var err error
- if buf, err = io.ReadAll(f); err != nil {
- return nil, err
- }
- } else {
- buf = make([]byte, 0, chunk)
- }
- if info, err := f.Stat(); err != nil {
- return nil, err
- } else {
- size = uint64(info.Size())
- }
- return &BinaryFileReader{
- f: f,
- size: size,
- Endianness: binary.BigEndian,
- buf: buf,
- }, nil
-}
-
-func (r *BinaryFileReader) buffer(pos, length uint64) error {
- if pos < r.offset || r.offset+uint64(len(r.buf)) < pos+length {
- if math.MaxInt64 < pos {
- return fmt.Errorf("seek position too large")
- } else if _, err := r.f.Seek(int64(pos), 0); err != nil {
- return err
- } else if n, err := r.f.Read(r.buf[:cap(r.buf)]); err != nil {
- return err
- } else {
- r.offset = pos
- r.buf = r.buf[:n]
- r.pos = 0
- }
- }
- return nil
-}
-
-// Seek set the reader position in the buffer.
-func (r *BinaryFileReader) Seek(pos uint64) error {
- if r.size <= pos {
- return io.EOF
- } else if err := r.buffer(pos, 0); err != nil {
- return err
- }
- r.pos = int(pos - r.offset)
- return nil
-}
-
-// Pos returns the reader's position.
-func (r *BinaryFileReader) Pos() uint64 {
- return r.offset + uint64(r.pos)
-}
-
-// Len returns the remaining length of the buffer.
-func (r *BinaryFileReader) Len() uint64 {
- return r.size - r.Pos()
-}
-
-// Offset returns the offset of the buffer.
-func (r *BinaryFileReader) Offset() uint64 {
- return r.offset
-}
-
-// BufferLen returns the length of the buffer.
-func (r *BinaryFileReader) BufferLen() int {
- return len(r.buf)
-}
-
-// Read complies with io.Reader.
-func (r *BinaryFileReader) Read(b []byte) (int, error) {
- if len(b) <= cap(r.buf) {
- if err := r.buffer(r.offset+uint64(r.pos), uint64(len(b))); err != nil {
- return 0, err
- }
- n := copy(b, r.buf[r.pos:])
- r.pos += n
- return n, nil
- }
-
- // read directly from file
- if _, err := r.f.Seek(int64(r.offset)+int64(r.pos), 0); err != nil {
- return 0, err
- }
- n, err := r.f.Read(b)
- r.offset += uint64(r.pos + n)
- r.pos = 0
- r.buf = r.buf[:0]
- return n, err
-}
-
-// ReadBytes reads n bytes.
-func (r *BinaryFileReader) ReadBytes(n int) []byte {
- if n < len(r.buf)-r.pos {
- b := r.buf[r.pos : r.pos+n]
- r.pos += n
- return b
- }
-
- b := make([]byte, n)
- if _, err := r.Read(b); err != nil {
- return nil
- }
- return b
-}
-
-// ReadString reads a string of length n.
-func (r *BinaryFileReader) ReadString(n int) string {
- return string(r.ReadBytes(n))
-}
-
-// ReadByte reads a single byte.
-func (r *BinaryFileReader) ReadByte() byte {
- b := r.ReadBytes(1)
- if b == nil {
- return 0
- }
- return b[0]
-}
-
-// ReadUint8 reads a uint8.
-func (r *BinaryFileReader) ReadUint8() uint8 {
- return r.ReadByte()
-}
-
-// ReadUint16 reads a uint16.
-func (r *BinaryFileReader) ReadUint16() uint16 {
- b := r.ReadBytes(2)
- if b == nil {
- return 0
- }
- return r.Endianness.Uint16(b)
-}
-
-// ReadUint32 reads a uint32.
-func (r *BinaryFileReader) ReadUint32() uint32 {
- b := r.ReadBytes(4)
- if b == nil {
- return 0
- }
- return r.Endianness.Uint32(b)
-}
-
-// ReadUint64 reads a uint64.
-func (r *BinaryFileReader) ReadUint64() uint64 {
- b := r.ReadBytes(8)
- if b == nil {
- return 0
- }
- return r.Endianness.Uint64(b)
-}
-
-// ReadInt8 reads a int8.
-func (r *BinaryFileReader) ReadInt8() int8 {
- return int8(r.ReadByte())
-}
-
-// ReadInt16 reads a int16.
-func (r *BinaryFileReader) ReadInt16() int16 {
- return int16(r.ReadUint16())
-}
-
-// ReadInt32 reads a int32.
-func (r *BinaryFileReader) ReadInt32() int32 {
- return int32(r.ReadUint32())
-}
-
-// ReadInt64 reads a int64.
-func (r *BinaryFileReader) ReadInt64() int64 {
- return int64(r.ReadUint64())
-}
-
type IBinaryReader interface {
+ Bytes([]byte, int64, int64) ([]byte, error)
+ Len() int64
Close() error
- Len() int
- Bytes(int, int64) ([]byte, error)
}
type binaryReaderFile struct {
@@ -385,20 +40,21 @@ func (r *binaryReaderFile) Close() error {
}
// Len returns the length of the underlying memory-mapped file.
-func (r *binaryReaderFile) Len() int {
- return int(r.size)
+func (r *binaryReaderFile) Len() int64 {
+ return r.size
}
-func (r *binaryReaderFile) Bytes(n int, off int64) ([]byte, error) {
+func (r *binaryReaderFile) Bytes(b []byte, n, off int64) ([]byte, error) {
if _, err := r.f.Seek(off, 0); err != nil {
return nil, err
+ } else if b == nil {
+ b = make([]byte, n)
}
- b := make([]byte, n)
m, err := r.f.Read(b)
if err != nil {
return nil, err
- } else if m != n {
+ } else if int64(m) != n {
return nil, errors.New("file: could not read all bytes")
}
return b, nil
@@ -418,20 +74,26 @@ func (r *binaryReaderBytes) Close() error {
}
// Len returns the length of the underlying memory-mapped file.
-func (r *binaryReaderBytes) Len() int {
- return len(r.data)
+func (r *binaryReaderBytes) Len() int64 {
+ return int64(len(r.data))
}
-func (r *binaryReaderBytes) Bytes(n int, off int64) ([]byte, error) {
- if off < 0 || int64(len(r.data)) < off {
- return nil, fmt.Errorf("bytes: invalid offset %d", off)
+func (r *binaryReaderBytes) Bytes(b []byte, n, off int64) ([]byte, error) {
+ if off < 0 || n < 0 || int64(len(r.data)) < off || int64(len(r.data))-off < n {
+ return nil, fmt.Errorf("bytes: invalid range %d--%d", off, off+n)
+ }
+
+ data := r.data[off : off+n : off+n]
+ if b == nil {
+ return data, nil
}
- return r.data[off : off+int64(n) : off+int64(n)], nil
+ copy(b, data)
+ return b, nil
}
type binaryReaderReader struct {
r io.Reader
- n int64
+ size int64
readerAt bool
seeker bool
}
@@ -451,31 +113,33 @@ func (r *binaryReaderReader) Close() error {
}
// Len returns the length of the underlying memory-mapped file.
-func (r *binaryReaderReader) Len() int {
- return int(r.n)
+func (r *binaryReaderReader) Len() int64 {
+ return r.size
}
-func (r *binaryReaderReader) Bytes(n int, off int64) ([]byte, error) {
+func (r *binaryReaderReader) Bytes(b []byte, n, off int64) ([]byte, error) {
+ if b == nil {
+ b = make([]byte, n)
+ }
+
// seeker seems faster than readerAt by 10%
if r.seeker {
if _, err := r.r.(io.Seeker).Seek(off, 0); err != nil {
return nil, err
}
- b := make([]byte, n)
m, err := r.r.Read(b)
if err != nil {
return nil, err
- } else if m != n {
+ } else if int64(m) != n {
return nil, errors.New("file: could not read all bytes")
}
return b, nil
} else if r.readerAt {
- b := make([]byte, n)
m, err := r.r.(io.ReaderAt).ReadAt(b, off)
if err != nil {
return nil, err
- } else if m != n {
+ } else if int64(m) != n {
return nil, errors.New("file: could not read all bytes")
}
return b, nil
@@ -483,22 +147,22 @@ func (r *binaryReaderReader) Bytes(n int, off int64) ([]byte, error) {
return nil, errors.New("io.Seeker and io.ReaderAt not implemented")
}
-type BinaryReader2 struct {
+type BinaryReader struct {
f IBinaryReader
pos int64
err error
- Endian binary.ByteOrder
+ ByteOrder binary.ByteOrder
}
-func NewBinaryReader2(f IBinaryReader) *BinaryReader2 {
- return &BinaryReader2{
- f: f,
- Endian: binary.BigEndian,
+func NewBinaryReader(f IBinaryReader) *BinaryReader {
+ return &BinaryReader{
+ f: f,
+ ByteOrder: binary.BigEndian,
}
}
-func NewBinaryReader2Reader(r io.Reader, n int64) (*BinaryReader2, error) {
+func NewBinaryReaderReader(r io.Reader, n int64) (*BinaryReader, error) {
_, isReaderAt := r.(io.ReaderAt)
_, isSeeker := r.(io.Seeker)
@@ -512,27 +176,44 @@ func NewBinaryReader2Reader(r io.Reader, n int64) (*BinaryReader2, error) {
}
f = newBinaryReaderBytes(b)
}
- return NewBinaryReader2(f), nil
+ return NewBinaryReader(f), nil
}
-func NewBinaryReader2Bytes(data []byte) *BinaryReader2 {
+func NewBinaryReaderBytes(data []byte) *BinaryReader {
f := newBinaryReaderBytes(data)
- return NewBinaryReader2(f)
+ return NewBinaryReader(f)
}
-func NewBinaryReader2File(filename string) (*BinaryReader2, error) {
+func NewBinaryReaderFile(filename string) (*BinaryReader, error) {
f, err := newBinaryReaderFile(filename)
if err != nil {
return nil, err
}
- return NewBinaryReader2(f), nil
+ return NewBinaryReader(f), nil
+}
+
+func (r *BinaryReader) IBinaryReader() IBinaryReader {
+ return r.f
+}
+
+func (r *BinaryReader) Clone() *BinaryReader {
+ f := r.f
+ if cloner, ok := f.(interface{ Clone() IBinaryReader }); ok {
+ f = cloner.Clone()
+ }
+ return &BinaryReader{
+ f: f,
+ pos: r.pos,
+ err: r.err,
+ ByteOrder: r.ByteOrder,
+ }
}
-func (r *BinaryReader2) Err() error {
+func (r *BinaryReader) Err() error {
return r.err
}
-func (r *BinaryReader2) Close() error {
+func (r *BinaryReader) Close() error {
if err := r.f.Close(); err != nil {
return err
}
@@ -540,150 +221,184 @@ func (r *BinaryReader2) Close() error {
}
// InPageCache returns true if the range is already in the page cache (for mmap).
-func (r *BinaryReader2) InPageCache(start, end int64) bool {
- index := int64(r.Pos()) / PageSize
+func (r *BinaryReader) InPageCache(start, end int64) bool {
+ index := r.Pos() / PageSize
return start/PageSize == index && end/PageSize == index
}
-// Free frees all previously read bytes, you cannot seek from before this position (for reader).
-func (r *BinaryReader2) Free() {
-}
-
// Pos returns the reader's position.
-func (r *BinaryReader2) Pos() int64 {
+func (r *BinaryReader) Pos() int64 {
return r.pos
}
// Len returns the remaining length of the buffer.
-func (r *BinaryReader2) Len() int {
- return int(int64(r.f.Len()) - int64(r.pos))
+func (r *BinaryReader) Len() int64 {
+ return r.f.Len() - r.pos
}
-func (r *BinaryReader2) Seek(pos int64) {
- r.pos = pos
+// Seek complies with io.Seeker.
+func (r *BinaryReader) Seek(off int64, whence int) (int64, error) {
+ if whence == 0 {
+ if off < 0 || r.f.Len() < off {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ r.pos = off
+ } else if whence == 1 {
+ if r.pos+off < 0 || r.f.Len() < r.pos+off {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ r.pos += off
+ } else if whence == 2 {
+ if off < -r.f.Len() || 0 < off {
+ return 0, fmt.Errorf("invalid offset")
+ }
+ r.pos = r.f.Len() + off
+ } else {
+ return 0, fmt.Errorf("invalid whence")
+ }
+ return r.pos, nil
}
// Read complies with io.Reader.
-func (r *BinaryReader2) Read(b []byte) (int, error) {
- data, err := r.f.Bytes(len(b), r.pos)
+func (r *BinaryReader) Read(b []byte) (int, error) {
+ data, err := r.f.Bytes(b, int64(len(b)), r.pos)
if err != nil && err != io.EOF {
return 0, err
}
- n := copy(b, data)
- r.pos += int64(len(b))
- return n, err
+ r.pos += int64(len(data))
+ return len(data), err
}
// ReadAt complies with io.ReaderAt.
-func (r *BinaryReader2) ReadAt(b []byte, off int64) (int, error) {
- data, err := r.f.Bytes(len(b), off)
+func (r *BinaryReader) ReadAt(b []byte, off int64) (int, error) {
+ data, err := r.f.Bytes(b, int64(len(b)), off)
if err != nil && err != io.EOF {
return 0, err
}
- n := copy(b, data)
- return n, err
+ return len(data), err
}
// ReadBytes reads n bytes.
-func (r *BinaryReader2) ReadBytes(n int) []byte {
- data, err := r.f.Bytes(n, r.pos)
+func (r *BinaryReader) ReadBytes(n int64) []byte {
+ data, err := r.f.Bytes(nil, n, r.pos)
if err != nil {
r.err = err
return nil
}
- r.pos += int64(n)
+ r.pos += n
return data
}
// ReadString reads a string of length n.
-func (r *BinaryReader2) ReadString(n int) string {
+func (r *BinaryReader) ReadString(n int64) string {
return string(r.ReadBytes(n))
}
// ReadByte reads a single byte.
-func (r *BinaryReader2) ReadByte() byte {
+func (r *BinaryReader) ReadByte() (byte, error) {
data := r.ReadBytes(1)
if data == nil {
- return 0
+ return 0, r.err
}
- return data[0]
+ return data[0], nil
}
// ReadUint8 reads a uint8.
-func (r *BinaryReader2) ReadUint8() uint8 {
- return r.ReadByte()
+func (r *BinaryReader) ReadUint8() uint8 {
+ data := r.ReadBytes(1)
+ if data == nil {
+ return 0
+ }
+ return data[0]
}
// ReadUint16 reads a uint16.
-func (r *BinaryReader2) ReadUint16() uint16 {
+func (r *BinaryReader) ReadUint16() uint16 {
data := r.ReadBytes(2)
if data == nil {
return 0
- } else if r.Endian == binary.LittleEndian {
+ } else if r.ByteOrder == binary.LittleEndian {
return uint16(data[1])<<8 | uint16(data[0])
}
return uint16(data[0])<<8 | uint16(data[1])
}
+// ReadUint24 reads a uint24 into a uint32.
+func (r *BinaryReader) ReadUint24() uint32 {
+ b := r.ReadBytes(3)
+ if b == nil {
+ return 0
+ } else if r.ByteOrder == binary.LittleEndian {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
+ } else {
+ return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
+ }
+}
+
// ReadUint32 reads a uint32.
-func (r *BinaryReader2) ReadUint32() uint32 {
+func (r *BinaryReader) ReadUint32() uint32 {
data := r.ReadBytes(4)
if data == nil {
return 0
- } else if r.Endian == binary.LittleEndian {
+ } else if r.ByteOrder == binary.LittleEndian {
return uint32(data[3])<<24 | uint32(data[2])<<16 | uint32(data[1])<<8 | uint32(data[0])
}
return uint32(data[0])<<24 | uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3])
}
// ReadUint64 reads a uint64.
-func (r *BinaryReader2) ReadUint64() uint64 {
+func (r *BinaryReader) ReadUint64() uint64 {
data := r.ReadBytes(8)
if data == nil {
return 0
- } else if r.Endian == binary.LittleEndian {
+ } else if r.ByteOrder == binary.LittleEndian {
return uint64(data[7])<<56 | uint64(data[6])<<48 | uint64(data[5])<<40 | uint64(data[4])<<32 | uint64(data[3])<<24 | uint64(data[2])<<16 | uint64(data[1])<<8 | uint64(data[0])
}
return uint64(data[0])<<56 | uint64(data[1])<<48 | uint64(data[2])<<40 | uint64(data[3])<<32 | uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7])
}
// ReadInt8 reads a int8.
-func (r *BinaryReader2) ReadInt8() int8 {
- return int8(r.ReadByte())
+func (r *BinaryReader) ReadInt8() int8 {
+ return int8(r.ReadUint8())
}
// ReadInt16 reads a int16.
-func (r *BinaryReader2) ReadInt16() int16 {
+func (r *BinaryReader) ReadInt16() int16 {
return int16(r.ReadUint16())
}
+// ReadInt24 reads an int24 into an int32.
+func (r *BinaryReader) ReadInt24() int32 {
+ return int32(r.ReadUint24())
+}
+
// ReadInt32 reads a int32.
-func (r *BinaryReader2) ReadInt32() int32 {
+func (r *BinaryReader) ReadInt32() int32 {
return int32(r.ReadUint32())
}
// ReadInt64 reads a int64.
-func (r *BinaryReader2) ReadInt64() int64 {
+func (r *BinaryReader) ReadInt64() int64 {
return int64(r.ReadUint64())
}
// BinaryWriter is a big endian binary file format writer.
type BinaryWriter struct {
- buf []byte
- Endian binary.ByteOrder
+ buf []byte
+ ByteOrder binary.AppendByteOrder
}
// NewBinaryWriter returns a big endian binary file format writer.
func NewBinaryWriter(buf []byte) *BinaryWriter {
return &BinaryWriter{
- buf: buf,
- Endian: binary.BigEndian,
+ buf: buf,
+ ByteOrder: binary.BigEndian,
}
}
// Len returns the buffer's length in bytes.
-func (w *BinaryWriter) Len() uint32 {
- return uint32(len(w.buf))
+func (w *BinaryWriter) Len() int64 {
+ return int64(len(w.buf))
}
// Bytes returns the buffer's bytes.
@@ -719,23 +434,26 @@ func (w *BinaryWriter) WriteUint8(v uint8) {
// WriteUint16 writes the given uint16 to the buffer.
func (w *BinaryWriter) WriteUint16(v uint16) {
- pos := len(w.buf)
- w.buf = append(w.buf, make([]byte, 2)...)
- w.Endian.PutUint16(w.buf[pos:], v)
+ w.buf = w.ByteOrder.AppendUint16(w.buf, v)
+}
+
+// WriteUint24 writes the given uint32 as a uint24 to the buffer.
+func (w *BinaryWriter) WriteUint24(v uint32) {
+ if w.ByteOrder == binary.LittleEndian {
+ w.buf = append(w.buf, byte(v), byte(v>>8), byte(v>>16))
+ } else {
+ w.buf = append(w.buf, byte(v>>16), byte(v>>8), byte(v))
+ }
}
// WriteUint32 writes the given uint32 to the buffer.
func (w *BinaryWriter) WriteUint32(v uint32) {
- pos := len(w.buf)
- w.buf = append(w.buf, make([]byte, 4)...)
- w.Endian.PutUint32(w.buf[pos:], v)
+ w.buf = w.ByteOrder.AppendUint32(w.buf, v)
}
// WriteUint64 writes the given uint64 to the buffer.
func (w *BinaryWriter) WriteUint64(v uint64) {
- pos := len(w.buf)
- w.buf = append(w.buf, make([]byte, 8)...)
- w.Endian.PutUint64(w.buf[pos:], v)
+ w.buf = w.ByteOrder.AppendUint64(w.buf, v)
}
// WriteInt8 writes the given int8 to the buffer.
@@ -748,6 +466,11 @@ func (w *BinaryWriter) WriteInt16(v int16) {
w.WriteUint16(uint16(v))
}
+// WriteInt24 writes the given int32 as an int24 to the buffer.
+func (w *BinaryWriter) WriteInt24(v int32) {
+ w.WriteUint24(uint32(v))
+}
+
// WriteInt32 writes the given int32 to the buffer.
func (w *BinaryWriter) WriteInt32(v int32) {
w.WriteUint32(uint32(v))
@@ -794,7 +517,7 @@ func (r *BitmapReader) Read() bool {
// BitmapWriter is a binary bitmap writer.
type BitmapWriter struct {
buf []byte
- pos uint32
+ pos uint64
}
// NewBitmapWriter returns a binary bitmap writer.
@@ -803,8 +526,8 @@ func NewBitmapWriter(buf []byte) *BitmapWriter {
}
// Len returns the buffer's length in bytes.
-func (w *BitmapWriter) Len() uint32 {
- return uint32(len(w.buf))
+func (w *BitmapWriter) Len() int64 {
+ return int64(len(w.buf))
}
// Bytes returns the buffer's bytes.
@@ -814,7 +537,7 @@ func (w *BitmapWriter) Bytes() []byte {
// Write writes the next bit.
func (w *BitmapWriter) Write(bit bool) {
- if uint32(len(w.buf)) <= (w.pos+1)/8 {
+ if uint64(len(w.buf)) <= (w.pos+1)/8 {
w.buf = append(w.buf, 0)
}
if bit {
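With BinaryReader2 renamed to BinaryReader, the Endian field renamed to ByteOrder, Seek following the io.Seeker signature, and new uint24/int24 helpers on both reader and writer, a round trip looks roughly like the sketch below (illustrative values; assumes the constructors and accessors shown in this diff):

package main

import (
	"fmt"

	"github.com/tdewolff/parse/v2"
)

func main() {
	// Write a few big-endian values (the default byte order), then read
	// them back through the renamed reader.
	w := parse.NewBinaryWriter(nil)
	w.WriteUint16(0xCAFE)
	w.WriteUint24(0x010203) // new helper in this change
	w.WriteUint32(0xDEADBEEF)

	r := parse.NewBinaryReaderBytes(w.Bytes())
	fmt.Printf("%#x %#x %#x\n", r.ReadUint16(), r.ReadUint24(), r.ReadUint32())

	// Seek now takes (offset, whence) and returns the new position.
	if _, err := r.Seek(0, 0); err != nil {
		fmt.Println("seek:", err)
	}
}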
diff --git a/vendor/github.com/tdewolff/parse/v2/binary_unix.go b/vendor/github.com/tdewolff/parse/v2/binary_unix.go
index 70bb8767f..4a8979fda 100644
--- a/vendor/github.com/tdewolff/parse/v2/binary_unix.go
+++ b/vendor/github.com/tdewolff/parse/v2/binary_unix.go
@@ -5,7 +5,6 @@ package parse
import (
"errors"
"fmt"
- "io"
"os"
"runtime"
"syscall"
@@ -13,6 +12,7 @@ import (
type binaryReaderMmap struct {
data []byte
+ size int64
}
func newBinaryReaderMmap(filename string) (*binaryReaderMmap, error) {
@@ -47,7 +47,7 @@ func newBinaryReaderMmap(filename string) (*binaryReaderMmap, error) {
if err != nil {
return nil, err
}
- r := &binaryReaderMmap{data}
+ r := &binaryReaderMmap{data, size}
runtime.SetFinalizer(r, (*binaryReaderMmap).Close)
return r, nil
}
@@ -67,25 +67,29 @@ func (r *binaryReaderMmap) Close() error {
}
// Len returns the length of the underlying memory-mapped file.
-func (r *binaryReaderMmap) Len() int {
- return len(r.data)
+func (r *binaryReaderMmap) Len() int64 {
+ return r.size
}
-func (r *binaryReaderMmap) Bytes(n int, off int64) ([]byte, error) {
+func (r *binaryReaderMmap) Bytes(b []byte, n, off int64) ([]byte, error) {
if r.data == nil {
return nil, errors.New("mmap: closed")
- } else if off < 0 || int64(len(r.data)) < off {
- return nil, fmt.Errorf("mmap: invalid offset %d", off)
- } else if int64(len(r.data)-n) < off {
- return r.data[off:len(r.data):len(r.data)], io.EOF
+ } else if off < 0 || n < 0 || int64(len(r.data)) < off || int64(len(r.data))-off < n {
+ return nil, fmt.Errorf("mmap: invalid range %d--%d", off, off+n)
}
- return r.data[off : off+int64(n) : off+int64(n)], nil
+
+ data := r.data[off : off+n : off+n]
+ if b == nil {
+ return data, nil
+ }
+ copy(b, data)
+ return b, nil
}
-func NewBinaryReader2Mmap(filename string) (*BinaryReader2, error) {
+func NewBinaryReaderMmap(filename string) (*BinaryReader, error) {
f, err := newBinaryReaderMmap(filename)
if err != nil {
return nil, err
}
- return NewBinaryReader2(f), nil
+ return NewBinaryReader(f), nil
}
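Across the file, bytes, and mmap backends, the reworked IBinaryReader.Bytes takes an optional destination slice: passing nil lets the bytes and mmap readers return a view into their underlying data, while passing a preallocated buffer makes them copy into it so the buffer can be reused. A caller-side sketch against the constructors shown above (illustrative data only):

package main

import (
	"fmt"

	"github.com/tdewolff/parse/v2"
)

func main() {
	r := parse.NewBinaryReaderBytes([]byte("hello, binary world"))
	br := r.IBinaryReader()

	// nil destination: the bytes backend returns a slice of its own data.
	view, err := br.Bytes(nil, 5, 0)
	if err != nil {
		fmt.Println("read:", err)
		return
	}

	// Preallocated destination: the data is copied, so buf can be reused
	// across calls without aliasing the reader's backing array.
	buf := make([]byte, 6)
	if _, err := br.Bytes(buf, 6, 7); err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("%s / %s\n", view, buf) // hello / binary
}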