author     kim <grufwub@gmail.com>  2025-05-26 16:13:55 +0200
committer  tobi <kipvandenbos@noreply.codeberg.org>  2025-05-26 16:13:55 +0200
commit     143febb318ee16ca68ea312249ab5dadeab608bb (patch)
tree       594820ce5f746c7c9d0e28cc8820da563b0c4bdd /vendor
parent     [chore] migration to update `statuses.thread_id` to be notnull (#4160) (diff)
download   gotosocial-143febb318ee16ca68ea312249ab5dadeab608bb.tar.xz
[chore] update dependencies (#4196)
- go.opentelemetry.io/contrib/exporters/autoexport v0.60.0 -> v0.61.0
- go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 -> v0.61.0

Reviewed-on: https://codeberg.org/superseriousbusiness/gotosocial/pulls/4196
Co-authored-by: kim <grufwub@gmail.com>
Co-committed-by: kim <grufwub@gmail.com>
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/cenkalti/backoff/v4/context.go | 62
-rw-r--r--  vendor/github.com/cenkalti/backoff/v4/exponential.go | 216
-rw-r--r--  vendor/github.com/cenkalti/backoff/v4/retry.go | 146
-rw-r--r--  vendor/github.com/cenkalti/backoff/v4/tries.go | 38
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/.gitignore (renamed from vendor/github.com/cenkalti/backoff/v4/.gitignore) | 0
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md | 29
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/LICENSE (renamed from vendor/github.com/cenkalti/backoff/v4/LICENSE) | 0
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/README.md (renamed from vendor/github.com/cenkalti/backoff/v4/README.md) | 15
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/backoff.go (renamed from vendor/github.com/cenkalti/backoff/v4/backoff.go) | 14
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/error.go | 46
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/exponential.go | 125
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/retry.go | 139
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/ticker.go (renamed from vendor/github.com/cenkalti/backoff/v4/ticker.go) | 18
-rw-r--r--  vendor/github.com/cenkalti/backoff/v5/timer.go (renamed from vendor/github.com/cenkalti/backoff/v4/timer.go) | 2
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go | 32
-rw-r--r--  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go | 10
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse.go | 4
-rw-r--r--  vendor/github.com/prometheus/common/model/alert.go | 2
-rw-r--r--  vendor/github.com/prometheus/common/model/labels.go | 5
-rw-r--r--  vendor/github.com/prometheus/common/model/metric.go | 28
-rw-r--r--  vendor/github.com/prometheus/procfs/.golangci.yml | 63
-rw-r--r--  vendor/github.com/prometheus/procfs/Makefile.common | 10
-rw-r--r--  vendor/github.com/prometheus/procfs/README.md | 6
-rw-r--r--  vendor/github.com/prometheus/procfs/arp.go | 4
-rw-r--r--  vendor/github.com/prometheus/procfs/fs.go | 10
-rw-r--r--  vendor/github.com/prometheus/procfs/fs_statfs_notype.go | 4
-rw-r--r--  vendor/github.com/prometheus/procfs/fscache.go | 6
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/fs/fs.go | 3
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/parse.go | 14
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go | 20
-rw-r--r--  vendor/github.com/prometheus/procfs/mountstats.go | 27
-rw-r--r--  vendor/github.com/prometheus/procfs/net_dev_snmp6.go | 96
-rw-r--r--  vendor/github.com/prometheus/procfs/net_ip_socket.go | 8
-rw-r--r--  vendor/github.com/prometheus/procfs/net_protocols.go | 21
-rw-r--r--  vendor/github.com/prometheus/procfs/net_tcp.go | 4
-rw-r--r--  vendor/github.com/prometheus/procfs/net_unix.go | 8
-rw-r--r--  vendor/github.com/prometheus/procfs/proc.go | 8
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_cgroup.go | 2
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_io.go | 2
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_netstat.go | 224
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_smaps.go | 4
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_snmp.go | 120
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_snmp6.go | 150
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_status.go | 18
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_sys.go | 2
-rw-r--r--  vendor/github.com/prometheus/procfs/softirqs.go | 22
-rw-r--r--  vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md | 2
-rw-r--r--  vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go | 9
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go | 33
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go | 27
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go | 48
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go | 5
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go | 1
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go | 36
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go | 29
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go | 16
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go | 1
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go | 36
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go | 20
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go | 1
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go | 5
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go | 30
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go | 1
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go | 14
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go | 16
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go | 11
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go | 108
-rw-r--r--  vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/log/doc.go | 17
-rw-r--r--  vendor/go.opentelemetry.io/otel/log/embedded/embedded.go | 18
-rw-r--r--  vendor/go.opentelemetry.io/otel/log/keyvalue.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/log/noop/noop.go | 6
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/log/batch.go | 28
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/log/doc.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/log/exporter.go | 11
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/log/logger.go | 1
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/log/provider.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go | 138
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go | 11
-rw-r--r--  vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go | 56
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go | 2
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go | 315
-rw-r--r--  vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/balancer/base/balancer.go | 12
-rw-r--r--  vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go | 20
-rw-r--r--  vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go | 43
-rw-r--r--  vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/clientconn.go | 3
-rw-r--r--  vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go | 167
-rw-r--r--  vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go | 64
-rw-r--r--  vendor/google.golang.org/grpc/internal/envconfig/envconfig.go | 20
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go | 13
-rw-r--r--  vendor/google.golang.org/grpc/internal/metadata/metadata.go | 26
-rw-r--r--  vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go | 213
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/client_stream.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_client.go | 8
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http2_server.go | 23
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/http_util.go | 4
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/server_stream.go | 6
-rw-r--r--  vendor/google.golang.org/grpc/resolver/map.go | 174
-rw-r--r--  vendor/google.golang.org/grpc/resolver_wrapper.go | 23
-rw-r--r--  vendor/google.golang.org/grpc/rpc_util.go | 16
-rw-r--r--  vendor/google.golang.org/grpc/stats/stats.go | 35
-rw-r--r--  vendor/modules.txt | 102
150 files changed, 2558 insertions, 1613 deletions
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
deleted file mode 100644
index 48482330e..000000000
--- a/vendor/github.com/cenkalti/backoff/v4/context.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package backoff
-
-import (
- "context"
- "time"
-)
-
-// BackOffContext is a backoff policy that stops retrying after the context
-// is canceled.
-type BackOffContext interface { // nolint: golint
- BackOff
- Context() context.Context
-}
-
-type backOffContext struct {
- BackOff
- ctx context.Context
-}
-
-// WithContext returns a BackOffContext with context ctx
-//
-// ctx must not be nil
-func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
- if ctx == nil {
- panic("nil context")
- }
-
- if b, ok := b.(*backOffContext); ok {
- return &backOffContext{
- BackOff: b.BackOff,
- ctx: ctx,
- }
- }
-
- return &backOffContext{
- BackOff: b,
- ctx: ctx,
- }
-}
-
-func getContext(b BackOff) context.Context {
- if cb, ok := b.(BackOffContext); ok {
- return cb.Context()
- }
- if tb, ok := b.(*backOffTries); ok {
- return getContext(tb.delegate)
- }
- return context.Background()
-}
-
-func (b *backOffContext) Context() context.Context {
- return b.ctx
-}
-
-func (b *backOffContext) NextBackOff() time.Duration {
- select {
- case <-b.ctx.Done():
- return Stop
- default:
- return b.BackOff.NextBackOff()
- }
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
deleted file mode 100644
index aac99f196..000000000
--- a/vendor/github.com/cenkalti/backoff/v4/exponential.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package backoff
-
-import (
- "math/rand"
- "time"
-)
-
-/*
-ExponentialBackOff is a backoff implementation that increases the backoff
-period for each retry attempt using a randomization function that grows exponentially.
-
-NextBackOff() is calculated using the following formula:
-
- randomized interval =
- RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
-
-In other words NextBackOff() will range between the randomization factor
-percentage below and above the retry interval.
-
-For example, given the following parameters:
-
- RetryInterval = 2
- RandomizationFactor = 0.5
- Multiplier = 2
-
-the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
-multiplied by the exponential, that is, between 2 and 6 seconds.
-
-Note: MaxInterval caps the RetryInterval and not the randomized interval.
-
-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
-
- Request # RetryInterval (seconds) Randomized Interval (seconds)
-
- 1 0.5 [0.25, 0.75]
- 2 0.75 [0.375, 1.125]
- 3 1.125 [0.562, 1.687]
- 4 1.687 [0.8435, 2.53]
- 5 2.53 [1.265, 3.795]
- 6 3.795 [1.897, 5.692]
- 7 5.692 [2.846, 8.538]
- 8 8.538 [4.269, 12.807]
- 9 12.807 [6.403, 19.210]
- 10 19.210 backoff.Stop
-
-Note: Implementation is not thread-safe.
-*/
-type ExponentialBackOff struct {
- InitialInterval time.Duration
- RandomizationFactor float64
- Multiplier float64
- MaxInterval time.Duration
- // After MaxElapsedTime the ExponentialBackOff returns Stop.
- // It never stops if MaxElapsedTime == 0.
- MaxElapsedTime time.Duration
- Stop time.Duration
- Clock Clock
-
- currentInterval time.Duration
- startTime time.Time
-}
-
-// Clock is an interface that returns current time for BackOff.
-type Clock interface {
- Now() time.Time
-}
-
-// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
-type ExponentialBackOffOpts func(*ExponentialBackOff)
-
-// Default values for ExponentialBackOff.
-const (
- DefaultInitialInterval = 500 * time.Millisecond
- DefaultRandomizationFactor = 0.5
- DefaultMultiplier = 1.5
- DefaultMaxInterval = 60 * time.Second
- DefaultMaxElapsedTime = 15 * time.Minute
-)
-
-// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
- b := &ExponentialBackOff{
- InitialInterval: DefaultInitialInterval,
- RandomizationFactor: DefaultRandomizationFactor,
- Multiplier: DefaultMultiplier,
- MaxInterval: DefaultMaxInterval,
- MaxElapsedTime: DefaultMaxElapsedTime,
- Stop: Stop,
- Clock: SystemClock,
- }
- for _, fn := range opts {
- fn(b)
- }
- b.Reset()
- return b
-}
-
-// WithInitialInterval sets the initial interval between retries.
-func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.InitialInterval = duration
- }
-}
-
-// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
-func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.RandomizationFactor = randomizationFactor
- }
-}
-
-// WithMultiplier sets the multiplier for increasing the interval after each retry.
-func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.Multiplier = multiplier
- }
-}
-
-// WithMaxInterval sets the maximum interval between retries.
-func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.MaxInterval = duration
- }
-}
-
-// WithMaxElapsedTime sets the maximum total time for retries.
-func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.MaxElapsedTime = duration
- }
-}
-
-// WithRetryStopDuration sets the duration after which retries should stop.
-func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.Stop = duration
- }
-}
-
-// WithClockProvider sets the clock used to measure time.
-func WithClockProvider(clock Clock) ExponentialBackOffOpts {
- return func(ebo *ExponentialBackOff) {
- ebo.Clock = clock
- }
-}
-
-type systemClock struct{}
-
-func (t systemClock) Now() time.Time {
- return time.Now()
-}
-
-// SystemClock implements Clock interface that uses time.Now().
-var SystemClock = systemClock{}
-
-// Reset the interval back to the initial retry interval and restarts the timer.
-// Reset must be called before using b.
-func (b *ExponentialBackOff) Reset() {
- b.currentInterval = b.InitialInterval
- b.startTime = b.Clock.Now()
-}
-
-// NextBackOff calculates the next backoff interval using the formula:
-// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
-func (b *ExponentialBackOff) NextBackOff() time.Duration {
- // Make sure we have not gone over the maximum elapsed time.
- elapsed := b.GetElapsedTime()
- next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
- b.incrementCurrentInterval()
- if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
- return b.Stop
- }
- return next
-}
-
-// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
-// is created and is reset when Reset() is called.
-//
-// The elapsed time is computed using time.Now().UnixNano(). It is
-// safe to call even while the backoff policy is used by a running
-// ticker.
-func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
- return b.Clock.Now().Sub(b.startTime)
-}
-
-// Increments the current interval by multiplying it with the multiplier.
-func (b *ExponentialBackOff) incrementCurrentInterval() {
- // Check for overflow, if overflow is detected set the current interval to the max interval.
- if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
- b.currentInterval = b.MaxInterval
- } else {
- b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
- }
-}
-
-// Returns a random value from the following interval:
-// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
-func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
- if randomizationFactor == 0 {
- return currentInterval // make sure no randomness is used when randomizationFactor is 0.
- }
- var delta = randomizationFactor * float64(currentInterval)
- var minInterval = float64(currentInterval) - delta
- var maxInterval = float64(currentInterval) + delta
-
- // Get a random value from the range [minInterval, maxInterval].
- // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
- // we want a 33% chance for selecting either 1, 2 or 3.
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
deleted file mode 100644
index b9c0c51cd..000000000
--- a/vendor/github.com/cenkalti/backoff/v4/retry.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package backoff
-
-import (
- "errors"
- "time"
-)
-
-// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
-// The operation will be retried using a backoff policy if it returns an error.
-type OperationWithData[T any] func() (T, error)
-
-// An Operation is executing by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-func (o Operation) withEmptyData() OperationWithData[struct{}] {
- return func() (struct{}, error) {
- return struct{}{}, o()
- }
-}
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy stated to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-// Retry the operation o until it does not return error or BackOff stops.
-// o is guaranteed to be run at least once.
-//
-// If o returns a *PermanentError, the operation is not retried, and the
-// wrapped error is returned.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b BackOff) error {
- return RetryNotify(o, b, nil)
-}
-
-// RetryWithData is like Retry but returns data in the response too.
-func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
- return RetryNotifyWithData(o, b, nil)
-}
-
-// RetryNotify calls notify function with the error and wait duration
-// for each failed attempt before sleep.
-func RetryNotify(operation Operation, b BackOff, notify Notify) error {
- return RetryNotifyWithTimer(operation, b, notify, nil)
-}
-
-// RetryNotifyWithData is like RetryNotify but returns data in the response too.
-func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
- return doRetryNotify(operation, b, notify, nil)
-}
-
-// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
-// for each failed attempt before sleep.
-// A default timer that uses system timer is used when nil is passed.
-func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
- _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
- return err
-}
-
-// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
-func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
- return doRetryNotify(operation, b, notify, t)
-}
-
-func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
- var (
- err error
- next time.Duration
- res T
- )
- if t == nil {
- t = &defaultTimer{}
- }
-
- defer func() {
- t.Stop()
- }()
-
- ctx := getContext(b)
-
- b.Reset()
- for {
- res, err = operation()
- if err == nil {
- return res, nil
- }
-
- var permanent *PermanentError
- if errors.As(err, &permanent) {
- return res, permanent.Err
- }
-
- if next = b.NextBackOff(); next == Stop {
- if cerr := ctx.Err(); cerr != nil {
- return res, cerr
- }
-
- return res, err
- }
-
- if notify != nil {
- notify(err, next)
- }
-
- t.Start(next)
-
- select {
- case <-ctx.Done():
- return res, ctx.Err()
- case <-t.C():
- }
- }
-}
-
-// PermanentError signals that the operation should not be retried.
-type PermanentError struct {
- Err error
-}
-
-func (e *PermanentError) Error() string {
- return e.Err.Error()
-}
-
-func (e *PermanentError) Unwrap() error {
- return e.Err
-}
-
-func (e *PermanentError) Is(target error) bool {
- _, ok := target.(*PermanentError)
- return ok
-}
-
-// Permanent wraps the given err in a *PermanentError.
-func Permanent(err error) error {
- if err == nil {
- return nil
- }
- return &PermanentError{
- Err: err,
- }
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
deleted file mode 100644
index 28d58ca37..000000000
--- a/vendor/github.com/cenkalti/backoff/v4/tries.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package backoff
-
-import "time"
-
-/*
-WithMaxRetries creates a wrapper around another BackOff, which will
-return Stop if NextBackOff() has been called too many times since
-the last time Reset() was called
-
-Note: Implementation is not thread-safe.
-*/
-func WithMaxRetries(b BackOff, max uint64) BackOff {
- return &backOffTries{delegate: b, maxTries: max}
-}
-
-type backOffTries struct {
- delegate BackOff
- maxTries uint64
- numTries uint64
-}
-
-func (b *backOffTries) NextBackOff() time.Duration {
- if b.maxTries == 0 {
- return Stop
- }
- if b.maxTries > 0 {
- if b.maxTries <= b.numTries {
- return Stop
- }
- b.numTries++
- }
- return b.delegate.NextBackOff()
-}
-
-func (b *backOffTries) Reset() {
- b.numTries = 0
- b.delegate.Reset()
-}
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v5/.gitignore
index 50d95c548..50d95c548 100644
--- a/vendor/github.com/cenkalti/backoff/v4/.gitignore
+++ b/vendor/github.com/cenkalti/backoff/v5/.gitignore
diff --git a/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
new file mode 100644
index 000000000..658c37436
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md
@@ -0,0 +1,29 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [5.0.0] - 2024-12-19
+
+### Added
+
+- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.
+
+### Changed
+
+- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
+- Retry function now accepts a context.Context.
+- Operation function signature changed to return result (any type) and error.
+
+### Removed
+
+- RetryNotify* and RetryWithData functions. Only single Retry function remains.
+- Optional arguments from ExponentialBackoff constructor.
+- Clock and Timer interfaces.
+
+### Fixed
+
+- The original error is returned from Retry if there's a PermanentError. (#144)
+- The Retry function respects the wrapped PermanentError. (#140)
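The changelog above collapses the v4 Retry* helpers into a single generic function. A minimal sketch of the before/after call shape, assuming a hypothetical `doSomething` operation; the actual v5 signature appears in the new retry.go later in this diff.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// doSomething is a hypothetical operation; in v5 an operation returns a value and an error.
func doSomething() (string, error) { return "done", nil }

func main() {
	// v4 (removed in this update) looked roughly like:
	//   err := backoff.RetryNotify(op, backoff.WithContext(b, ctx), notify)
	// v5 collapses this into one generic Retry that takes a context and options.
	res, err := backoff.Retry(context.Background(), doSomething,
		backoff.WithMaxElapsedTime(30*time.Second))
	fmt.Println(res, err)
}
```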
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v5/LICENSE
index 89b817996..89b817996 100644
--- a/vendor/github.com/cenkalti/backoff/v4/LICENSE
+++ b/vendor/github.com/cenkalti/backoff/v5/LICENSE
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v5/README.md
index 9433004a2..4611b1d17 100644
--- a/vendor/github.com/cenkalti/backoff/v4/README.md
+++ b/vendor/github.com/cenkalti/backoff/v5/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
## Usage
-Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
-Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+For most cases, use `Retry` function. See [example_test.go][example] for an example.
+
+If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
## Contributing
@@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.
-[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
-[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
-[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
+[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
+[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v5/backoff.go
index 3676ee405..dd2b24ca7 100644
--- a/vendor/github.com/cenkalti/backoff/v4/backoff.go
+++ b/vendor/github.com/cenkalti/backoff/v5/backoff.go
@@ -15,16 +15,16 @@ import "time"
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
// NextBackOff returns the duration to wait before retrying the operation,
- // or backoff. Stop to indicate that no more retries should be made.
+ // backoff.Stop to indicate that no more retries should be made.
//
// Example usage:
//
- // duration := backoff.NextBackOff();
- // if (duration == backoff.Stop) {
- // // Do not retry operation.
- // } else {
- // // Sleep for duration and retry operation.
- // }
+ // duration := backoff.NextBackOff()
+ // if duration == backoff.Stop {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
//
NextBackOff() time.Duration
diff --git a/vendor/github.com/cenkalti/backoff/v5/error.go b/vendor/github.com/cenkalti/backoff/v5/error.go
new file mode 100644
index 000000000..beb2b38a2
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/error.go
@@ -0,0 +1,46 @@
+package backoff
+
+import (
+ "fmt"
+ "time"
+)
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
+
+// Error returns a string representation of the Permanent error.
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+// Unwrap returns the wrapped error.
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+// RetryAfterError signals that the operation should be retried after the given duration.
+type RetryAfterError struct {
+ Duration time.Duration
+}
+
+// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
+func RetryAfter(seconds int) error {
+ return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
+}
+
+// Error returns a string representation of the RetryAfter error.
+func (e *RetryAfterError) Error() string {
+ return fmt.Sprintf("retry after %s", e.Duration)
+}
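The new error.go distinguishes two sentinel types: PermanentError stops the retry loop, while RetryAfterError overrides the next delay. A hedged sketch of how an operation might return them; the HTTP status mapping and `fetch` helper are assumptions for illustration, not part of this repository.

```go
package example

import (
	"fmt"
	"io"
	"net/http"

	"github.com/cenkalti/backoff/v5"
)

// fetch maps HTTP outcomes onto the error types defined above: 429 requests a
// fixed delay, other 4xx responses stop retrying, everything else is transient.
func fetch(client *http.Client, url string) ([]byte, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, err // transient network error: retried with normal backoff
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode == http.StatusTooManyRequests:
		// RetryAfter tells Retry how many seconds to wait before the next attempt.
		return nil, backoff.RetryAfter(5)
	case resp.StatusCode >= 400 && resp.StatusCode < 500:
		// Won't succeed on retry; Permanent stops the retry loop immediately.
		return nil, backoff.Permanent(fmt.Errorf("request failed: %s", resp.Status))
	}
	return io.ReadAll(resp.Body)
}
```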
diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go
new file mode 100644
index 000000000..c1f3e442d
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go
@@ -0,0 +1,125 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words NextBackOff() will range between the randomization factor
+percentage below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
+multiplied by the exponential, that is, between 2 and 6 seconds.
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the following default arguments, for 10 tries the sequence will be,
+and assuming we go over the MaxElapsedTime on the 10th try:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+ 10 19.210 backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+
+ currentInterval time.Duration
+}
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff() *ExponentialBackOff {
+ return &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ }
+}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+//
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ if b.currentInterval == 0 {
+ b.currentInterval = b.InitialInterval
+ }
+
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ return next
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow, if overflow is detected set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// Returns a random value from the following interval:
+//
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
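Since the v5 constructor no longer accepts options (per the changelog above), non-default settings become plain field assignments on the struct. A small sketch under that assumption; the concrete values are illustrative only.

```go
package example

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// growth prints a few jittered intervals from a customised policy.
func growth() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 200 * time.Millisecond
	b.RandomizationFactor = 0.3 // values drawn from [0.7*interval, 1.3*interval]
	b.Multiplier = 2.0
	b.MaxInterval = 10 * time.Second

	// Each call returns a jittered value around the current interval, then
	// multiplies the interval by Multiplier, capped at MaxInterval.
	for i := 0; i < 4; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```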
diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go
new file mode 100644
index 000000000..e43f47fb8
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v5/retry.go
@@ -0,0 +1,139 @@
+package backoff
+
+import (
+ "context"
+ "errors"
+ "time"
+)
+
+// DefaultMaxElapsedTime sets a default limit for the total retry duration.
+const DefaultMaxElapsedTime = 15 * time.Minute
+
+// Operation is a function that attempts an operation and may be retried.
+type Operation[T any] func() (T, error)
+
+// Notify is a function called on operation error with the error and backoff duration.
+type Notify func(error, time.Duration)
+
+// retryOptions holds configuration settings for the retry mechanism.
+type retryOptions struct {
+ BackOff BackOff // Strategy for calculating backoff periods.
+ Timer timer // Timer to manage retry delays.
+ Notify Notify // Optional function to notify on each retry error.
+ MaxTries uint // Maximum number of retry attempts.
+ MaxElapsedTime time.Duration // Maximum total time for all retries.
+}
+
+type RetryOption func(*retryOptions)
+
+// WithBackOff configures a custom backoff strategy.
+func WithBackOff(b BackOff) RetryOption {
+ return func(args *retryOptions) {
+ args.BackOff = b
+ }
+}
+
+// withTimer sets a custom timer for managing delays between retries.
+func withTimer(t timer) RetryOption {
+ return func(args *retryOptions) {
+ args.Timer = t
+ }
+}
+
+// WithNotify sets a notification function to handle retry errors.
+func WithNotify(n Notify) RetryOption {
+ return func(args *retryOptions) {
+ args.Notify = n
+ }
+}
+
+// WithMaxTries limits the number of retry attempts.
+func WithMaxTries(n uint) RetryOption {
+ return func(args *retryOptions) {
+ args.MaxTries = n
+ }
+}
+
+// WithMaxElapsedTime limits the total duration for retry attempts.
+func WithMaxElapsedTime(d time.Duration) RetryOption {
+ return func(args *retryOptions) {
+ args.MaxElapsedTime = d
+ }
+}
+
+// Retry attempts the operation until success, a permanent error, or backoff completion.
+// It ensures the operation is executed at least once.
+//
+// Returns the operation result or error if retries are exhausted or context is cancelled.
+func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
+ // Initialize default retry options.
+ args := &retryOptions{
+ BackOff: NewExponentialBackOff(),
+ Timer: &defaultTimer{},
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ }
+
+ // Apply user-provided options to the default settings.
+ for _, opt := range opts {
+ opt(args)
+ }
+
+ defer args.Timer.Stop()
+
+ startedAt := time.Now()
+ args.BackOff.Reset()
+ for numTries := uint(1); ; numTries++ {
+ // Execute the operation.
+ res, err := operation()
+ if err == nil {
+ return res, nil
+ }
+
+ // Stop retrying if maximum tries exceeded.
+ if args.MaxTries > 0 && numTries >= args.MaxTries {
+ return res, err
+ }
+
+ // Handle permanent errors without retrying.
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, err
+ }
+
+ // Stop retrying if context is cancelled.
+ if cerr := context.Cause(ctx); cerr != nil {
+ return res, cerr
+ }
+
+ // Calculate next backoff duration.
+ next := args.BackOff.NextBackOff()
+ if next == Stop {
+ return res, err
+ }
+
+ // Reset backoff if RetryAfterError is encountered.
+ var retryAfter *RetryAfterError
+ if errors.As(err, &retryAfter) {
+ next = retryAfter.Duration
+ args.BackOff.Reset()
+ }
+
+ // Stop retrying if maximum elapsed time exceeded.
+ if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
+ return res, err
+ }
+
+ // Notify on error if a notifier function is provided.
+ if args.Notify != nil {
+ args.Notify(err, next)
+ }
+
+ // Wait for the next backoff period or context cancellation.
+ args.Timer.Start(next)
+ select {
+ case <-args.Timer.C():
+ case <-ctx.Done():
+ return res, context.Cause(ctx)
+ }
+ }
+}
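A hedged usage sketch of the new Retry with the options defined above (WithBackOff, WithMaxTries, WithNotify); the flaky `op` closure is hypothetical and only there to exercise the retry loop.

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	attempts := 0
	op := func() (int, error) {
		attempts++
		if attempts < 3 {
			return 0, errors.New("not ready yet") // transient: retried
		}
		return attempts, nil
	}

	// Options bound the retry loop and surface each failure before sleeping.
	n, err := backoff.Retry(context.Background(), op,
		backoff.WithBackOff(backoff.NewExponentialBackOff()),
		backoff.WithMaxTries(5),
		backoff.WithNotify(func(err error, next time.Duration) {
			log.Printf("attempt failed: %v; retrying in %s", err, next)
		}),
	)
	log.Println(n, err)
}
```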
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v5/ticker.go
index df9d68bce..f0d4b2ae7 100644
--- a/vendor/github.com/cenkalti/backoff/v4/ticker.go
+++ b/vendor/github.com/cenkalti/backoff/v5/ticker.go
@@ -1,7 +1,6 @@
package backoff
import (
- "context"
"sync"
"time"
)
@@ -14,8 +13,7 @@ type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
- ctx context.Context
- timer Timer
+ timer timer
stop chan struct{}
stopOnce sync.Once
}
@@ -27,22 +25,12 @@ type Ticker struct {
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
- return NewTickerWithTimer(b, &defaultTimer{})
-}
-
-// NewTickerWithTimer returns a new Ticker with a custom timer.
-// A default timer that uses system timer is used when nil is passed.
-func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
- if timer == nil {
- timer = &defaultTimer{}
- }
c := make(chan time.Time)
t := &Ticker{
C: c,
c: c,
b: b,
- ctx: getContext(b),
- timer: timer,
+ timer: &defaultTimer{},
stop: make(chan struct{}),
}
t.b.Reset()
@@ -73,8 +61,6 @@ func (t *Ticker) run() {
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
- case <-t.ctx.Done():
- return
}
}
}
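The hunk above removes the context plumbing from Ticker, so cancellation is now entirely the caller's responsibility. A rough sketch of the usual tick-driven retry loop, assuming a hypothetical `operation`; Ticker.Stop and the channel-close-on-Stop behaviour are part of the existing Ticker code not shown in this hunk.

```go
package example

import (
	"log"

	"github.com/cenkalti/backoff/v5"
)

// tickRetry retries operation on each tick; ticks arrive at backoff intervals
// and the caller stops the ticker once the operation succeeds.
func tickRetry(operation func() error) error {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	var err error
	for range ticker.C {
		if err = operation(); err != nil {
			log.Printf("retrying after error: %v", err)
			continue
		}
		return nil // success; the deferred Stop ends the ticker goroutine
	}
	return err // channel closed: the backoff policy returned Stop
}
```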
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v5/timer.go
index 8120d0213..a89530974 100644
--- a/vendor/github.com/cenkalti/backoff/v4/timer.go
+++ b/vendor/github.com/cenkalti/backoff/v5/timer.go
@@ -2,7 +2,7 @@ package backoff
import "time"
-type Timer interface {
+type timer interface {
Start(duration time.Duration)
Stop()
C() <-chan time.Time
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index 41cd4f503..bbe7decf0 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -148,22 +148,20 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
}
md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Error("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
-
- // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
- // Unless the request includes a TE header field indicating "trailers"
- // is acceptable, as described in Section 4.3, a server SHOULD NOT
- // generate trailer fields that it believes are necessary for the user
- // agent to receive.
- doForwardTrailers := requestAcceptsTrailers(r)
-
- if doForwardTrailers {
- handleForwardResponseTrailerHeader(w, mux, md)
- w.Header().Set("Transfer-Encoding", "chunked")
+ if ok {
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(r)
+
+ if doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
}
st := HTTPStatusFromCode(s.Code())
@@ -176,7 +174,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
grpclog.Errorf("Failed to write response: %v", err)
}
- if doForwardTrailers {
+ if ok && requestAcceptsTrailers(r) {
handleForwardResponseTrailer(w, mux, md)
}
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
index f0727cf7c..2f0b9e9e0 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -153,12 +153,10 @@ type responseBody interface {
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Error("Failed to extract ServerMetadata from context")
+ if ok {
+ handleForwardResponseServerMetadata(w, mux, md)
}
- handleForwardResponseServerMetadata(w, mux, md)
-
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
@@ -166,7 +164,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
// agent to receive.
doForwardTrailers := requestAcceptsTrailers(req)
- if doForwardTrailers {
+ if ok && doForwardTrailers {
handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
@@ -204,7 +202,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
grpclog.Errorf("Failed to write response: %v", err)
}
- if doForwardTrailers {
+ if ok && doForwardTrailers {
handleForwardResponseTrailer(w, mux, md)
}
}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b4607fe4d..4067978a1 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn {
}
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
// labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+ (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index bd3a39e3e..460f554f2 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 73b7aa3e6..de83afe93 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@ import (
)
const (
- // AlertNameLabel is the name of the label containing the an alert's name.
+ // AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"
// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool {
return false
}
for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ // TODO: Apply De Morgan's law. Make sure there are tests for this.
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
return false
}
}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 5766107cf..a6b01755b 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -27,13 +27,25 @@ import (
)
var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
- // mode in isolation from other components that don't support UTF-8 may result
- // in bugs or other undefined behavior. This value can be set to
- // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
- // avoid need for locking, this value should be set once, ideally in an
- // init(), before multiple goroutines are started.
+ // NameValidationScheme determines the global default method of the name
+ // validation to be used by all calls to IsValidMetricName() and LabelName
+ // IsValid().
+ //
+ // Deprecated: This variable should not be used and might be removed in the
+ // far future. If you wish to stick to the legacy name validation use
+ // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+ // instead. This variable is here as an escape hatch for emergency cases,
+ // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+ // to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+ // the change. In such a case, a temporary assignment to `LegacyValidation`
+ // value in the `init()` function in your main.go or so, could be considered.
+ //
+ // Historically we opted for a global variable for feature gating different
+ // validation schemes in operations that were not otherwise easily adjustable
+ // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+ // Labels structure or package might have been a better choice. Given the
+ // change was made and many upgraded the common already, we live this as-is
+ // with this warning and learning for the future.
NameValidationScheme = UTF8Validation
// NameEscapingScheme defines the default way that names will be escaped when
@@ -50,7 +62,7 @@ var (
type ValidationScheme int
const (
- // LegacyValidation is a setting that requirets that metric and label names
+ // LegacyValidation is a setting that requires that all metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
LegacyValidation ValidationScheme = iota
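The deprecation note above suggests pinning the legacy scheme once, at startup, if a downstream binary cannot yet handle UTF-8 metric names. A minimal sketch of that escape hatch; whether it is appropriate depends on the consuming program.

```go
package example

import "github.com/prometheus/common/model"

func init() {
	// Escape hatch described above: opt back into legacy name validation
	// once, before any goroutines use the model package.
	model.NameValidationScheme = model.LegacyValidation
}
```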
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e67..3c3bf910f 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,22 +1,45 @@
----
+version: "2"
linters:
enable:
- - errcheck
- - godot
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - testifylint
- - unused
-
-linter-settings:
- godot:
- capital: true
- exclude:
- # Ignore "See: URL"
- - 'See:'
- misspell:
- locale: US
+ - forbidigo
+ - godot
+ - misspell
+ - revive
+ - testifylint
+ settings:
+ forbidigo:
+ forbid:
+ - pattern: ^fmt\.Print.*$
+ msg: Do not commit print statements.
+ godot:
+ exclude:
+ # Ignore "See: URL".
+ - 'See:'
+ capital: true
+ misspell:
+ locale: US
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ settings:
+ goimports:
+ local-prefixes:
+ - github.com/prometheus/procfs
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 161729235..0ed55c2ba 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
PROMU := $(FIRST_GOPATH)/bin/promu
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v2.0.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
exit 1; \
fi
endef
+
+govulncheck: install-govulncheck
+ govulncheck ./...
+
+install-govulncheck:
+ command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 1224816c2..0718239cf 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
```bash
rm -rf testdata/fixtures
make test
```
-Next, make the required changes to the extracted files in the `fixtures` directory. When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index cdcc8a7cc..2e5334415 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -23,9 +23,9 @@ import (
// Learned from include/uapi/linux/if_arp.h.
const (
- // completed entry (ha valid).
+ // Completed entry (ha valid).
ATFComplete = 0x02
- // permanent entry.
+ // Permanent entry.
ATFPermanent = 0x04
// Publish entry.
ATFPublish = 0x08
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 4980c875b..9bdaccc7c 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -24,8 +24,14 @@ type FS struct {
isReal bool
}
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+ // DefaultMountPoint is the common mount point of the proc filesystem.
+ DefaultMountPoint = fs.DefaultProcMountPoint
+
+ // SectorSize represents the size of a sector in bytes.
+ // It is specific to Linux block I/O operations.
+ SectorSize = 512
+)
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
index 134767d69..1b5bdbdf8 100644
--- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -17,7 +17,7 @@
package procfs
// isRealProc returns true on architectures that don't have a Type argument
-// in their Statfs_t struct
-func isRealProc(mountPoint string) (bool, error) {
+// in their Statfs_t struct.
+func isRealProc(_ string) (bool, error) {
return true, nil
}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index cf2e3eaa0..7db863307 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
+ // Number of page stores canceled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
- // Number of async ops cancelled
+ // Number of async ops canceled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
- // Number of async ops initialised
+ // Number of async ops initialized
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 3c18c7610..3a43e8391 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -28,6 +28,9 @@ const (
// DefaultConfigfsMountPoint is the common mount point of the configfs.
DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+ // DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+ DefaultSelinuxMountPoint = "/sys/fs/selinux"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 14272dc78..5a7d2df06 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,6 +14,7 @@
package util
import (
+ "errors"
"os"
"strconv"
"strings"
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
}
return &truth
}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ hexString := strings.TrimSpace(string(data))
+ if !strings.HasPrefix(hexString, "0x") {
+ return 0, errors.New("invalid format: hex string does not start with '0x'")
+ }
+ return strconv.ParseUint(hexString[2:], 16, 64)
+}
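The new `ReadHexFromFile` helper lives in procfs's internal `util` package, so it cannot be imported from outside the module; the sketch below simply reproduces the same parsing approach as a standalone function to show the expected input shape (`0x` prefix, hex digits, optional trailing whitespace). The sysfs-style path used here is hypothetical.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readHex mirrors the vendored util.ReadHexFromFile: file contents such
// as "0x1f\n" parse to 31; anything without a "0x" prefix is rejected.
func readHex(path string) (uint64, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	s := strings.TrimSpace(string(data))
	if !strings.HasPrefix(s, "0x") {
		return 0, errors.New("invalid format: hex string does not start with '0x'")
	}
	return strconv.ParseUint(s[2:], 16, 64)
}

func main() {
	// Hypothetical attribute path; any file containing "0xXX" works.
	v, err := readHex("/sys/class/example/device/hex_attr")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("value:", v)
}
```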
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index 1ab875cee..d5404a6d7 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -20,6 +20,8 @@ package util
import (
"bytes"
"os"
+ "strconv"
+ "strings"
"syscall"
)
@@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
return string(bytes.TrimSpace(b[:n])), nil
}
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 75a3b6c81..50caa7327 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -45,11 +45,11 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
- // kernel version >= 4.14 MaxLen
+ // Kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
- // kernel version <= 4.2 MinLen
+ // Kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
)
@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
switch statVersion {
case statVersion10:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport10UDPLen
- } else {
+ default:
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
}
case statVersion11:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport11UDPLen
- } else if protocol == "rdma" {
+ case "rdma":
expectedLength = fieldTransport11RDMAMinLen
- } else {
+ default:
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// For the udp RPC transport there is no connection count, connect idle time,
// or idle time (fields #3, #4, and #5); all other fields are the same. So
// we set them to 0 here.
- if protocol == "udp" {
+ switch protocol {
+ case "udp":
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
- } else if protocol == "tcp" {
+ case "tcp":
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
- } else if protocol == "rdma" {
+ case "rdma":
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 000000000..f50b38e35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+ netDevSNMP6 := make(NetDevSNMP6)
+
+ // The net/dev_snmp6 folders contain one file per interface
+ ifaceFiles, err := os.ReadDir(dir)
+ if err != nil {
+ // On systems with IPv6 disabled, this directory won't exist.
+ // Do nothing.
+ if errors.Is(err, os.ErrNotExist) {
+ return netDevSNMP6, err
+ }
+ return netDevSNMP6, err
+ }
+
+ for _, iFaceFile := range ifaceFiles {
+ f, err := os.Open(dir + "/" + iFaceFile.Name())
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ defer f.Close()
+
+ netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ }
+
+ return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+ m := make(map[string]uint64)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ key, val := stat[0], stat[1]
+
+ // Expect stat name to contain "6" or be "ifIndex"
+ if strings.Contains(key, "6") || key == "ifIndex" {
+ v, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return m, err
+ }
+
+ m[key] = v
+ }
+ }
+ return m, scanner.Err()
+}
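Because `FS.NetDevSNMP6` and `Proc.NetDevSNMP6` are exported, callers of the updated procfs can read the per-interface IPv6 counters directly. A minimal sketch follows; the counter name `Ip6InReceives` is an assumption based on typical `/proc/net/dev_snmp6/<iface>` contents and may vary by kernel.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // uses the default /proc mount point
	if err != nil {
		log.Fatal(err)
	}

	stats, err := fs.NetDevSNMP6() // map[iface]map[statName]uint64
	if err != nil {
		log.Fatal(err) // e.g. IPv6 disabled: /proc/net/dev_snmp6 is absent
	}

	for iface, counters := range stats {
		// "Ip6InReceives" is one of the kernel's per-interface IPv6 counters.
		fmt.Printf("%s: Ip6InReceives=%d\n", iface, counters["Ip6InReceives"])
	}
}
```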
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index b70f1fc7a..19e3378f7 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@ import (
)
const (
- // readLimit is used by io.LimitReader while reading the content of the
+ // Maximum size limit used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -50,12 +50,12 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
- // Drops shows the total number of dropped packets of all UPD sockets.
+ // Drops shows the total number of dropped packets of all UDP sockets.
Drops *uint64
}
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // A single line parser for fields from /proc/net/{t,u}dp{,6}.
+ // Fields which are not used by IPSocket are skipped.
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index b6c77b709..8d4b1ac05 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
if err != nil {
return nil, err
}
- if fields[4] == enabled {
+ switch fields[4] {
+ case enabled:
line.Pressure = 1
- } else if fields[4] == disabled {
+ case disabled:
line.Pressure = 0
- } else {
+ default:
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
- if fields[6] == enabled {
+ switch fields[6] {
+ case enabled:
line.Slab = true
- } else if fields[6] == disabled {
+ case disabled:
line.Slab = false
- } else {
+ default:
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
}
for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
+ switch capabilities[i] {
+ case "y":
*capabilityFields[i] = true
- } else if capabilities[i] == "n" {
+ case "n":
*capabilityFields[i] = false
- } else {
+ default:
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
index 527762955..0396d7201 100644
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@ type (
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index d868cebda..d7e0cacb4 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
return &nu, nil
}
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
l := len(fields)
- if l < min {
- return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+ if l < minFields {
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
}
// Field offsets are as follows:
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
}
// Path field is optional.
- if l > min {
+ if l > minFields {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 142796368..368187fa8 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -37,9 +37,9 @@ type Proc struct {
type Procs []Proc
var (
- ErrFileParse = errors.New("Error Parsing File")
- ErrFileRead = errors.New("Error Reading File")
- ErrMountPoint = errors.New("Error Accessing Mount point")
+ ErrFileParse = errors.New("error parsing file")
+ ErrFileRead = errors.New("error reading file")
+ ErrMountPoint = errors.New("error accessing mount point")
)
func (p Procs) Len() int { return len(p) }
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
if err != nil {
return Proc{}, err
}
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+ pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
if err != nil {
return Proc{}, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index daeed7f57..4a64347c0 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -24,7 +24,7 @@ import (
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index 776f34971..d15b66ddb 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
"read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
+ "cancelled_write_bytes: %d\n" //nolint:misspell
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
index 8e3ff4d79..4248c1716 100644
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = &value
+ procNetstat.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = &value
+ procNetstat.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = &value
+ procNetstat.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = &value
+ procNetstat.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = &value
+ procNetstat.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = &value
+ procNetstat.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = &value
+ procNetstat.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = &value
+ procNetstat.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = &value
+ procNetstat.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = &value
+ procNetstat.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = &value
+ procNetstat.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = &value
+ procNetstat.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = &value
+ procNetstat.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = &value
+ procNetstat.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = &value
+ procNetstat.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = &value
+ procNetstat.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = &value
+ procNetstat.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = &value
+ procNetstat.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = &value
+ procNetstat.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = &value
+ procNetstat.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = &value
+ procNetstat.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = &value
+ procNetstat.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = &value
+ procNetstat.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = &value
+ procNetstat.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = &value
+ procNetstat.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = &value
+ procNetstat.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = &value
+ procNetstat.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = &value
+ procNetstat.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = &value
+ procNetstat.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = &value
+ procNetstat.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = &value
+ procNetstat.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = &value
+ procNetstat.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = &value
+ procNetstat.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = &value
+ procNetstat.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = &value
+ procNetstat.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = &value
+ procNetstat.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = &value
+ procNetstat.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = &value
+ procNetstat.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = &value
+ procNetstat.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = &value
+ procNetstat.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = &value
+ procNetstat.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = &value
+ procNetstat.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = &value
+ procNetstat.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = &value
+ procNetstat.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = &value
+ procNetstat.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = &value
+ procNetstat.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = &value
+ procNetstat.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = &value
+ procNetstat.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = &value
+ procNetstat.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = &value
+ procNetstat.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = &value
+ procNetstat.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = &value
+ procNetstat.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = &value
+ procNetstat.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = &value
+ procNetstat.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = &value
+ procNetstat.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = &value
+ procNetstat.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = &value
+ procNetstat.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = &value
+ procNetstat.TCPRcvCoalesce = &value
case "TCPRcvQDrop":
- procNetstat.TcpExt.TCPRcvQDrop = &value
+ procNetstat.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = &value
+ procNetstat.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = &value
+ procNetstat.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = &value
+ procNetstat.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = &value
+ procNetstat.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = &value
+ procNetstat.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = &value
+ procNetstat.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = &value
+ procNetstat.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = &value
+ procNetstat.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
+ procNetstat.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
+ procNetstat.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
+ procNetstat.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = &value
+ procNetstat.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
+ procNetstat.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = &value
+ procNetstat.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = &value
+ procNetstat.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
+ procNetstat.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = &value
+ procNetstat.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
+ procNetstat.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = &value
+ procNetstat.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = &value
+ procNetstat.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = &value
+ procNetstat.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = &value
+ procNetstat.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = &value
+ procNetstat.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = &value
+ procNetstat.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
+ procNetstat.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = &value
+ procNetstat.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = &value
+ procNetstat.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
+ procNetstat.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
+ procNetstat.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = &value
+ procNetstat.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = &value
+ procNetstat.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = &value
+ procNetstat.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = &value
+ procNetstat.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = &value
+ procNetstat.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = &value
+ procNetstat.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = &value
+ procNetstat.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = &value
+ procNetstat.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = &value
+ procNetstat.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = &value
+ procNetstat.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = &value
+ procNetstat.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = &value
+ procNetstat.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = &value
+ procNetstat.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = &value
+ procNetstat.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = &value
+ procNetstat.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = &value
+ procNetstat.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = &value
+ procNetstat.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = &value
+ procNetstat.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = &value
+ procNetstat.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = &value
+ procNetstat.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = &value
+ procNetstat.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = &value
+ procNetstat.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = &value
+ procNetstat.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = &value
+ procNetstat.ReasmOverlaps = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index 09060e820..9a297afcf 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -19,7 +19,6 @@ package procfs
import (
"bufio"
"errors"
- "fmt"
"os"
"regexp"
"strconv"
@@ -29,7 +28,7 @@ import (
)
var (
- // match the header line before each mapped zone in `/proc/pid/smaps`.
+ // Match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
- fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
index b9d2cf642..4bdc90b07 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = &value
+ procSnmp.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = &value
+ procSnmp.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = &value
+ procSnmp.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = &value
+ procSnmp.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = &value
+ procSnmp.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = &value
+ procSnmp.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = &value
+ procSnmp.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = &value
+ procSnmp.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = &value
+ procSnmp.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = &value
+ procSnmp.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = &value
+ procSnmp.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = &value
+ procSnmp.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = &value
+ procSnmp.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = &value
+ procSnmp.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = &value
+ procSnmp.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = &value
+ procSnmp.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = &value
+ procSnmp.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = &value
+ procSnmp.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = &value
+ procSnmp.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = &value
+ procSnmp.InMsgs = &value
case "InErrors":
procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = &value
+ procSnmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = &value
+ procSnmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = &value
+ procSnmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = &value
+ procSnmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = &value
+ procSnmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = &value
+ procSnmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = &value
+ procSnmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = &value
+ procSnmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = &value
+ procSnmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = &value
+ procSnmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = &value
+ procSnmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = &value
+ procSnmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = &value
+ procSnmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = &value
+ procSnmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = &value
+ procSnmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = &value
+ procSnmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = &value
+ procSnmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = &value
+ procSnmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = &value
+ procSnmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = &value
+ procSnmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = &value
+ procSnmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = &value
+ procSnmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = &value
+ procSnmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = &value
+ procSnmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = &value
+ procSnmp.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = &value
+ procSnmp.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = &value
+ procSnmp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = &value
+ procSnmp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = &value
+ procSnmp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = &value
+ procSnmp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = &value
+ procSnmp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = &value
+ procSnmp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = &value
+ procSnmp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = &value
+ procSnmp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = &value
+ procSnmp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = &value
+ procSnmp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = &value
+ procSnmp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = &value
+ procSnmp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = &value
+ procSnmp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = &value
+ procSnmp.OutRsts = &value
case "InCsumErrors":
procSnmp.Tcp.InCsumErrors = &value
}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
index 3059cc6a1..fb7fd3995 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = &value
+ procSnmp6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = &value
+ procSnmp6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = &value
+ procSnmp6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = &value
+ procSnmp6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = &value
+ procSnmp6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = &value
+ procSnmp6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = &value
+ procSnmp6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = &value
+ procSnmp6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = &value
+ procSnmp6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = &value
+ procSnmp6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = &value
+ procSnmp6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = &value
+ procSnmp6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = &value
+ procSnmp6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = &value
+ procSnmp6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = &value
+ procSnmp6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = &value
+ procSnmp6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = &value
+ procSnmp6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = &value
+ procSnmp6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = &value
+ procSnmp6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = &value
+ procSnmp6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = &value
+ procSnmp6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = &value
+ procSnmp6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = &value
+ procSnmp6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = &value
+ procSnmp6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = &value
+ procSnmp6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = &value
+ procSnmp6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = &value
+ procSnmp6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = &value
+ procSnmp6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = &value
+ procSnmp6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = &value
+ procSnmp6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = &value
+ procSnmp6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = &value
+ procSnmp6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = &value
+ procSnmp6.InMsgs = &value
case "InErrors":
procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = &value
+ procSnmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = &value
+ procSnmp6.OutErrors = &value
case "InCsumErrors":
procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = &value
+ procSnmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = &value
+ procSnmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = &value
+ procSnmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = &value
+ procSnmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = &value
+ procSnmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = &value
+ procSnmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = &value
+ procSnmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = &value
+ procSnmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = &value
+ procSnmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = &value
+ procSnmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = &value
+ procSnmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = &value
+ procSnmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = &value
+ procSnmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = &value
+ procSnmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = &value
+ procSnmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = &value
+ procSnmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = &value
+ procSnmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = &value
+ procSnmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = &value
+ procSnmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = &value
+ procSnmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = &value
+ procSnmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = &value
+ procSnmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = &value
+ procSnmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = &value
+ procSnmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = &value
+ procSnmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = &value
+ procSnmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = &value
+ procSnmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = &value
+ procSnmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = &value
+ procSnmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = &value
+ procSnmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = &value
+ procSnmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = &value
+ procSnmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = &value
+ procSnmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = &value
+ procSnmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = &value
+ procSnmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = &value
+ procSnmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = &value
+ procSnmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = &value
+ procSnmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = &value
+ procSnmp6.OutType143 = &value
}
case "Udp6":
switch key {
@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "InCsumErrors":
procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = &value
+ procSnmp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index a055197c6..dd8aa5688 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
}
}
case "NSpid":
- s.NSpids = calcNSPidsList(vString)
+ nspids, err := calcNSPidsList(vString)
+ if err != nil {
+ return err
+ }
+ s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
return g
}
-func calcNSPidsList(nspidsString string) []uint64 {
- s := strings.Split(nspidsString, " ")
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+ s := strings.Split(nspidsString, "\t")
var nspids []uint64
for _, nspid := range s {
- nspid, _ := strconv.ParseUint(nspid, 10, 64)
- if nspid == 0 {
- continue
+ nspid, err := strconv.ParseUint(nspid, 10, 64)
+ if err != nil {
+ return nil, err
}
nspids = append(nspids, nspid)
}
- return nspids
+ return nspids, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
index 5eefbe2ef..3810d1ac9 100644
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -21,7 +21,7 @@ import (
)
func sysctlToPath(sysctl string) string {
- return strings.Replace(sysctl, ".", "/", -1)
+ return strings.ReplaceAll(sysctl, ".", "/")
}
func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
index 28708e074..403e6ae70 100644
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
if len(parts) < 2 {
continue
}
- switch {
- case parts[0] == "HI:":
+ switch parts[0] {
+ case "HI:":
perCPU := parts[1:]
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "TIMER:":
+ case "TIMER:":
perCPU := parts[1:]
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "NET_TX:":
+ case "NET_TX:":
perCPU := parts[1:]
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "NET_RX:":
+ case "NET_RX:":
perCPU := parts[1:]
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "BLOCK:":
+ case "BLOCK:":
perCPU := parts[1:]
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "IRQ_POLL:":
+ case "IRQ_POLL:":
perCPU := parts[1:]
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "TASKLET:":
+ case "TASKLET:":
perCPU := parts[1:]
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "SCHED:":
+ case "SCHED:":
perCPU := parts[1:]
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "HRTIMER:":
+ case "HRTIMER:":
perCPU := parts[1:]
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "RCU:":
+ case "RCU:":
perCPU := parts[1:]
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md
index 7360d188c..df0af73cc 100644
--- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md
+++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md
@@ -1,4 +1,4 @@
-## Summary
+# Prometheus Benchmarks
Using the Prometheus bridge and the OTLP exporter adds roughly ~50% to the CPU and memory overhead of an application compared to serving a Prometheus HTTP endpoint for metrics.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go
index 6df851609..4161ec624 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go
@@ -5,13 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime"
// Version is the current release version of the runtime instrumentation.
func Version() string {
- return "0.60.0"
+ return "0.61.0"
// This string is updated by the pre_release.sh script during release
}
-
-// SemVersion is the semantic version to be supplied to tracer/meter creation.
-//
-// Deprecated: Use [Version] instead.
-func SemVersion() string {
- return Version()
-}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go
index cd33a1682..d0cc79d54 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go
@@ -13,6 +13,7 @@ import (
"strconv"
"strings"
"time"
+ "unicode"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
@@ -359,8 +360,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
-// These settings do not define any network retry strategy. That is entirely
-// handled by the gRPC ClientConn.
+// These settings define the retry strategy implemented by the exporter.
+// These settings do not define any network retry strategy.
+// That is handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
@@ -442,13 +444,15 @@ func convHeaders(s string) (map[string]string, error) {
continue
}
- escKey, e := url.PathUnescape(rawKey)
- if e != nil {
+ key := strings.TrimSpace(rawKey)
+
+ // Validate the key.
+ if !isValidHeaderKey(key) {
err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
continue
}
- key := strings.TrimSpace(escKey)
+ // Only decode the value.
escVal, e := url.PathUnescape(rawVal)
if e != nil {
err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
@@ -651,3 +655,22 @@ func fallback[T any](val T) resolver[T] {
return s
}
}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
index f2da12382..896c3a303 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"
- "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/backoff/v5"
)
// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
}
b.Reset()
+ maxElapsedTime := c.MaxElapsedTime
+ startTime := time.Now()
+
for {
err := fn(ctx)
if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
+ if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
+ bOff := b.NextBackOff()
+ delay := max(throttle, bOff)
+
+ elapsed := time.Since(startTime)
+ if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
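
Because backoff/v5 removed MaxElapsedTime, Stop, and Clock from ExponentialBackOff, the regenerated template above tracks the elapsed budget itself with a start time and time.Since. A reduced, self-contained sketch of that pattern is below; the retryFor helper, interval values, and main are assumptions for demonstration, not the vendored RequestFunc.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v5"
)

// retryFor keeps retrying fn until it succeeds or the caller-tracked
// elapsed-time budget is exceeded, sleeping for the exponential backoff
// between attempts.
func retryFor(maxElapsed time.Duration, fn func() error) error {
	b := &backoff.ExponentialBackOff{
		InitialInterval:     100 * time.Millisecond,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         time.Second,
	}
	b.Reset()

	start := time.Now()
	for {
		err := fn()
		if err == nil {
			return nil
		}
		if maxElapsed != 0 && time.Since(start) > maxElapsed {
			return fmt.Errorf("max retry time elapsed: %w", err)
		}
		time.Sleep(b.NextBackOff())
	}
}

func main() {
	attempts := 0
	err := retryFor(2*time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}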
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go
index dfeecf596..7bb3967f7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlplog/transform/log.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue {
return converted
}
-// Attrs transforms a slice of [api.KeyValue] into OTLP key-values.
+// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values.
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
if len(attrs) == 0 {
return nil
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go
index a68ed0591..954597340 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go
@@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o
// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use.
func Version() string {
- return "0.11.0"
+ return "0.12.2"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go
index 279b4be4f..3af60258a 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go
@@ -44,20 +44,23 @@ func newNoopClient() *client {
// newHTTPClient creates a new HTTP log client.
func newHTTPClient(cfg config) (*client, error) {
- hc := &http.Client{
- Transport: ourTransport,
- Timeout: cfg.timeout.Value,
- }
+ hc := cfg.httpClient
+ if hc == nil {
+ hc = &http.Client{
+ Transport: ourTransport,
+ Timeout: cfg.timeout.Value,
+ }
- if cfg.tlsCfg.Value != nil || cfg.proxy.Value != nil {
- clonedTransport := ourTransport.Clone()
- hc.Transport = clonedTransport
+ if cfg.tlsCfg.Value != nil || cfg.proxy.Value != nil {
+ clonedTransport := ourTransport.Clone()
+ hc.Transport = clonedTransport
- if cfg.tlsCfg.Value != nil {
- clonedTransport.TLSClientConfig = cfg.tlsCfg.Value
- }
- if cfg.proxy.Value != nil {
- clonedTransport.Proxy = cfg.proxy.Value
+ if cfg.tlsCfg.Value != nil {
+ clonedTransport.TLSClientConfig = cfg.tlsCfg.Value
+ }
+ if cfg.proxy.Value != nil {
+ clonedTransport.Proxy = cfg.proxy.Value
+ }
}
}
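
The client construction above now prefers a caller-supplied *http.Client and, when building the default one, clones the shared transport before mutating TLS or proxy settings. A small illustrative sketch of that idiom, with buildClient and its parameters invented for the example:

package main

import (
	"crypto/tls"
	"net/http"
	"net/url"
	"time"
)

// buildClient: a caller-supplied client wins outright; otherwise a default
// client is built and the shared transport is cloned before TLS or proxy
// settings are applied, so the package-level transport is never mutated.
func buildClient(user *http.Client, tlsCfg *tls.Config, proxy func(*http.Request) (*url.URL, error), timeout time.Duration) *http.Client {
	if user != nil {
		return user
	}
	base := http.DefaultTransport.(*http.Transport) // stands in for the package's ourTransport
	hc := &http.Client{Transport: base, Timeout: timeout}

	if tlsCfg != nil || proxy != nil {
		cloned := base.Clone()
		hc.Transport = cloned
		if tlsCfg != nil {
			cloned.TLSClientConfig = tlsCfg
		}
		if proxy != nil {
			cloned.Proxy = proxy
		}
	}
	return hc
}

func main() {
	hc := buildClient(nil, &tls.Config{MinVersion: tls.VersionTLS12}, nil, 10*time.Second)
	_ = hc
}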
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go
index bfe768091..66140f3fe 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"time"
+ "unicode"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry"
@@ -94,6 +95,7 @@ type config struct {
timeout setting[time.Duration]
proxy setting[HTTPTransportProxyFunc]
retryCfg setting[retry.Config]
+ httpClient *http.Client
}
func newConfig(options []Option) config {
@@ -343,6 +345,25 @@ func WithProxy(pf HTTPTransportProxyFunc) Option {
})
}
+// WithHTTPClient sets the HTTP client to be used by the exporter.
+//
+// This option will take precedence over [WithProxy], [WithTimeout],
+// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE,
+// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT,
+// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT environment variables.
+//
+// Timeout and all other fields of the passed [http.Client] are left intact.
+//
+// Be aware that passing an HTTP client with a transport like
+// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can
+// cause the client to be instrumented twice and cause infinite recursion.
+func WithHTTPClient(c *http.Client) Option {
+ return fnOpt(func(cfg config) config {
+ cfg.httpClient = c
+ return cfg
+ })
+}
+
// setting is a configuration setting value.
type setting[T any] struct {
Value T
@@ -544,13 +565,15 @@ func convHeaders(s string) (map[string]string, error) {
continue
}
- escKey, e := url.PathUnescape(rawKey)
- if e != nil {
+ key := strings.TrimSpace(rawKey)
+
+ // Validate the key.
+ if !isValidHeaderKey(key) {
err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
continue
}
- key := strings.TrimSpace(escKey)
+ // Only decode the value.
escVal, e := url.PathUnescape(rawVal)
if e != nil {
err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
@@ -600,3 +623,22 @@ func fallback[T any](val T) resolver[T] {
return s
}
}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
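
For the new WithHTTPClient option added above, a possible caller-side sketch is shown below, assuming the package's usual New constructor; the timeout value is arbitrary and the snippet is not taken from the vendored code.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp"
)

func main() {
	// Caller-owned client: its Timeout and Transport are used as-is and take
	// precedence over WithTimeout/WithTLSClientConfig/WithProxy and the
	// corresponding OTEL_EXPORTER_OTLP_* environment variables.
	hc := &http.Client{Timeout: 5 * time.Second}

	exp, err := otlploghttp.New(context.Background(), otlploghttp.WithHTTPClient(hc))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}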
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go
index 661576ce2..bd9a750a1 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"
- "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/backoff/v5"
)
// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
}
b.Reset()
+ maxElapsedTime := c.MaxElapsedTime
+ startTime := time.Now()
+
for {
err := fn(ctx)
if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
+ if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
+ bOff := b.NextBackOff()
+ delay := max(throttle, bOff)
+
+ elapsed := time.Since(startTime)
+ if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go
index adf407800..c3d9710c2 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlplog/transform/log.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -257,7 +257,7 @@ func stringSliceValues(vals []string) []*cpb.AnyValue {
return converted
}
-// Attrs transforms a slice of [api.KeyValue] into OTLP key-values.
+// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values.
func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue {
if len(attrs) == 0 {
return nil
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go
index 8315200fa..9702a4c0b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go
@@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use.
func Version() string {
- return "0.11.0"
+ return "0.12.2"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
index db6e3714b..c831bb60b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
@@ -238,8 +238,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
-// These settings do not define any network retry strategy. That is entirely
-// handled by the gRPC ClientConn.
+// These settings define the retry strategy implemented by the exporter.
+// They do not define any network retry strategy; that part is handled by
+// the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
index 261f55026..2cd98b929 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -1,9 +1,11 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package envconfig provides functionality to parse configuration from
+// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
import (
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
index 95e2f4ba3..b29cd11a6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
@@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package internal provides internal functionality for the otlpmetricgrpc package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
index 7ae53f2d1..b54a173b6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert(
+ "CLIENT_CERTIFICATE",
+ "CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
+ envconfig.WithClientCert(
+ "METRICS_CLIENT_CERTIFICATE",
+ "METRICS_CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
@@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption {
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
- withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
- withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+ withEnvTemporalityPreference(
+ "METRICS_TEMPORALITY_PREFERENCE",
+ func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) },
+ ),
+ withEnvAggPreference(
+ "METRICS_DEFAULT_HISTOGRAM_AGGREGATION",
+ func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) },
+ ),
)
return opts
@@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
case "lowmemory":
fn(lowMemory)
default:
- global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+ global.Warn(
+ "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.",
+ "value",
+ s,
+ )
}
}
}
@@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e
return metric.DefaultAggregationSelector(kind)
})
default:
- global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+ global.Warn(
+ "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.",
+ "value",
+ s,
+ )
}
}
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
index 2ac8db5a8..cb77ae6a9 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
@@ -1,9 +1,10 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package oconf provides configuration for the otlpmetric exporters.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
import (
@@ -56,13 +57,15 @@ type (
Timeout time.Duration
URLPath string
- // gRPC configurations
- GRPCCredentials credentials.TransportCredentials
-
TemporalitySelector metric.TemporalitySelector
AggregationSelector metric.AggregationSelector
- Proxy HTTPTransportProxyFunc
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ // HTTP configurations
+ Proxy HTTPTransportProxyFunc
+ HTTPClient *http.Client
}
Config struct {
@@ -372,3 +375,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}
+
+func WithHTTPClient(c *http.Client) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.HTTPClient = c
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
index 83f6d7fd1..c18a6b1f2 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
index 03e7fbcdf..e4547b3a6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
index 50e25fdbc..6af5591ea 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
index cc3a77055..37cc6c519 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"
- "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/backoff/v5"
)
// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
}
b.Reset()
+ maxElapsedTime := c.MaxElapsedTime
+ startTime := time.Now()
+
for {
err := fn(ctx)
if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
+ if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
+ bOff := b.NextBackOff()
+ delay := max(throttle, bOff)
+
+ elapsed := time.Since(startTime)
+ if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
index 2605c74d0..cb70a9c41 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
index d31652b4d..f03bfec41 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
index abf7f0219..9c156e91b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
// returned if the temporality of h is unknown.
-func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+func ExponentialHistogram[N int64 | float64](
+ h metricdata.ExponentialHistogram[N],
+) (*mpb.Metric_ExponentialHistogram, error) {
t, err := Temporality(h.Temporality)
if err != nil {
return nil, err
@@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
// from dPts.
-func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+func ExponentialHistogramDataPoints[N int64 | float64](
+ dPts []metricdata.ExponentialHistogramDataPoint[N],
+) []*mpb.ExponentialHistogramDataPoint {
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
for _, dPt := range dPts {
sum := float64(dPt.Sum)
@@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
// from bucket.
-func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+func ExponentialHistogramDataPointBuckets(
+ bucket metricdata.ExponentialBucket,
+) *mpb.ExponentialHistogramDataPoint_Buckets {
return &mpb.ExponentialHistogramDataPoint_Buckets{
Offset: bucket.Offset,
BucketCounts: bucket.Counts,
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
index 0b5dec3ac..58859f2c2 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
- return "1.35.0"
+ return "1.36.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
index 86da30e37..23f1f0031 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
@@ -55,20 +55,23 @@ var ourTransport = &http.Transport{
// newClient creates a new HTTP metric client.
func newClient(cfg oconf.Config) (*client, error) {
- httpClient := &http.Client{
- Transport: ourTransport,
- Timeout: cfg.Metrics.Timeout,
- }
+ httpClient := cfg.Metrics.HTTPClient
+ if httpClient == nil {
+ httpClient = &http.Client{
+ Transport: ourTransport,
+ Timeout: cfg.Metrics.Timeout,
+ }
- if cfg.Metrics.TLSCfg != nil || cfg.Metrics.Proxy != nil {
- clonedTransport := ourTransport.Clone()
- httpClient.Transport = clonedTransport
+ if cfg.Metrics.TLSCfg != nil || cfg.Metrics.Proxy != nil {
+ clonedTransport := ourTransport.Clone()
+ httpClient.Transport = clonedTransport
- if cfg.Metrics.TLSCfg != nil {
- clonedTransport.TLSClientConfig = cfg.Metrics.TLSCfg
- }
- if cfg.Metrics.Proxy != nil {
- clonedTransport.Proxy = cfg.Metrics.Proxy
+ if cfg.Metrics.TLSCfg != nil {
+ clonedTransport.TLSClientConfig = cfg.Metrics.TLSCfg
+ }
+ if cfg.Metrics.Proxy != nil {
+ clonedTransport.Proxy = cfg.Metrics.Proxy
+ }
}
}
@@ -277,7 +280,7 @@ type request struct {
// reset reinitializes the request Body and uses ctx for the request.
func (r *request) reset(ctx context.Context) {
r.Body = r.bodyReader()
- r.Request = r.Request.WithContext(ctx)
+ r.Request = r.WithContext(ctx)
}
// retryableError represents a request failure that can be retried.
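
The one-line change to reset above relies on Go method promotion: with *http.Request embedded in the request struct, r.WithContext(ctx) is the same call as r.Request.WithContext(ctx). A tiny self-contained illustration follows; the struct shape mirrors the hunk, while the URL and context key are made up for the example.

package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKey struct{}

// request embeds *http.Request, so its methods (WithContext, Context, ...)
// are promoted onto request itself.
type request struct {
	*http.Request
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "http://collector.example/v1/metrics", nil)
	r := request{Request: req}

	ctx := context.WithValue(context.Background(), ctxKey{}, "demo")
	r.Request = r.WithContext(ctx) // identical to r.Request.WithContext(ctx)

	fmt.Println(r.Context().Value(ctxKey{})) // demo
}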
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
index bf05adcf1..2b144f7eb 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
@@ -222,3 +222,19 @@ func WithAggregationSelector(selector metric.AggregationSelector) Option {
func WithProxy(pf HTTPTransportProxyFunc) Option {
return wrappedOption{oconf.WithProxy(oconf.HTTPTransportProxyFunc(pf))}
}
+
+// WithHTTPClient sets the HTTP client to be used by the exporter.
+//
+// This option will take precedence over [WithProxy], [WithTimeout],
+// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE,
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT environment variables.
+//
+// Timeout and all other fields of the passed [http.Client] are left intact.
+//
+// Be aware that passing an HTTP client with a transport like
+// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can
+// cause the client to be instrumented twice and cause infinite recursion.
+func WithHTTPClient(c *http.Client) Option {
+ return wrappedOption{oconf.WithHTTPClient(c)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
index 7ac42759f..8be035fca 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
@@ -1,9 +1,11 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package envconfig provides functionality to parse configuration from
+// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
import (
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
index 1b379f10c..8849f341a 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
@@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package internal provides internal functionality for the otlpmetrichttp package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
index 89b134a39..ef318ac67 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert(
+ "CLIENT_CERTIFICATE",
+ "CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
+ envconfig.WithClientCert(
+ "METRICS_CLIENT_CERTIFICATE",
+ "METRICS_CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
@@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption {
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
- withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
- withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
+ withEnvTemporalityPreference(
+ "METRICS_TEMPORALITY_PREFERENCE",
+ func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) },
+ ),
+ withEnvAggPreference(
+ "METRICS_DEFAULT_HISTOGRAM_AGGREGATION",
+ func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) },
+ ),
)
return opts
@@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
case "lowmemory":
fn(lowMemory)
default:
- global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
+ global.Warn(
+ "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.",
+ "value",
+ s,
+ )
}
}
}
@@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e
return metric.DefaultAggregationSelector(kind)
})
default:
- global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
+ global.Warn(
+ "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.",
+ "value",
+ s,
+ )
}
}
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
index db595e49e..cfe629a97 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
@@ -1,9 +1,10 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package oconf provides configuration for the otlpmetric exporters.
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
import (
@@ -56,13 +57,15 @@ type (
Timeout time.Duration
URLPath string
- // gRPC configurations
- GRPCCredentials credentials.TransportCredentials
-
TemporalitySelector metric.TemporalitySelector
AggregationSelector metric.AggregationSelector
- Proxy HTTPTransportProxyFunc
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ // HTTP configurations
+ Proxy HTTPTransportProxyFunc
+ HTTPClient *http.Client
}
Config struct {
@@ -372,3 +375,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}
+
+func WithHTTPClient(c *http.Client) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Metrics.HTTPClient = c
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
index ae3d09787..d7b005c97 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
index f603dc605..e335cbd09 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
index ed93844a4..c3b57c57c 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
index a9a08ffe6..c855bdc93 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"
- "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/backoff/v5"
)
// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
}
b.Reset()
+ maxElapsedTime := c.MaxElapsedTime
+ startTime := time.Now()
+
for {
err := fn(ctx)
if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
+ if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
+ bOff := b.NextBackOff()
+ delay := max(throttle, bOff)
+
+ elapsed := time.Since(startTime)
+ if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
index d607da78e..6c9787189 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
index bb6d21f0b..f65c87cbf 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
index 8207b15a4..5e5f26aa4 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
// returned if the temporality of h is unknown.
-func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
+func ExponentialHistogram[N int64 | float64](
+ h metricdata.ExponentialHistogram[N],
+) (*mpb.Metric_ExponentialHistogram, error) {
t, err := Temporality(h.Temporality)
if err != nil {
return nil, err
@@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
// from dPts.
-func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
+func ExponentialHistogramDataPoints[N int64 | float64](
+ dPts []metricdata.ExponentialHistogramDataPoint[N],
+) []*mpb.ExponentialHistogramDataPoint {
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
for _, dPt := range dPts {
sum := float64(dPt.Sum)
@@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
// from bucket.
-func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
+func ExponentialHistogramDataPointBuckets(
+ bucket metricdata.ExponentialBucket,
+) *mpb.ExponentialHistogramDataPoint_Buckets {
return &mpb.ExponentialHistogramDataPoint_Buckets{
Offset: bucket.Offset,
BucketCounts: bucket.Counts,
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
index 2a67f5800..528533321 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
@@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
func Version() string {
- return "1.35.0"
+ return "1.36.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
index 4571a5ca3..ca4544f0d 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -1,6 +1,8 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package tracetransform provides conversion functionality for the otlptrace
+// exporters.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
import (
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
index 4abf48d1f..6eacdf311 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -1,9 +1,11 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package envconfig provides functionality to parse configuration from
+// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
import (
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
index 97cd6c54f..b6e6b10fb 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package internal provides internal functionality for the otlptracegrpc package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
index 7bb189a94..1d840be20 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert(
+ "CLIENT_CERTIFICATE",
+ "CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
+ envconfig.WithClientCert(
+ "TRACES_CLIENT_CERTIFICATE",
+ "TRACES_CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 0a317d926..506ca00b6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -1,9 +1,10 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package otlpconfig provides configuration for the otlptrace exporters.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
import (
@@ -52,7 +53,9 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
- Proxy HTTPTransportProxyFunc
+ // HTTP configurations
+ Proxy HTTPTransportProxyFunc
+ HTTPClient *http.Client
}
Config struct {
@@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}
+
+func WithHTTPClient(c *http.Client) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.HTTPClient = c
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
index 3d4f699d4..918490387 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
index 38b97a013..ba6e41183 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index a12ea4c48..1c4659423 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 1c5450ab6..777e68a7b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"
- "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/backoff/v5"
)
// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
}
b.Reset()
+ maxElapsedTime := c.MaxElapsedTime
+ startTime := time.Now()
+
for {
err := fn(ctx)
if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
+ if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
+ bOff := b.NextBackOff()
+ delay := max(throttle, bOff)
+
+ elapsed := time.Since(startTime)
+ if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
index 00ab1f20c..2da229870 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -199,8 +199,9 @@ func WithTimeout(duration time.Duration) Option {
// explicitly returns a backoff time in the response. That time will take
// precedence over these settings.
//
-// These settings do not define any network retry strategy. That is entirely
-// handled by the gRPC ClientConn.
+// These settings define the retry strategy implemented by the exporter.
+// They do not define any network retry strategy; that part is handled by
+// the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
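
The reworded doc comment above describes the exporter-level retry policy that WithRetry configures. A hedged usage sketch follows, assuming the package's usual New constructor and RetryConfig fields; the interval values are arbitrary.

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Explicit retry policy for retryable export errors. A server-supplied
	// backoff hint in the response still takes precedence, and transport-level
	// retries remain the gRPC ClientConn's job.
	exp, err := otlptracegrpc.New(context.Background(),
		otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}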
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
index 16c006b2c..583a8f867 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go
@@ -71,20 +71,24 @@ var _ otlptrace.Client = (*client)(nil)
func NewClient(opts ...Option) otlptrace.Client {
cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
- httpClient := &http.Client{
- Transport: ourTransport,
- Timeout: cfg.Traces.Timeout,
- }
-
- if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil {
- clonedTransport := ourTransport.Clone()
- httpClient.Transport = clonedTransport
+ httpClient := cfg.Traces.HTTPClient
- if cfg.Traces.TLSCfg != nil {
- clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg
+ if httpClient == nil {
+ httpClient = &http.Client{
+ Transport: ourTransport,
+ Timeout: cfg.Traces.Timeout,
}
- if cfg.Traces.Proxy != nil {
- clonedTransport.Proxy = cfg.Traces.Proxy
+
+ if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil {
+ clonedTransport := ourTransport.Clone()
+ httpClient.Transport = clonedTransport
+
+ if cfg.Traces.TLSCfg != nil {
+ clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg
+ }
+ if cfg.Traces.Proxy != nil {
+ clonedTransport.Proxy = cfg.Traces.Proxy
+ }
}
}
@@ -300,7 +304,7 @@ type request struct {
// reset reinitializes the request Body and uses ctx for the request.
func (r *request) reset(ctx context.Context) {
r.Body = r.bodyReader()
- r.Request = r.Request.WithContext(ctx)
+ r.Request = r.WithContext(ctx)
}
// retryableError represents a request failure that can be retried.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
index f30bb66ae..f4385fb4e 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
@@ -1,9 +1,11 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package envconfig provides functionality to parse configuration from
+// environment variables.
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig"
import (
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
index e4142b9d7..3d344dc85 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go
@@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package internal provides internal functionality for the otlptracehttp package.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
index ff4141b6d..121b02f5c 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption {
}),
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
- envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
- envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert(
+ "CLIENT_CERTIFICATE",
+ "CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
+ envconfig.WithClientCert(
+ "TRACES_CLIENT_CERTIFICATE",
+ "TRACES_CLIENT_KEY",
+ func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} },
+ ),
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
index 6a9c4d3a6..c857db056 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
@@ -1,9 +1,10 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
+// Package otlpconfig provides configuration for the otlptrace exporters.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
import (
@@ -52,7 +53,9 @@ type (
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
- Proxy HTTPTransportProxyFunc
+ // HTTP configurations
+ Proxy HTTPTransportProxyFunc
+ HTTPClient *http.Client
}
Config struct {
@@ -349,3 +352,10 @@ func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
return cfg
})
}
+
+func WithHTTPClient(c *http.Client) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.HTTPClient = c
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
index bc4db0595..6a52b58cc 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
index dd6f12b22..5b389cb03 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
index 9e04a9bc1..418e66428 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go
// Copyright The OpenTelemetry Authors
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
index 86c4819f4..e9d35c7fa 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
@@ -1,4 +1,4 @@
-// Code created by gotmpl. DO NOT MODIFY.
+// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl
// Copyright The OpenTelemetry Authors
@@ -14,7 +14,7 @@ import (
"fmt"
"time"
- "github.com/cenkalti/backoff/v4"
+ "github.com/cenkalti/backoff/v5"
)
// DefaultConfig are the recommended defaults to use.
@@ -77,12 +77,12 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: c.MaxInterval,
- MaxElapsedTime: c.MaxElapsedTime,
- Stop: backoff.Stop,
- Clock: backoff.SystemClock,
}
b.Reset()
+ maxElapsedTime := c.MaxElapsedTime
+ startTime := time.Now()
+
for {
err := fn(ctx)
if err == nil {
@@ -94,21 +94,17 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
- bOff := b.NextBackOff()
- if bOff == backoff.Stop {
+ if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
// Wait for the greater of the backoff or throttle delay.
- var delay time.Duration
- if bOff > throttle {
- delay = bOff
- } else {
- elapsed := b.GetElapsedTime()
- if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
- return fmt.Errorf("max retry time would elapse: %w", err)
- }
- delay = throttle
+ bOff := b.NextBackOff()
+ delay := max(throttle, bOff)
+
+ elapsed := time.Since(startTime)
+ if maxElapsedTime != 0 && elapsed+throttle > maxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
}
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
index 3559c5664..cfe21dbfb 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
@@ -153,3 +153,19 @@ func WithRetry(rc RetryConfig) Option {
func WithProxy(pf HTTPTransportProxyFunc) Option {
return wrappedOption{otlpconfig.WithProxy(otlpconfig.HTTPTransportProxyFunc(pf))}
}
+
+// WithHTTPClient sets the HTTP client to be used by the exporter.
+//
+// This option will take precedence over [WithProxy], [WithTimeout],
+// [WithTLSClientConfig] options as well as OTEL_EXPORTER_OTLP_CERTIFICATE,
+// OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, OTEL_EXPORTER_OTLP_TIMEOUT,
+// OTEL_EXPORTER_OTLP_TRACES_TIMEOUT environment variables.
+//
+// Timeout and all other fields of the passed [http.Client] are left intact.
+//
+// Be aware that passing an HTTP client with a transport like
+// [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp.NewTransport] can
+// cause the client to be instrumented twice and cause infinite recursion.
+func WithHTTPClient(c *http.Client) Option {
+ return wrappedOption{otlpconfig.WithHTTPClient(c)}
+}
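WithHTTPClient, added above, hands the exporter a caller-owned *http.Client; as the doc comment warns, its Transport and Timeout are then used as-is. A minimal usage sketch (endpoint and timeout values are illustrative):

package example

import (
	"context"
	"net/http"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

// newHTTPTraceExporter supplies a custom client, so TLS and proxy settings
// must already be configured on its Transport by the caller.
func newHTTPTraceExporter(ctx context.Context) (*otlptrace.Exporter, error) {
	httpClient := &http.Client{Timeout: 10 * time.Second}
	return otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("localhost:4318"), // illustrative endpoint
		otlptracehttp.WithInsecure(),
		otlptracehttp.WithHTTPClient(httpClient),
	)
}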
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index f5cad46b7..5f78bfdfb 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
- return "1.35.0"
+ return "1.36.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
index 660675dd6..ceb2d63e2 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
@@ -5,11 +5,13 @@ package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
import (
"strings"
+ "sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/metric"
)
@@ -25,6 +27,12 @@ type config struct {
resourceAttributesFilter attribute.Filter
}
+var logDeprecatedLegacyScheme = sync.OnceFunc(func() {
+ global.Warn(
+ "prometheus exporter legacy scheme deprecated: support for the legacy NameValidationScheme will be removed in a future release",
+ )
+})
+
// newConfig creates a validated config configured with options.
func newConfig(opts ...Option) config {
cfg := config{}
@@ -132,7 +140,8 @@ func WithoutScopeInfo() Option {
// have special behavior based on their name.
func WithNamespace(ns string) Option {
return optionFunc(func(cfg config) config {
- if model.NameValidationScheme != model.UTF8Validation {
+ if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
+ logDeprecatedLegacyScheme()
// Only sanitize if prometheus does not support UTF-8.
ns = model.EscapeName(ns, model.NameEscapingScheme)
}
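For context on how WithNamespace is consumed, here is a minimal sketch of wiring the Prometheus exporter into a MeterProvider; the namespace value is illustrative and the SDK calls are assumed from the public otel metric SDK, not from this diff.

package example

import (
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// newMeterProvider prefixes every exported metric name with the namespace;
// under the legacy validation scheme the name is escaped as shown above.
func newMeterProvider() (*sdkmetric.MeterProvider, error) {
	exp, err := prometheus.New(prometheus.WithNamespace("gotosocial"))
	if err != nil {
		return nil, err
	}
	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exp)), nil
}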
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
index a8677e93a..e0959641c 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
@@ -8,6 +8,7 @@ import (
"encoding/hex"
"errors"
"fmt"
+ "math"
"slices"
"strings"
"sync"
@@ -40,7 +41,15 @@ const (
spanIDExemplarKey = "span_id"
)
-var errScopeInvalid = errors.New("invalid scope")
+var (
+ errScopeInvalid = errors.New("invalid scope")
+
+ metricsPool = sync.Pool{
+ New: func() interface{} {
+ return &metricdata.ResourceMetrics{}
+ },
+ }
+)
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
// interface for easy instantiation with a MeterProvider.
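The metricsPool added above replaces the per-call allocation flagged in the removed TODO: Collect (next hunk) borrows a *metricdata.ResourceMetrics from the pool, lets reader.Collect repopulate it, and returns it when done. A standalone sketch of that borrow/return pattern with an illustrative payload type:

package main

import (
	"fmt"
	"sync"
)

type scratch struct{ buf []byte }

var scratchPool = sync.Pool{
	New: func() interface{} { return &scratch{} },
}

func handle(payload string) {
	// Get may hand back a previously used value, so it must be fully
	// overwritten (or reset) before use, much as the exporter's Collect
	// lets the reader repopulate the pooled ResourceMetrics.
	s := scratchPool.Get().(*scratch)
	defer scratchPool.Put(s)

	s.buf = append(s.buf[:0], payload...)
	fmt.Println(string(s.buf))
}

func main() { handle("hello") }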
@@ -144,9 +153,9 @@ func (c *collector) Describe(ch chan<- *prometheus.Desc) {
//
// This method is safe to call concurrently.
func (c *collector) Collect(ch chan<- prometheus.Metric) {
- // TODO (#3047): Use a sync.Pool instead of allocating metrics every Collect.
- metrics := metricdata.ResourceMetrics{}
- err := c.reader.Collect(context.TODO(), &metrics)
+ metrics := metricsPool.Get().(*metricdata.ResourceMetrics)
+ defer metricsPool.Put(metrics)
+ err := c.reader.Collect(context.TODO(), metrics)
if err != nil {
if errors.Is(err, metric.ErrReaderShutdown) {
return
@@ -233,6 +242,10 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
addHistogramMetric(ch, v, m, name, kv)
case metricdata.Histogram[float64]:
addHistogramMetric(ch, v, m, name, kv)
+ case metricdata.ExponentialHistogram[int64]:
+ addExponentialHistogramMetric(ch, v, m, name, kv)
+ case metricdata.ExponentialHistogram[float64]:
+ addExponentialHistogramMetric(ch, v, m, name, kv)
case metricdata.Sum[int64]:
addSumMetric(ch, v, m, name, kv)
case metricdata.Sum[float64]:
@@ -246,7 +259,67 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
}
}
-func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) {
+func addExponentialHistogramMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ histogram metricdata.ExponentialHistogram[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+) {
+ for _, dp := range histogram.DataPoints {
+ keys, values := getAttrs(dp.Attributes)
+ keys = append(keys, kv.keys...)
+ values = append(values, kv.vals...)
+
+ desc := prometheus.NewDesc(name, m.Description, keys, nil)
+
+ // From spec: note that Prometheus Native Histograms buckets are indexed by upper boundary while Exponential Histograms are indexed by lower boundary, the result being that the Offset fields are different-by-one.
+ positiveBuckets := make(map[int]int64)
+ for i, c := range dp.PositiveBucket.Counts {
+ if c > math.MaxInt64 {
+ otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c))
+ continue
+ }
+ positiveBuckets[int(dp.PositiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
+ }
+
+ negativeBuckets := make(map[int]int64)
+ for i, c := range dp.NegativeBucket.Counts {
+ if c > math.MaxInt64 {
+ otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c))
+ continue
+ }
+ negativeBuckets[int(dp.NegativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
+ }
+
+ m, err := prometheus.NewConstNativeHistogram(
+ desc,
+ dp.Count,
+ float64(dp.Sum),
+ positiveBuckets,
+ negativeBuckets,
+ dp.ZeroCount,
+ dp.Scale,
+ dp.ZeroThreshold,
+ dp.StartTime,
+ values...)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+
+ // TODO(GiedriusS): add exemplars here after https://github.com/prometheus/client_golang/pull/1654#pullrequestreview-2434669425 is done.
+ ch <- m
+ }
+}
+
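The +1 on the bucket offsets above reflects the indexing difference the spec comment mentions: OTLP exponential-histogram buckets are keyed by lower bound, Prometheus native-histogram buckets by upper bound. A tiny standalone illustration of the same mapping (the counts and offset are made up):

package main

import "fmt"

func main() {
	// OTLP positive buckets: offset 2 with counts [3, 5] occupy scale
	// buckets 2 and 3 (lower-bound indexed). The Prometheus map built
	// above is upper-bound indexed, so the same counts land at keys 3
	// and 4 (offset+i+1).
	offset := 2
	counts := []uint64{3, 5}

	buckets := make(map[int]int64)
	for i, c := range counts {
		buckets[offset+i+1] = int64(c)
	}
	fmt.Println(buckets) // map[3:3 4:5]
}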
+func addHistogramMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ histogram metricdata.Histogram[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+) {
for _, dp := range histogram.DataPoints {
keys, values := getAttrs(dp.Attributes)
keys = append(keys, kv.keys...)
@@ -270,7 +343,13 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra
}
}
-func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) {
+func addSumMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ sum metricdata.Sum[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+) {
valueType := prometheus.CounterValue
if !sum.IsMonotonic {
valueType = prometheus.GaugeValue
@@ -296,7 +375,13 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata
}
}
-func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) {
+func addGaugeMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ gauge metricdata.Gauge[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+) {
for _, dp := range gauge.DataPoints {
keys, values := getAttrs(dp.Attributes)
keys = append(keys, kv.keys...)
@@ -319,7 +404,7 @@ func getAttrs(attrs attribute.Set) ([]string, []string) {
values := make([]string, 0, attrs.Len())
itr := attrs.Iter()
- if model.NameValidationScheme == model.UTF8Validation {
+ if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
// Do not perform sanitization if prometheus supports UTF-8.
for itr.Next() {
kv := itr.Attribute()
@@ -405,8 +490,9 @@ var unitSuffixes = map[string]string{
// getName returns the sanitized name, prefixed with the namespace and suffixed with unit.
func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
name := m.Name
- if model.NameValidationScheme != model.UTF8Validation {
+ if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme.
// Only sanitize if prometheus does not support UTF-8.
+ logDeprecatedLegacyScheme()
name = model.EscapeName(name, model.NameEscapingScheme)
}
addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER
@@ -436,11 +522,13 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
// underscore when the escaping scheme is underscore escaping. This is meant to
// capture any character that should be considered a "delimiter".
func convertsToUnderscore(b rune) bool {
- return !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == ':' || (b >= '0' && b <= '9'))
+ return (b < 'a' || b > 'z') && (b < 'A' || b > 'Z') && b != ':' && (b < '0' || b > '9')
}
func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
switch v := m.Data.(type) {
+ case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]:
+ return dto.MetricType_HISTOGRAM.Enum()
case metricdata.Histogram[int64], metricdata.Histogram[float64]:
return dto.MetricType_HISTOGRAM.Enum()
case metricdata.Sum[float64]:
diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go
index fc155d79f..76f15b96b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go
@@ -131,7 +131,9 @@ func redactAggregationTimestamps(orig metricdata.Aggregation) metricdata.Aggrega
}
}
-func redactHistogramTimestamps[T int64 | float64](hdp []metricdata.HistogramDataPoint[T]) []metricdata.HistogramDataPoint[T] {
+func redactHistogramTimestamps[T int64 | float64](
+ hdp []metricdata.HistogramDataPoint[T],
+) []metricdata.HistogramDataPoint[T] {
out := make([]metricdata.HistogramDataPoint[T], len(hdp))
for i, dp := range hdp {
out[i] = metricdata.HistogramDataPoint[T]{
diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go
index 18cbd1cb2..b7a085c63 100644
--- a/vendor/go.opentelemetry.io/otel/log/doc.go
+++ b/vendor/go.opentelemetry.io/otel/log/doc.go
@@ -4,10 +4,19 @@
/*
Package log provides the OpenTelemetry Logs API.
-This package is intended to be used by bridges between existing logging
-libraries and OpenTelemetry. Users should not directly use this package as a
-logging library. Instead, install one of the bridges listed in the
-[registry], and use the associated logging library.
+This API is separate from its implementation so the instrumentation built from
+it is reusable. See [go.opentelemetry.io/otel/sdk/log] for the official
+OpenTelemetry implementation of this API.
+
+The log package provides the OpenTelemetry Logs API, which serves as a standard
+interface for generating and managing log records within the OpenTelemetry ecosystem.
+This package allows users to emit LogRecords, enabling structured, context-rich logging
+that can be easily integrated with observability tools. It ensures that log data is captured
+in a way that is consistent with OpenTelemetry's data model.
+
+This package can be used to create bridges between existing logging libraries and OpenTelemetry.
+Log bridges allow integrating the existing logging setups with OpenTelemetry.
+Log bridges can be found in the [registry].
# API Implementations
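Since the rewritten doc comment above describes the package as the API for emitting LogRecords as well as the basis for bridges, a minimal emit sketch may help; the logger name, attributes, and the use of the global provider are illustrative and assume the public otel log API.

package example

import (
	"context"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/global"
)

func emitExample(ctx context.Context) {
	// With no SDK registered the global provider is a no-op, so this is
	// safe to call unconditionally.
	logger := global.Logger("example/logger")

	var rec log.Record
	rec.SetSeverity(log.SeverityInfo)
	rec.SetBody(log.StringValue("request handled"))
	rec.AddAttributes(log.String("status", "ok"))

	logger.Emit(ctx, rec)
}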
diff --git a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
index a3714c4c6..9b401b2b1 100644
--- a/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
+++ b/vendor/go.opentelemetry.io/otel/log/embedded/embedded.go
@@ -4,33 +4,33 @@
// Package embedded provides interfaces embedded within the [OpenTelemetry Logs
// Bridge API].
//
-// Implementers of the [OpenTelemetry Logs Bridge API] can embed the relevant
+// Implementers of the [OpenTelemetry Logs API] can embed the relevant
// type from this package into their implementation directly. Doing so will
// result in a compilation error for users when the [OpenTelemetry Logs Bridge
// API] is extended (which is something that can happen without a major version
// bump of the API package).
//
-// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
+// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
package embedded // import "go.opentelemetry.io/otel/log/embedded"
-// LoggerProvider is embedded in the [Logs Bridge API LoggerProvider].
+// LoggerProvider is embedded in the [Logs API LoggerProvider].
//
-// Embed this interface in your implementation of the [Logs Bridge API
+// Embed this interface in your implementation of the [Logs API
// LoggerProvider] if you want users to experience a compilation error,
// signaling they need to update to your latest implementation, when the [Logs
// Bridge API LoggerProvider] interface is extended (which is something that
// can happen without a major version bump of the API package).
//
-// [Logs Bridge API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider
+// [Logs API LoggerProvider]: https://pkg.go.dev/go.opentelemetry.io/otel/log#LoggerProvider
type LoggerProvider interface{ loggerProvider() }
-// Logger is embedded in [Logs Bridge API Logger].
+// Logger is embedded in [Logs API Logger].
//
-// Embed this interface in your implementation of the [Logs Bridge API Logger]
+// Embed this interface in your implementation of the [Logs API Logger]
// if you want users to experience a compilation error, signaling they need to
-// update to your latest implementation, when the [Logs Bridge API Logger]
+// update to your latest implementation, when the [Logs API Logger]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
//
-// [Logs Bridge API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger
+// [Logs API Logger]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger
type Logger interface{ logger() }
diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go
index 73e4e7dca..87d1a8275 100644
--- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go
+++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go
@@ -301,7 +301,7 @@ func (v Value) String() string {
case KindBool:
return strconv.FormatBool(v.asBool())
case KindBytes:
- return fmt.Sprint(v.asBytes())
+ return fmt.Sprint(v.asBytes()) // nolint:staticcheck // Use fmt.Sprint to encode as slice.
case KindMap:
return fmt.Sprint(v.asMap())
case KindSlice:
diff --git a/vendor/go.opentelemetry.io/otel/log/noop/noop.go b/vendor/go.opentelemetry.io/otel/log/noop/noop.go
index f45a7c7e0..d779e5d80 100644
--- a/vendor/go.opentelemetry.io/otel/log/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/log/noop/noop.go
@@ -4,14 +4,14 @@
// Package noop provides an implementation of the [OpenTelemetry Logs Bridge
// API] that produces no telemetry and minimizes used computation resources.
//
-// Using this package to implement the [OpenTelemetry Logs Bridge API] will
+// Using this package to implement the [OpenTelemetry Logs API] will
// effectively disable OpenTelemetry.
//
// This implementation can be embedded in other implementations of the
-// [OpenTelemetry Logs Bridge API]. Doing so will mean the implementation
+// [OpenTelemetry Logs API]. Doing so will mean the implementation
// defaults to no operation for methods it does not implement.
//
-// [OpenTelemetry Logs Bridge API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
+// [OpenTelemetry Logs API]: https://pkg.go.dev/go.opentelemetry.io/otel/log
package noop // import "go.opentelemetry.io/otel/log/noop"
import (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go
index 28c969262..b91741d58 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go
@@ -156,13 +156,20 @@ func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) {
global.Warn("dropped log records", "dropped", d)
}
- qLen := b.q.TryDequeue(buf, func(r []Record) bool {
- ok := b.exporter.EnqueueExport(r)
- if ok {
- buf = slices.Clone(buf)
- }
- return ok
- })
+ var qLen int
+	// Don't copy data from the queue unless the exporter can accept more; copying is very expensive.
+ if b.exporter.Ready() {
+ qLen = b.q.TryDequeue(buf, func(r []Record) bool {
+ ok := b.exporter.EnqueueExport(r)
+ if ok {
+ buf = slices.Clone(buf)
+ }
+ return ok
+ })
+ } else {
+ qLen = b.q.Len()
+ }
+
if qLen >= b.batchSize {
// There is another full batch ready. Immediately trigger
// another export attempt.
@@ -272,6 +279,13 @@ func newQueue(size int) *queue {
}
}
+func (q *queue) Len() int {
+ q.Lock()
+ defer q.Unlock()
+
+ return q.len
+}
+
// Dropped returns the number of Records dropped during enqueueing since the
// last time Dropped was called.
func (q *queue) Dropped() uint64 {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
index 6a1f1b0e9..78935de63 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
@@ -31,6 +31,6 @@ is being run on. That way when multiple instances of the code are collected
at a single endpoint their origin is decipherable.
See [go.opentelemetry.io/otel/log] for more information about
-the OpenTelemetry Logs Bridge API.
+the OpenTelemetry Logs API.
*/
package log // import "go.opentelemetry.io/otel/sdk/log"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
index e4e3c5402..8cef5dde6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
@@ -186,11 +186,10 @@ type bufferExporter struct {
// newBufferExporter returns a new bufferExporter that wraps exporter. The
// returned bufferExporter will buffer at most size number of export requests.
-// If size is less than zero, zero will be used (i.e. only synchronous
-// exporting will be supported).
+// If size is less than 1, 1 will be used.
func newBufferExporter(exporter Exporter, size int) *bufferExporter {
- if size < 0 {
- size = 0
+ if size < 1 {
+ size = 1
}
input := make(chan exportData, size)
return &bufferExporter{
@@ -201,6 +200,10 @@ func newBufferExporter(exporter Exporter, size int) *bufferExporter {
}
}
+func (e *bufferExporter) Ready() bool {
+ return len(e.input) != cap(e.input)
+}
+
var errStopped = errors.New("exporter stopped")
func (e *bufferExporter) enqueue(ctx context.Context, records []Record, rCh chan<- error) error {
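Ready, added above, reports whether the buffered input channel has spare capacity, which is what the BatchProcessor's poll loop now checks before paying for an expensive dequeue-and-clone. A standalone sketch of the same len/cap readiness check on a buffered channel (the types here are illustrative):

package main

import "fmt"

type job struct{ id int }

// ready mirrors bufferExporter.Ready above: a buffered channel can accept
// another send without blocking while len(ch) != cap(ch).
func ready(ch chan job) bool {
	return len(ch) != cap(ch)
}

func main() {
	ch := make(chan job, 2)
	ch <- job{id: 1}
	fmt.Println(ready(ch)) // true: one slot left
	ch <- job{id: 2}
	fmt.Println(ready(ch)) // false: a further send would block
}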
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go
index 5b99a4a99..a39cad9e0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/filter_processor.go
@@ -8,7 +8,6 @@ import (
"go.opentelemetry.io/otel/log"
"go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/sdk/resource"
)
// FilterProcessor is a [Processor] that knows, and can identify, what [Record]
@@ -56,7 +55,6 @@ type FilterProcessor interface {
// EnabledParameters represents payload for [FilterProcessor]'s Enabled method.
type EnabledParameters struct {
- Resource resource.Resource
InstrumentationScope instrumentation.Scope
Severity log.Severity
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
index 6211d5d92..cd3580ec0 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
@@ -50,7 +50,6 @@ func (l *logger) Emit(ctx context.Context, r log.Record) {
// returned if it can be positively verified that no Processor will process.
func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool {
p := EnabledParameters{
- Resource: *l.provider.resource,
InstrumentationScope: l.instrumentationScope,
Severity: param.Severity,
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go
index 096944ea1..359357b7e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go
@@ -236,7 +236,7 @@ func WithAttributeCountLimit(limit int) LoggerProviderOption {
})
}
-// AttributeValueLengthLimit sets the maximum allowed attribute value length.
+// WithAttributeValueLengthLimit sets the maximum allowed attribute value length.
//
// This limit only applies to string and string slice attribute values.
// Any string longer than this value will be truncated to this length.
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go
index e1b7c457c..d1d24fe62 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go
@@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type LogsServiceClient interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error)
}
@@ -48,8 +46,6 @@ func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceReq
// All implementations must embed UnimplementedLogsServiceServer
// for forward compatibility
type LogsServiceServer interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error)
mustEmbedUnimplementedLogsServiceServer()
}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
index 31d25fc15..fc668643c 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
@@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type MetricsServiceClient interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
}
@@ -48,8 +46,6 @@ func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServ
// All implementations must embed UnimplementedMetricsServiceServer
// for forward compatibility
type MetricsServiceServer interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
mustEmbedUnimplementedMetricsServiceServer()
}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
index dd1b73f1e..892864ea6 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
@@ -22,8 +22,6 @@ const _ = grpc.SupportPackageIsVersion7
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type TraceServiceClient interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
}
@@ -48,8 +46,6 @@ func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceR
// All implementations must embed UnimplementedTraceServiceServer
// for forward compatibility
type TraceServiceServer interface {
- // For performance reasons, it is recommended to keep this RPC
- // alive for the entire life of the application.
Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
mustEmbedUnimplementedTraceServiceServer()
}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
index 852209b09..a7c5d19bf 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -430,6 +430,101 @@ func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 {
return 0
}
+// A reference to an Entity.
+// Entity represents an object of interest associated with produced telemetry: e.g. spans, metrics, profiles, or logs.
+//
+// Status: [Development]
+type EntityRef struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The Schema URL, if known. This is the identifier of the Schema that the entity data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ //
+ // This schema_url applies to the data in this message and to the Resource attributes
+ // referenced by id_keys and description_keys.
+ // TODO: discuss if we are happy with this somewhat complicated definition of what
+ // the schema_url applies to.
+ //
+ // This field obsoletes the schema_url field in ResourceMetrics/ResourceSpans/ResourceLogs.
+ SchemaUrl string `protobuf:"bytes,1,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+ // Defines the type of the entity. MUST not change during the lifetime of the entity.
+ // For example: "service" or "host". This field is required and MUST not be empty
+ // for valid entities.
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ // Attribute Keys that identify the entity.
+ // MUST not change during the lifetime of the entity. The Id must contain at least one attribute.
+ // These keys MUST exist in the containing {message}.attributes.
+ IdKeys []string `protobuf:"bytes,3,rep,name=id_keys,json=idKeys,proto3" json:"id_keys,omitempty"`
+ // Descriptive (non-identifying) attribute keys of the entity.
+ // MAY change over the lifetime of the entity. MAY be empty.
+ // These attribute keys are not part of entity's identity.
+ // These keys MUST exist in the containing {message}.attributes.
+ DescriptionKeys []string `protobuf:"bytes,4,rep,name=description_keys,json=descriptionKeys,proto3" json:"description_keys,omitempty"`
+}
+
+func (x *EntityRef) Reset() {
+ *x = EntityRef{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EntityRef) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EntityRef) ProtoMessage() {}
+
+func (x *EntityRef) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EntityRef.ProtoReflect.Descriptor instead.
+func (*EntityRef) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *EntityRef) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
+
+func (x *EntityRef) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EntityRef) GetIdKeys() []string {
+ if x != nil {
+ return x.IdKeys
+ }
+ return nil
+}
+
+func (x *EntityRef) GetDescriptionKeys() []string {
+ if x != nil {
+ return x.DescriptionKeys
+ }
+ return nil
+}
+
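EntityRef is referenced from the new Resource.entity_refs field further down in this diff. A minimal sketch of constructing a Resource that carries one entity reference with these generated types; the attribute key and values are illustrative.

package example

import (
	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
)

// exampleResource shows the invariant stated above: id_keys must name
// attributes that exist on the containing Resource.
func exampleResource() *resourcepb.Resource {
	return &resourcepb.Resource{
		Attributes: []*commonpb.KeyValue{{
			Key: "service.name",
			Value: &commonpb.AnyValue{
				Value: &commonpb.AnyValue_StringValue{StringValue: "gotosocial"},
			},
		}},
		EntityRefs: []*commonpb.EntityRef{{
			Type:   "service",
			IdKeys: []string{"service.name"},
		}},
	}
}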
var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor
var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{
@@ -488,15 +583,23 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{
0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64,
0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
- 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
- 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f,
- 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
- 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
- 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79,
+ 0x52, 0x65, 0x66, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55,
+ 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x12,
+ 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
+ 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b,
+ 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67,
+ 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -511,13 +614,14 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte {
return file_opentelemetry_proto_common_v1_common_proto_rawDescData
}
-var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{
(*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue
(*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue
(*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList
(*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue
(*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope
+ (*EntityRef)(nil), // 5: opentelemetry.proto.common.v1.EntityRef
}
var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{
1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue
@@ -599,6 +703,18 @@ func file_opentelemetry_proto_common_v1_common_proto_init() {
return nil
}
}
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EntityRef); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{
(*AnyValue_StringValue)(nil),
@@ -615,7 +731,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc,
NumEnums: 0,
- NumMessages: 5,
+ NumMessages: 6,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go
index 9b47481ce..eb5b8df6b 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go
@@ -501,8 +501,6 @@ type LogRecord struct {
// as an event.
//
// [Optional].
- //
- // Status: [Development]
EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"`
}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
index 8799d6ba2..ec187b13d 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
@@ -526,7 +526,7 @@ type Metric struct {
// description of the metric, which can be used in documentation.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
// unit in which the metric value is reported. Follows the format
- // described by http://unitsofmeasure.org/ucum.html.
+ // described by https://unitsofmeasure.org/ucum.html.
Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
// Data determines the aggregation type (if any) of the metric, what is the
// reported value type for the data points, as well as the relatationship to
@@ -929,7 +929,7 @@ func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit
// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
-// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+// and OpenMetrics (see: https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
@@ -1175,7 +1175,9 @@ type HistogramDataPoint struct {
// The sum of the bucket_counts must equal the value in the count field.
//
// The number of elements in bucket_counts array must be by one greater than
- // the number of elements in explicit_bounds array.
+ // the number of elements in explicit_bounds array. The exception to this rule
+ // is when the length of bucket_counts is 0, then the length of explicit_bounds
+ // must also be 0.
BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
// explicit_bounds specifies buckets with explicitly defined bounds for values.
//
@@ -1190,6 +1192,9 @@ type HistogramDataPoint struct {
// Histogram buckets are inclusive of their upper boundary, except the last
// bucket where the boundary is at infinity. This format is intentionally
// compatible with the OpenMetrics histogram definition.
+ //
+ // If bucket_counts length is 0 then explicit_bounds length must also be 0,
+ // otherwise the data point is invalid.
ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
index b7545b03b..eb7745d66 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -48,6 +48,12 @@ type Resource struct {
// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
// no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // Set of entities that participate in this Resource.
+ //
+ // Note: keys in the references MUST exist in attributes of this message.
+ //
+ // Status: [Development]
+ EntityRefs []*v1.EntityRef `protobuf:"bytes,3,rep,name=entity_refs,json=entityRefs,proto3" json:"entity_refs,omitempty"`
}
func (x *Resource) Reset() {
@@ -96,6 +102,13 @@ func (x *Resource) GetDroppedAttributesCount() uint32 {
return 0
}
+func (x *Resource) GetEntityRefs() []*v1.EntityRef {
+ if x != nil {
+ return x.EntityRefs
+ }
+ return nil
+}
+
var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor
var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
@@ -106,7 +119,7 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76,
0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31,
- 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01,
+ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd8, 0x01,
0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
@@ -115,16 +128,21 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
- 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01,
- 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
- 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
- 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
- 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
- 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a,
+ 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x52, 0x0a, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x66, 0x73, 0x42, 0x83, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42,
+ 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70,
+ 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1f, 0x4f,
+ 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -141,16 +159,18 @@ func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte {
var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{
- (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource
- (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue
+ (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource
+ (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue
+ (*v1.EntityRef)(nil), // 2: opentelemetry.proto.common.v1.EntityRef
}
var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{
1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
+ 2, // 1: opentelemetry.proto.resource.v1.Resource.entity_refs:type_name -> opentelemetry.proto.common.v1.EntityRef
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
}
func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() }
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
index f388426b0..d083dde3e 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
index 3cd9a5bb8..e017ef071 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -703,6 +703,65 @@ type QuotaFailure_Violation struct {
// For example: "Service disabled" or "Daily Limit for read operations
// exceeded".
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The API Service from which the `QuotaFailure.Violation` originates. In
+ // some cases, Quota issues originate from an API Service other than the one
+ // that was called. In other words, a dependency of the called API Service
+ // could be the cause of the `QuotaFailure`, and this field would have the
+ // dependency API service name.
+ //
+ // For example, if the called API is Kubernetes Engine API
+ // (container.googleapis.com), and a quota violation occurs in the
+ // Kubernetes Engine API itself, this field would be
+ // "container.googleapis.com". On the other hand, if the quota violation
+ // occurs when the Kubernetes Engine API creates VMs in the Compute Engine
+ // API (compute.googleapis.com), this field would be
+ // "compute.googleapis.com".
+ ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"`
+ // The metric of the violated quota. A quota metric is a named counter to
+ // measure usage, such as API requests or CPUs. When an activity occurs in a
+ // service, such as Virtual Machine allocation, one or more quota metrics
+ // may be affected.
+ //
+ // For example, "compute.googleapis.com/cpus_per_vm_family",
+ // "storage.googleapis.com/internet_egress_bandwidth".
+ QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"`
+	// The id of the violated quota. Also known as "limit name", this is the
+ // unique identifier of a quota in the context of an API service.
+ //
+ // For example, "CPUS-PER-VM-FAMILY-per-project-region".
+ QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"`
+ // The dimensions of the violated quota. Every non-global quota is enforced
+ // on a set of dimensions. While quota metric defines what to count, the
+ // dimensions specify for what aspects the counter should be increased.
+ //
+ // For example, the quota "CPUs per region per VM family" enforces a limit
+ // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions
+ // "region" and "vm_family". And if the violation occurred in region
+ // "us-central1" and for VM family "n1", the quota_dimensions would be,
+ //
+ // {
+ // "region": "us-central1",
+ // "vm_family": "n1",
+ // }
+ //
+ // When a quota is enforced globally, the quota_dimensions would always be
+ // empty.
+ QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The enforced quota value at the time of the `QuotaFailure`.
+ //
+ // For example, if the enforced quota value at the time of the
+ // `QuotaFailure` on the number of CPUs is "10", then the value of this
+ // field would reflect this quantity.
+ QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"`
+ // The new quota value being rolled out at the time of the violation. At the
+ // completion of the rollout, this value will be enforced in place of
+ // quota_value. If no rollout is in progress at the time of the violation,
+ // this field is not set.
+ //
+ // For example, if at the time of the violation a rollout is in progress
+ // changing the number of CPUs quota from 10 to 20, 20 would be the value of
+ // this field.
+ FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"`
}
func (x *QuotaFailure_Violation) Reset() {
@@ -751,6 +810,48 @@ func (x *QuotaFailure_Violation) GetDescription() string {
return ""
}
+func (x *QuotaFailure_Violation) GetApiService() string {
+ if x != nil {
+ return x.ApiService
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaMetric() string {
+ if x != nil {
+ return x.QuotaMetric
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaId() string {
+ if x != nil {
+ return x.QuotaId
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string {
+ if x != nil {
+ return x.QuotaDimensions
+ }
+ return nil
+}
+
+func (x *QuotaFailure_Violation) GetQuotaValue() int64 {
+ if x != nil {
+ return x.QuotaValue
+ }
+ return 0
+}
+
+func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 {
+ if x != nil && x.FutureQuotaValue != nil {
+ return *x.FutureQuotaValue
+ }
+ return 0
+}
+
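
An illustrative sketch, not part of the vendored diff, of how a caller might read the new QuotaFailure.Violation fields from a gRPC error, using status.FromError and the errdetails getters added above; the error being inspected is assumed to come from some earlier RPC call:

// Sketch: inspecting the new QuotaFailure.Violation fields attached to a gRPC error.
package quotademo

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// reportQuotaFailure prints any quota details carried by err.
func reportQuotaFailure(err error) {
	st, ok := status.FromError(err)
	if !ok {
		return
	}
	for _, detail := range st.Details() {
		qf, ok := detail.(*errdetails.QuotaFailure)
		if !ok {
			continue
		}
		for _, v := range qf.GetViolations() {
			// api_service, quota_metric, quota_id, quota_dimensions and
			// quota_value are the fields introduced in this update.
			fmt.Printf("quota %q on %s exceeded (metric %s, limit %d)\n",
				v.GetQuotaId(), v.GetApiService(), v.GetQuotaMetric(), v.GetQuotaValue())
			for k, val := range v.GetQuotaDimensions() {
				fmt.Printf("  dimension %s=%s\n", k, val)
			}
		}
	}
}
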
// A message type used to describe a single precondition failure.
type PreconditionFailure_Violation struct {
state protoimpl.MessageState
@@ -775,7 +876,7 @@ type PreconditionFailure_Violation struct {
func (x *PreconditionFailure_Violation) Reset() {
*x = PreconditionFailure_Violation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_rpc_error_details_proto_msgTypes[12]
+ mi := &file_google_rpc_error_details_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -788,7 +889,7 @@ func (x *PreconditionFailure_Violation) String() string {
func (*PreconditionFailure_Violation) ProtoMessage() {}
func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
- mi := &file_google_rpc_error_details_proto_msgTypes[12]
+ mi := &file_google_rpc_error_details_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -886,7 +987,7 @@ type BadRequest_FieldViolation struct {
func (x *BadRequest_FieldViolation) Reset() {
*x = BadRequest_FieldViolation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_rpc_error_details_proto_msgTypes[13]
+ mi := &file_google_rpc_error_details_proto_msgTypes[14]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -899,7 +1000,7 @@ func (x *BadRequest_FieldViolation) String() string {
func (*BadRequest_FieldViolation) ProtoMessage() {}
func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
- mi := &file_google_rpc_error_details_proto_msgTypes[13]
+ mi := &file_google_rpc_error_details_proto_msgTypes[14]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -958,7 +1059,7 @@ type Help_Link struct {
func (x *Help_Link) Reset() {
*x = Help_Link{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_rpc_error_details_proto_msgTypes[14]
+ mi := &file_google_rpc_error_details_proto_msgTypes[15]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -971,7 +1072,7 @@ func (x *Help_Link) String() string {
func (*Help_Link) ProtoMessage() {}
func (x *Help_Link) ProtoReflect() protoreflect.Message {
- mi := &file_google_rpc_error_details_proto_msgTypes[14]
+ mi := &file_google_rpc_error_details_proto_msgTypes[15]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1029,79 +1130,102 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{
0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c,
0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
- 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
- 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
- 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
- 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
- 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
- 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
- 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
- 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
- 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61,
- 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
- 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
- 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a,
- 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65,
- 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
- 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04,
- 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
- 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
- 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75,
- 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a,
- 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73,
- 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61,
- 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65,
- 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
- 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50,
- 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18,
+ 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
+ 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71,
+ 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19,
+ 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f,
+ 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
+ 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69,
+ 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75,
+ 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31,
+ 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75,
+ 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01,
+ 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65,
+ 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a,
+ 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69,
+ 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
+ 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a,
+ 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01,
+ 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a,
+ 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a,
+ 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c,
+ 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10,
+ 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
+ 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44,
+ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
+ 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02,
+ 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1116,7 +1240,7 @@ func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
return file_google_rpc_error_details_proto_rawDescData
}
-var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
var file_google_rpc_error_details_proto_goTypes = []interface{}{
(*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo
(*RetryInfo)(nil), // 1: google.rpc.RetryInfo
@@ -1130,24 +1254,26 @@ var file_google_rpc_error_details_proto_goTypes = []interface{}{
(*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage
nil, // 10: google.rpc.ErrorInfo.MetadataEntry
(*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation
- (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
- (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation
- (*Help_Link)(nil), // 14: google.rpc.Help.Link
- (*durationpb.Duration)(nil), // 15: google.protobuf.Duration
+ nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
+ (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation
+ (*BadRequest_FieldViolation)(nil), // 14: google.rpc.BadRequest.FieldViolation
+ (*Help_Link)(nil), // 15: google.rpc.Help.Link
+ (*durationpb.Duration)(nil), // 16: google.protobuf.Duration
}
var file_google_rpc_error_details_proto_depIdxs = []int32{
10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
- 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+ 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
- 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
- 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
- 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
- 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
- 7, // [7:7] is the sub-list for method output_type
- 7, // [7:7] is the sub-list for method input_type
- 7, // [7:7] is the sub-list for extension type_name
- 7, // [7:7] is the sub-list for extension extendee
- 0, // [0:7] is the sub-list for field type_name
+ 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+ 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+ 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+ 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
+ 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 8, // [8:8] is the sub-list for extension type_name
+ 8, // [8:8] is the sub-list for extension extendee
+ 0, // [0:8] is the sub-list for field type_name
}
func init() { file_google_rpc_error_details_proto_init() }
@@ -1288,7 +1414,7 @@ func file_google_rpc_error_details_proto_init() {
return nil
}
}
- file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PreconditionFailure_Violation); i {
case 0:
return &v.state
@@ -1300,7 +1426,7 @@ func file_google_rpc_error_details_proto_init() {
return nil
}
}
- file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*BadRequest_FieldViolation); i {
case 0:
return &v.state
@@ -1312,7 +1438,7 @@ func file_google_rpc_error_details_proto_init() {
return nil
}
}
- file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Help_Link); i {
case 0:
return &v.state
@@ -1325,13 +1451,14 @@ func file_google_rpc_error_details_proto_init() {
}
}
}
+ file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
NumEnums: 0,
- NumMessages: 15,
+ NumMessages: 16,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index 6ad1b1c1d..06a3f7106 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index d5ed172ae..4d576876d 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba
cc: cc,
pickerBuilder: bb.pickerBuilder,
- subConns: resolver.NewAddressMap(),
+ subConns: resolver.NewAddressMapV2[balancer.SubConn](),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
config: bb.config,
@@ -65,7 +65,7 @@ type baseBalancer struct {
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
- subConns *resolver.AddressMap
+ subConns *resolver.AddressMapV2[balancer.SubConn]
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
config Config
@@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
// Successful resolution; clear resolver error and ensure we return nil.
b.resolverErr = nil
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
- addrsSet := resolver.NewAddressMap()
+ addrsSet := resolver.NewAddressMapV2[any]()
for _, a := range s.ResolverState.Addresses {
addrsSet.Set(a, nil)
if _, ok := b.subConns.Get(a); !ok {
@@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
for _, a := range b.subConns.Keys() {
- sci, _ := b.subConns.Get(a)
- sc := sci.(balancer.SubConn)
+ sc, _ := b.subConns.Get(a)
// a was removed by resolver.
if _, ok := addrsSet.Get(a); !ok {
sc.Shutdown()
@@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() {
// Filter out all ready SCs from full subConn map.
for _, addr := range b.subConns.Keys() {
- sci, _ := b.subConns.Get(addr)
- sc := sci.(balancer.SubConn)
+ sc, _ := b.subConns.Get(addr)
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
readySCs[sc] = SubConnInfo{Address: addr}
}
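
The balancer changes above replace the untyped resolver.AddressMap with the generic AddressMapV2, removing the interface{} assertions on Get. A minimal sketch of the typed access pattern these hunks rely on, using only the Set/Get/Keys methods visible in this diff; the string value type is an arbitrary choice for illustration:

// Sketch of the generic address-map usage the updated balancers depend on.
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Values are typed, so Get returns the concrete type directly.
	m := resolver.NewAddressMapV2[string]()
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	m.Set(addr, "backend-a")

	if v, ok := m.Get(addr); ok {
		fmt.Println("found:", v) // found: backend-a
	}
	for _, a := range m.Keys() {
		fmt.Println("key:", a.Addr)
	}
}
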
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
index 421c4fecc..cc606f4da 100644
--- a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
+++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
@@ -73,7 +73,7 @@ func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilde
esOpts: esOpts,
childBuilder: childBuilder,
}
- es.children.Store(resolver.NewEndpointMap())
+ es.children.Store(resolver.NewEndpointMap[*balancerWrapper]())
return es
}
@@ -90,7 +90,7 @@ type endpointSharding struct {
// calls into a child. To avoid deadlocks, do not acquire childMu while
// holding mu.
childMu sync.Mutex
- children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper
+ children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]]
// inhibitChildUpdates is set during UpdateClientConnState/ResolverError
// calls (calls to children will each produce an update, only want one
@@ -122,7 +122,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
var ret error
children := es.children.Load()
- newChildren := resolver.NewEndpointMap()
+ newChildren := resolver.NewEndpointMap[*balancerWrapper]()
// Update/Create new children.
for _, endpoint := range state.ResolverState.Endpoints {
@@ -131,9 +131,8 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
// update.
continue
}
- var childBalancer *balancerWrapper
- if val, ok := children.Get(endpoint); ok {
- childBalancer = val.(*balancerWrapper)
+ childBalancer, ok := children.Get(endpoint)
+ if ok {
// Endpoint attributes may have changed, update the stored endpoint.
es.mu.Lock()
childBalancer.childState.Endpoint = endpoint
@@ -166,7 +165,7 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState
for _, e := range children.Keys() {
child, _ := children.Get(e)
if _, ok := newChildren.Get(e); !ok {
- child.(*balancerWrapper).closeLocked()
+ child.closeLocked()
}
}
es.children.Store(newChildren)
@@ -189,7 +188,7 @@ func (es *endpointSharding) ResolverError(err error) {
}()
children := es.children.Load()
for _, child := range children.Values() {
- child.(*balancerWrapper).resolverErrorLocked(err)
+ child.resolverErrorLocked(err)
}
}
@@ -202,7 +201,7 @@ func (es *endpointSharding) Close() {
defer es.childMu.Unlock()
children := es.children.Load()
for _, child := range children.Values() {
- child.(*balancerWrapper).closeLocked()
+ child.closeLocked()
}
}
@@ -222,8 +221,7 @@ func (es *endpointSharding) updateState() {
childStates := make([]ChildState, 0, children.Len())
for _, child := range children.Values() {
- bw := child.(*balancerWrapper)
- childState := bw.childState
+ childState := child.childState
childStates = append(childStates, childState)
childPicker := childState.State.Picker
switch childState.State.ConnectivityState {
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
index 113181e6b..494314f23 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -122,7 +122,7 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions)
target: bo.Target.String(),
metricsRecorder: cc.MetricsRecorder(),
- subConns: resolver.NewAddressMap(),
+ subConns: resolver.NewAddressMapV2[*scData](),
state: connectivity.Connecting,
cancelConnectionTimer: func() {},
}
@@ -220,7 +220,7 @@ type pickfirstBalancer struct {
// updates.
state connectivity.State
	// scData for active subConns mapped by address.
- subConns *resolver.AddressMap
+ subConns *resolver.AddressMapV2[*scData]
addressList addressList
firstPass bool
numTF int
@@ -319,7 +319,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
prevAddr := b.addressList.currentAddress()
prevSCData, found := b.subConns.Get(prevAddr)
prevAddrsCount := b.addressList.size()
- isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
+ isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
b.addressList.updateAddrs(newAddrs)
// If the previous ready SubConn exists in new address list,
@@ -381,21 +381,21 @@ func (b *pickfirstBalancer) startFirstPassLocked() {
b.numTF = 0
// Reset the connection attempt record for existing SubConns.
for _, sd := range b.subConns.Values() {
- sd.(*scData).connectionFailedInFirstPass = false
+ sd.connectionFailedInFirstPass = false
}
b.requestConnectionLocked()
}
func (b *pickfirstBalancer) closeSubConnsLocked() {
for _, sd := range b.subConns.Values() {
- sd.(*scData).subConn.Shutdown()
+ sd.subConn.Shutdown()
}
- b.subConns = resolver.NewAddressMap()
+ b.subConns = resolver.NewAddressMapV2[*scData]()
}
// deDupAddresses ensures that each address appears only once in the slice.
func deDupAddresses(addrs []resolver.Address) []resolver.Address {
- seenAddrs := resolver.NewAddressMap()
+ seenAddrs := resolver.NewAddressMapV2[*scData]()
retAddrs := []resolver.Address{}
for _, addr := range addrs {
@@ -481,7 +481,7 @@ func addressFamily(address string) ipAddrFamily {
// This ensures that the subchannel map accurately reflects the current set of
// addresses received from the name resolver.
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
- newAddrsMap := resolver.NewAddressMap()
+ newAddrsMap := resolver.NewAddressMapV2[bool]()
for _, addr := range newAddrs {
newAddrsMap.Set(addr, true)
}
@@ -491,7 +491,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
continue
}
val, _ := b.subConns.Get(oldAddr)
- val.(*scData).subConn.Shutdown()
+ val.subConn.Shutdown()
b.subConns.Delete(oldAddr)
}
}
@@ -500,13 +500,12 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address)
// becomes ready, which means that all other subConns must be shut down.
func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
b.cancelConnectionTimer()
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
+ for _, sd := range b.subConns.Values() {
if sd.subConn != selected.subConn {
sd.subConn.Shutdown()
}
}
- b.subConns = resolver.NewAddressMap()
+ b.subConns = resolver.NewAddressMapV2[*scData]()
b.subConns.Set(selected.addr, selected)
}
@@ -539,18 +538,17 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
b.subConns.Set(curAddr, sd)
}
- scd := sd.(*scData)
- switch scd.rawConnectivityState {
+ switch sd.rawConnectivityState {
case connectivity.Idle:
- scd.subConn.Connect()
+ sd.subConn.Connect()
b.scheduleNextConnectionLocked()
return
case connectivity.TransientFailure:
// The SubConn is being re-used and failed during a previous pass
// over the addressList. It has not completed backoff yet.
// Mark it as having failed and try the next address.
- scd.connectionFailedInFirstPass = true
- lastErr = scd.lastErr
+ sd.connectionFailedInFirstPass = true
+ lastErr = sd.lastErr
continue
case connectivity.Connecting:
// Wait for the connection attempt to complete or the timer to fire
@@ -558,7 +556,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() {
b.scheduleNextConnectionLocked()
return
default:
- b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
return
}
@@ -753,8 +751,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
}
// Connect() has been called on all the SubConns. The first pass can be
// ended if all the SubConns have reported a failure.
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
+ for _, sd := range b.subConns.Values() {
if !sd.connectionFailedInFirstPass {
return
}
@@ -765,8 +762,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
Picker: &picker{err: lastErr},
})
// Start re-connecting all the SubConns that are already in IDLE.
- for _, v := range b.subConns.Values() {
- sd := v.(*scData)
+ for _, sd := range b.subConns.Values() {
if sd.rawConnectivityState == connectivity.Idle {
sd.subConn.Connect()
}
@@ -927,6 +923,5 @@ func (al *addressList) hasNext() bool {
// fields that are meaningful to the SubConn.
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes) &&
- a.Metadata == b.Metadata
+ a.Attributes.Equal(b.Attributes)
}
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index b2f8fc7f4..825c31795 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.5
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index a319ef979..4f350ca56 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -1231,8 +1231,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
- switch r {
- case transport.GoAwayTooManyPings:
+ if r == transport.GoAwayTooManyPings {
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
if v > ac.cc.keepaliveParams.Time {
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 94177b05c..faa59e418 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.4
+// protoc-gen-go v1.36.5
// protoc v5.27.1
// source: grpc/health/v1/health.proto
@@ -178,6 +178,87 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
return HealthCheckResponse_UNKNOWN
}
+type HealthListRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HealthListRequest) Reset() {
+ *x = HealthListRequest{}
+ mi := &file_grpc_health_v1_health_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HealthListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthListRequest) ProtoMessage() {}
+
+func (x *HealthListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead.
+func (*HealthListRequest) Descriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2}
+}
+
+type HealthListResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // statuses contains all the services and their respective status.
+ Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HealthListResponse) Reset() {
+ *x = HealthListResponse{}
+ mi := &file_grpc_health_v1_health_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HealthListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthListResponse) ProtoMessage() {}
+
+func (x *HealthListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead.
+func (*HealthListResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse {
+ if x != nil {
+ return x.Statuses
+ }
+ return nil
+}
+
var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
var file_grpc_health_v1_health_proto_rawDesc = string([]byte{
@@ -198,25 +279,44 @@ var file_grpc_health_v1_health_proto_rawDesc = string([]byte{
0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
- 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+ 0x03, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a,
+ 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31,
+ 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x0d, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xfd, 0x01,
+ 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e,
+ 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
- 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
- 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
- 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
- 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
- 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
- 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
- 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
- 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x4c, 0x69,
+ 0x73, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x05, 0x57, 0x61, 0x74,
+ 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x70, 0x0a,
+ 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e,
+ 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x76, 0x31, 0xa2,
+ 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x56, 0x31, 0xaa, 0x02,
+ 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
@@ -232,23 +332,30 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
}
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_grpc_health_v1_health_proto_goTypes = []any{
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
+ (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest
+ (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse
+ nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry
}
var file_grpc_health_v1_health_proto_depIdxs = []int32{
0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
- 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
- 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
- 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
- 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
- 3, // [3:5] is the sub-list for method output_type
- 1, // [1:3] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
+ 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry
+ 2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse
+ 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
+ 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest
+ 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
+ 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
+ 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse
+ 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
+ 6, // [6:9] is the sub-list for method output_type
+ 3, // [3:6] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
}
func init() { file_grpc_health_v1_health_proto_init() }
@@ -262,7 +369,7 @@ func file_grpc_health_v1_health_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)),
NumEnums: 1,
- NumMessages: 2,
+ NumMessages: 5,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index f96b8ab49..93136610e 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
+ Health_List_FullMethodName = "/grpc.health.v1.Health/List"
Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
)
@@ -55,9 +56,19 @@ type HealthClient interface {
//
// Clients should set a deadline when calling Check, and can declare the
// server unhealthy if they do not receive a timely response.
- //
- // Check implementations should be idempotent and side effect free.
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+ // List provides a non-atomic snapshot of the health of all the available
+ // services.
+ //
+ // The server may respond with a RESOURCE_EXHAUSTED error if too many services
+ // exist.
+ //
+ // Clients should set a deadline when calling List, and can declare the server
+ // unhealthy if they do not receive a timely response.
+ //
+ // Clients should keep in mind that the list of health services exposed by an
+ // application can change over the lifetime of the process.
+ List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
// serving status. It will then subsequently send a new message whenever
@@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
return out, nil
}
+func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(HealthListResponse)
+ err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
@@ -128,9 +149,19 @@ type HealthServer interface {
//
// Clients should set a deadline when calling Check, and can declare the
// server unhealthy if they do not receive a timely response.
- //
- // Check implementations should be idempotent and side effect free.
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+ // List provides a non-atomic snapshot of the health of all the available
+ // services.
+ //
+ // The server may respond with a RESOURCE_EXHAUSTED error if too many services
+ // exist.
+ //
+ // Clients should set a deadline when calling List, and can declare the server
+ // unhealthy if they do not receive a timely response.
+ //
+ // Clients should keep in mind that the list of health services exposed by an
+ // application can change over the lifetime of the process.
+ List(context.Context, *HealthListRequest) (*HealthListResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
// serving status. It will then subsequently send a new message whenever
@@ -159,6 +190,9 @@ type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
+func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
+}
func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
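
A hedged server-side sketch of satisfying the new List method on a hand-rolled HealthServer. The grpc health package ships its own server implementation, so this custom type, its in-memory status map, and the localhost listen address are purely illustrative assumptions:

// Sketch: a minimal custom HealthServer that also serves the new List RPC.
package main

import (
	"context"
	"log"
	"net"
	"sync"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

type simpleHealthServer struct {
	healthpb.UnimplementedHealthServer // fallbacks for methods not overridden here (e.g. Watch)

	mu       sync.RWMutex
	statuses map[string]healthpb.HealthCheckResponse_ServingStatus
}

func (s *simpleHealthServer) Check(_ context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return &healthpb.HealthCheckResponse{Status: s.statuses[req.GetService()]}, nil
}

// List returns a non-atomic snapshot of every known service, mirroring the
// generated HealthListResponse.statuses map.
func (s *simpleHealthServer) List(_ context.Context, _ *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make(map[string]*healthpb.HealthCheckResponse, len(s.statuses))
	for svc, st := range s.statuses {
		out[svc] = &healthpb.HealthCheckResponse{Status: st}
	}
	return &healthpb.HealthListResponse{Statuses: out}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // placeholder address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	healthpb.RegisterHealthServer(srv, &simpleHealthServer{
		statuses: map[string]healthpb.HealthCheckResponse_ServingStatus{
			"": healthpb.HealthCheckResponse_SERVING, // overall server health
		},
	})
	log.Fatal(srv.Serve(lis))
}
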
@@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
+func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HealthListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(HealthServer).List(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Health_List_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(HealthServer).List(ctx, req.(*HealthListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(HealthCheckRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{
MethodName: "Check",
Handler: _Health_Check_Handler,
},
+ {
+ MethodName: "List",
+ Handler: _Health_List_Handler,
+ },
},
Streams: []grpc.StreamDesc{
{
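
A hedged client-side sketch of calling the new Health.List RPC registered above; the plaintext connection and the localhost target are placeholder assumptions for illustration:

// Sketch: querying the new Health.List RPC from a client.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := healthpb.NewHealthClient(conn)

	// Per the doc comments above, set a deadline when calling List.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := client.List(ctx, &healthpb.HealthListRequest{})
	if err != nil {
		log.Fatalf("list: %v", err)
	}
	for service, st := range resp.GetStatuses() {
		log.Printf("service %q: %v", service, st.GetStatus())
	}
}
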
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 1e42b6fdc..cc5713fd9 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -51,10 +51,24 @@ var (
// xDS server in the list of server configs will be used.
XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
- // instead of the exiting pickfirst implementation. This can be enabled by
+ // instead of the existing pickfirst implementation. This can be disabled by
// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
- // to "true".
- NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
+ // to "false".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
+
+ // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
+ // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
+ // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
+ // implementation of A76 is stable, we will flip the default value to false
+ // in a subsequent release. A final release will remove this environment
+ // variable, enabling the new behavior unconditionally.
+ XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true)
+
+ // RingHashSetRequestHashKey is set if the ring hash balancer can get the
+ // request hash header by setting the "requestHashHeader" field, according
+ // to gRFC A76. It can be enabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true".
+ RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 13e1f386b..2ce012cda 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -259,6 +259,13 @@ var (
// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
// testing purposes.
SetBufferPoolingThresholdForTesting any // func(int)
+
+ // TimeAfterFunc is used to create timers. During tests the function is
+ // replaced to track allocated timers and fail the test if a timer isn't
+ // cancelled.
+ TimeAfterFunc = func(d time.Duration, f func()) Timer {
+ return time.AfterFunc(d, f)
+ }
)
// HealthChecker defines the signature of the client-side LB channel health
@@ -300,3 +307,9 @@ type EnforceSubConnEmbedding interface {
type EnforceClientConnEmbedding interface {
enforceClientConnEmbedding()
}
+
+// Timer is an interface to allow injecting different time.Timer implementations
+// during tests.
+type Timer interface {
+ Stop() bool
+}
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
index 900bfb716..c4055bc00 100644
--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool {
return false
}
-// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
-//
-// - key must contain one or more characters.
-// - the characters in the key must be contained in [0-9 a-z _ - .].
-// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
-// - the characters in the every value must be printable (in [%x20-%x7E]).
-func ValidatePair(key string, vals ...string) error {
+// ValidateKey validates a key with the following rules (pseudo-headers are
+// skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+func ValidateKey(key string) error {
// key should not be empty
if key == "" {
return fmt.Errorf("there is an empty key in the header")
@@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error {
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
}
}
+ return nil
+}
+
+// ValidatePair validates a key-value pair with the following rules
+// (pseudo-headers are skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+// - if the key ends with a "-bin" suffix, no validation of the corresponding
+// value is performed.
+// - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+ if err := ValidateKey(key); err != nil {
+ return err
+ }
if strings.HasSuffix(key, "-bin") {
return nil
}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
index a6c647013..c0e227577 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -28,6 +28,8 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@@ -40,19 +42,26 @@ var (
// delegatingResolver manages both target URI and proxy address resolution by
// delegating these tasks to separate child resolvers. Essentially, it acts as
-// a intermediary between the gRPC ClientConn and the child resolvers.
+// an intermediary between the gRPC ClientConn and the child resolvers.
//
// It implements the [resolver.Resolver] interface.
type delegatingResolver struct {
- target resolver.Target // parsed target URI to be resolved
- cc resolver.ClientConn // gRPC ClientConn
- targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
- proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
- proxyURL *url.URL // proxy URL, derived from proxy environment and target
+ target resolver.Target // parsed target URI to be resolved
+ cc resolver.ClientConn // gRPC ClientConn
+ proxyURL *url.URL // proxy URL, derived from proxy environment and target
+ // We do not hold both mu and childMu in the same goroutine. Avoid holding
+ // both locks when calling into the child, as the child resolver may
+ // synchronously call back into the channel.
mu sync.Mutex // protects all the fields below
targetResolverState *resolver.State // state of the target resolver
proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+
+ // childMu serializes calls into child resolvers. It also protects access to
+ // the following fields.
+ childMu sync.Mutex
+ targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
+ proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
}
// nopResolver is a resolver that does nothing.
@@ -62,8 +71,8 @@ func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close() {}
-// proxyURLForTarget determines the proxy URL for the given address based on
-// the environment. It can return the following:
+// proxyURLForTarget determines the proxy URL for the given address based on the
+// environment. It can return the following:
// - nil URL, nil error: No proxy is configured or the address is excluded
// using the `NO_PROXY` environment variable or if req.URL.Host is
// "localhost" (with or without // a port number)
@@ -82,7 +91,8 @@ func proxyURLForTarget(address string) (*url.URL, error) {
// resolvers:
// - one to resolve the proxy address specified using the supported
// environment variables. This uses the registered resolver for the "dns"
-// scheme.
+// scheme. It is lazily built when a target resolver update contains at least
+// one TCP address.
// - one to resolve the target URI using the resolver specified by the scheme
// in the target URI or specified by the user using the WithResolvers dial
// option. As a special case, if the target URI's scheme is "dns" and a
@@ -91,8 +101,10 @@ func proxyURLForTarget(address string) (*url.URL, error) {
// resolution is enabled using the dial option.
func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
r := &delegatingResolver{
- target: target,
- cc: cc,
+ target: target,
+ cc: cc,
+ proxyResolver: nopResolver{},
+ targetResolver: nopResolver{},
}
var err error
@@ -111,41 +123,34 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
logger.Infof("Proxy URL detected : %s", r.proxyURL)
}
+ // Resolver updates from one child may trigger calls into the other. Block
+ // updates until the children are initialized.
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
// When the scheme is 'dns' and target resolution on client is not enabled,
// resolution should be handled by the proxy, not the client. Therefore, we
// bypass the target resolver and store the unresolved target address.
if target.URL.Scheme == "dns" && !targetResolutionEnabled {
- state := resolver.State{
+ r.targetResolverState = &resolver.State{
Addresses: []resolver.Address{{Addr: target.Endpoint()}},
Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
}
- r.targetResolverState = &state
- } else {
- wcc := &wrappingClientConn{
- stateListener: r.updateTargetResolverState,
- parent: r,
- }
- if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
- return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
- }
+ r.updateTargetResolverState(*r.targetResolverState)
+ return r, nil
}
-
- if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil {
- return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err)
- }
-
- if r.targetResolver == nil {
- r.targetResolver = nopResolver{}
+ wcc := &wrappingClientConn{
+ stateListener: r.updateTargetResolverState,
+ parent: r,
}
- if r.proxyResolver == nil {
- r.proxyResolver = nopResolver{}
+ if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
+ return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
}
return r, nil
}
-// proxyURIResolver creates a resolver for resolving proxy URIs using the
-// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and
-// builds a resolver with a wrappingClientConn to capture resolved addresses.
+// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns"
+// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds
+// a resolver with a wrappingClientConn to capture resolved addresses.
func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
proxyBuilder := resolver.Get("dns")
if proxyBuilder == nil {
@@ -165,11 +170,15 @@ func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resol
}
func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
r.targetResolver.ResolveNow(o)
r.proxyResolver.ResolveNow(o)
}
func (r *delegatingResolver) Close() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
r.targetResolver.Close()
r.targetResolver = nil
@@ -177,18 +186,43 @@ func (r *delegatingResolver) Close() {
r.proxyResolver = nil
}
-// updateClientConnStateLocked creates a list of combined addresses by
-// pairing each proxy address with every target address. For each pair, it
-// generates a new [resolver.Address] using the proxy address, and adding the
-// target address as the attribute along with user info. It returns nil if
-// either resolver has not sent update even once and returns the error from
-// ClientConn update once both resolvers have sent update atleast once.
+func networkTypeFromAddr(addr resolver.Address) string {
+ networkType, ok := networktype.Get(addr)
+ if !ok {
+ networkType, _ = transport.ParseDialTarget(addr.Addr)
+ }
+ return networkType
+}
+
+func isTCPAddressPresent(state *resolver.State) bool {
+ for _, addr := range state.Addresses {
+ if networkType := networkTypeFromAddr(addr); networkType == "tcp" {
+ return true
+ }
+ }
+ for _, endpoint := range state.Endpoints {
+ for _, addr := range endpoint.Addresses {
+ if networktype := networkTypeFromAddr(addr); networktype == "tcp" {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// updateClientConnStateLocked constructs a combined list of addresses by
+// pairing each proxy address with every target address of type TCP. For each
+// pair, it creates a new [resolver.Address] using the proxy address and
+// attaches the corresponding target address and user info as attributes. Target
+// addresses that are not of type TCP are appended to the list as-is. The
+// function returns nil if either resolver has not yet provided an update, and
+// returns the result of ClientConn.UpdateState once both resolvers have
+// provided at least one update.
func (r *delegatingResolver) updateClientConnStateLocked() error {
if r.targetResolverState == nil || r.proxyAddrs == nil {
return nil
}
- curState := *r.targetResolverState
// If multiple resolved proxy addresses are present, we send only the
// unresolved proxy host and let net.Dial handle the proxy host name
// resolution when creating the transport. Sending all resolved addresses
@@ -206,24 +240,30 @@ func (r *delegatingResolver) updateClientConnStateLocked() error {
}
var addresses []resolver.Address
for _, targetAddr := range (*r.targetResolverState).Addresses {
+ // Avoid proxy when network is not tcp.
+ if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" {
+ addresses = append(addresses, targetAddr)
+ continue
+ }
addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
User: r.proxyURL.User,
ConnectAddr: targetAddr.Addr,
}))
}
- // Create a list of combined endpoints by pairing all proxy endpoints
- // with every target endpoint. Each time, it constructs a new
- // [resolver.Endpoint] using the all addresses from all the proxy endpoint
- // and the target addresses from one endpoint. The target address and user
- // information from the proxy URL are added as attributes to the proxy
- // address.The resulting list of addresses is then grouped into endpoints,
- // covering all combinations of proxy and target endpoints.
+ // For each target endpoint, construct a new [resolver.Endpoint] that
+ // includes all addresses from all proxy endpoints and the addresses from
+ // that target endpoint, preserving the number of target endpoints.
var endpoints []resolver.Endpoint
for _, endpt := range (*r.targetResolverState).Endpoints {
var addrs []resolver.Address
- for _, proxyAddr := range r.proxyAddrs {
- for _, targetAddr := range endpt.Addresses {
+ for _, targetAddr := range endpt.Addresses {
+ // Avoid proxy when network is not tcp.
+ if networkType := networkTypeFromAddr(targetAddr); networkType != "tcp" {
+ addrs = append(addrs, targetAddr)
+ continue
+ }
+ for _, proxyAddr := range r.proxyAddrs {
addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
User: r.proxyURL.User,
ConnectAddr: targetAddr.Addr,
@@ -234,8 +274,9 @@ func (r *delegatingResolver) updateClientConnStateLocked() error {
}
// Use the targetResolverState for its service config and attributes
// contents. The state update is only sent after both the target and proxy
- // resolvers have sent their updates, and curState has been updated with
- // the combined addresses.
+ // resolvers have sent their updates, and curState has been updated with the
+ // combined addresses.
+ curState := *r.targetResolverState
curState.Addresses = addresses
curState.Endpoints = endpoints
return r.cc.UpdateState(curState)
@@ -245,7 +286,8 @@ func (r *delegatingResolver) updateClientConnStateLocked() error {
// addresses and endpoints, marking the resolver as ready, and triggering a
// state update if both proxy and target resolvers are ready. If the ClientConn
// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It
-// is a StateListener function of wrappingClientConn passed to the proxy resolver.
+// is a StateListener function of wrappingClientConn passed to the proxy
+// resolver.
func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
r.mu.Lock()
defer r.mu.Unlock()
@@ -253,8 +295,8 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro
logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
}
if len(state.Endpoints) > 0 {
- // We expect exactly one address per endpoint because the proxy
- // resolver uses "dns" resolution.
+ // We expect exactly one address per endpoint because the proxy resolver
+ // uses "dns" resolution.
r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
for _, endpoint := range state.Endpoints {
r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
@@ -267,20 +309,29 @@ func (r *delegatingResolver) updateProxyResolverState(state resolver.State) erro
err := r.updateClientConnStateLocked()
// Another possible approach was to block until updates are received from
// both resolvers. But this is not used because calling `New()` triggers
- // `Build()` for the first resolver, which calls `UpdateState()`. And the
+ // `Build()` for the first resolver, which calls `UpdateState()`. And the
// second resolver hasn't sent an update yet, so it would cause `New()` to
// block indefinitely.
if err != nil {
- r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if r.targetResolver != nil {
+ r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ }()
}
return err
}
-// updateTargetResolverState updates the target resolver state by storing target
-// addresses, endpoints, and service config, marking the resolver as ready, and
-// triggering a state update if both resolvers are ready. If the ClientConn
-// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It
-// is a StateListener function of wrappingClientConn passed to the target resolver.
+// updateTargetResolverState is the StateListener function provided to the
+// target resolver via wrappingClientConn. It updates the resolver state and
+// marks the target resolver as ready. If the update includes at least one TCP
+// address and the proxy resolver has not yet been constructed, it initializes
+// the proxy resolver. A combined state update is triggered once both resolvers
+// are ready. If all addresses are non-TCP, it proceeds without waiting for the
+// proxy resolver. If ClientConn.UpdateState returns a non-nil error,
+// ResolveNow() is called on the proxy resolver.
func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
r.mu.Lock()
defer r.mu.Unlock()
@@ -289,9 +340,40 @@ func (r *delegatingResolver) updateTargetResolverState(state resolver.State) err
logger.Infof("Addresses received from target resolver: %v", state.Addresses)
}
r.targetResolverState = &state
+ // If no addresses returned by the resolver have network type tcp, do not
+ // wait for a proxy update.
+ if !isTCPAddressPresent(r.targetResolverState) {
+ return r.cc.UpdateState(*r.targetResolverState)
+ }
+
+ // The proxy resolver may be rebuilt multiple times, specifically each time
+ // the target resolver sends an update, even if the target resolver is built
+ // successfully but building the proxy resolver fails.
+ if len(r.proxyAddrs) == 0 {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if _, ok := r.proxyResolver.(nopResolver); !ok {
+ return
+ }
+ proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{})
+ if err != nil {
+ r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err))
+ return
+ }
+ r.proxyResolver = proxyResolver
+ }()
+ }
+
err := r.updateClientConnStateLocked()
if err != nil {
- r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if r.proxyResolver != nil {
+ r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ }()
}
return nil
}
@@ -311,7 +393,8 @@ func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
return wcc.stateListener(state)
}
-// ReportError intercepts errors from the child resolvers and passes them to ClientConn.
+// ReportError intercepts errors from the child resolvers and passes them to
+// ClientConn.
func (wcc *wrappingClientConn) ReportError(err error) {
wcc.parent.cc.ReportError(err)
}
@@ -322,8 +405,8 @@ func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
wcc.UpdateState(resolver.State{Addresses: addrs})
}
-// ParseServiceConfig parses the provided service config and returns an
-// object that provides the parsed config.
+// ParseServiceConfig parses the provided service config and returns an object
+// that provides the parsed config.
func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
index 8ed347c54..ccc0e017e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -59,7 +59,7 @@ func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
return b, err
}
-// Close closes the stream and popagates err to any readers.
+// Close closes the stream and propagates err to any readers.
func (s *ClientStream) Close(err error) {
var (
rst bool
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 513dbb93d..171e690a3 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -176,7 +176,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
return fn(ctx, address)
}
if !ok {
- networkType, address = parseDialTarget(address)
+ networkType, address = ParseDialTarget(address)
}
if opts, present := proxyattributes.Get(addr); present {
return proxyDial(ctx, addr, grpcUA, opts)
@@ -1242,7 +1242,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
statusCode = codes.DeadlineExceeded
}
}
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+ st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)
+ t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false)
}
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
@@ -1390,8 +1391,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = GoAwayNoReason
- switch f.ErrCode {
- case http2.ErrCodeEnhanceYourCalm:
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = GoAwayTooManyPings
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 997b0a59b..7e53eb173 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,6 +35,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
@@ -598,6 +599,22 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
+ // Start a timer to close the stream on reaching the deadline.
+ if timeoutSet {
+ // We need to wait for s.cancel to be updated before calling
+ // t.closeStream to avoid data races.
+ cancelUpdated := make(chan struct{})
+ timer := internal.TimeAfterFunc(timeout, func() {
+ <-cancelUpdated
+ t.closeStream(s, true, http2.ErrCodeCancel, false)
+ })
+ oldCancel := s.cancel
+ s.cancel = func() {
+ oldCancel()
+ timer.Stop()
+ }
+ close(cancelUpdated)
+ }
t.mu.Unlock()
if channelz.IsOn() {
t.channelz.SocketMetrics.StreamsStarted.Add(1)
@@ -1274,7 +1291,6 @@ func (t *http2Server) Close(err error) {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
-
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
delete(t.activeStreams, s.id)
@@ -1324,7 +1340,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo
// called to interrupt the potential blocking on other goroutines.
s.cancel()
- s.swapState(streamDone)
+ oldState := s.swapState(streamDone)
+ if oldState == streamDone {
+ return
+ }
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
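The operateHeaders change above arms a deadline timer and swaps s.cancel for a wrapper that also stops it, using a channel so the timer callback cannot run before the swap completes. A standalone sketch of that pattern using only the standard library; none of the names below belong to the grpc transport:

// Illustrative only: wrap an existing cancel func so it also stops a
// deadline timer, and gate the timer callback on cancelUpdated to avoid
// racing with the swap.
package main

import (
	"fmt"
	"time"
)

func main() {
	cancelled := false
	oldCancel := func() { cancelled = true }

	cancelUpdated := make(chan struct{})
	timer := time.AfterFunc(50*time.Millisecond, func() {
		<-cancelUpdated // wait until cancel has been replaced
		fmt.Println("deadline hit, closing stream")
	})
	cancel := func() {
		oldCancel()
		timer.Stop()
	}
	close(cancelUpdated)

	cancel() // finishing before the deadline stops the timer
	time.Sleep(100 * time.Millisecond)
	fmt.Println("cancelled:", cancelled)
}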
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 3613d7b64..f997f9fdb 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -439,8 +439,8 @@ func getWriteBufferPool(size int) *sync.Pool {
return pool
}
-// parseDialTarget returns the network and address to pass to dialer.
-func parseDialTarget(target string) (string, string) {
+// ParseDialTarget returns the network and address to pass to dialer.
+func ParseDialTarget(target string) (string, string) {
net := "tcp"
m1 := strings.Index(target, ":")
m2 := strings.Index(target, ":/")
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
index a22a90151..cf8da0b52 100644
--- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -35,8 +35,10 @@ type ServerStream struct {
*Stream // Embed for common stream functionality.
st internalServerTransport
- ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
- cancel context.CancelFunc // invoked at the end of stream to cancel ctx.
+ ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
+ // cancel is invoked at the end of stream to cancel ctx. It also stops the
+ // timer for monitoring the rpc deadline if configured.
+ cancel func()
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client.
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
index ada5b9bb7..c3c15ac96 100644
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -18,16 +18,28 @@
package resolver
-type addressMapEntry struct {
+import (
+ "encoding/base64"
+ "sort"
+ "strings"
+)
+
+type addressMapEntry[T any] struct {
addr Address
- value any
+ value T
}
-// AddressMap is a map of addresses to arbitrary values taking into account
+// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming
+// release of grpc-go.
+//
+// Deprecated: use the generic AddressMapV2 type instead.
+type AddressMap = AddressMapV2[any]
+
+// AddressMapV2 is a map of addresses to arbitrary values taking into account
// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
// Multiple accesses may not be performed concurrently. Must be created via
// NewAddressMap; do not construct directly.
-type AddressMap struct {
+type AddressMapV2[T any] struct {
// The underlying map is keyed by an Address with fields that we don't care
// about being set to their zero values. The only fields that we care about
// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
@@ -41,23 +53,30 @@ type AddressMap struct {
// The value type of the map contains a slice of addresses which match the key
// in their `Addr` and `ServerName` fields and contain the corresponding value
// associated with them.
- m map[Address]addressMapEntryList
+ m map[Address]addressMapEntryList[T]
}
func toMapKey(addr *Address) Address {
return Address{Addr: addr.Addr, ServerName: addr.ServerName}
}
-type addressMapEntryList []*addressMapEntry
+type addressMapEntryList[T any] []*addressMapEntry[T]
-// NewAddressMap creates a new AddressMap.
+// NewAddressMap creates a new AddressMapV2[any].
+//
+// Deprecated: use the generic NewAddressMapV2 constructor instead.
func NewAddressMap() *AddressMap {
- return &AddressMap{m: make(map[Address]addressMapEntryList)}
+ return NewAddressMapV2[any]()
+}
+
+// NewAddressMapV2 creates a new AddressMapV2.
+func NewAddressMapV2[T any]() *AddressMapV2[T] {
+ return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])}
}
// find returns the index of addr in the addressMapEntry slice, or -1 if not
// present.
-func (l addressMapEntryList) find(addr Address) int {
+func (l addressMapEntryList[T]) find(addr Address) int {
for i, entry := range l {
// Attributes are the only thing to match on here, since `Addr` and
// `ServerName` are already equal.
@@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int {
}
// Get returns the value for the address in the map, if present.
-func (a *AddressMap) Get(addr Address) (value any, ok bool) {
+func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
return entryList[entry].value, true
}
- return nil, false
+ return value, false
}
// Set updates or adds the value to the address in the map.
-func (a *AddressMap) Set(addr Address, value any) {
+func (a *AddressMapV2[T]) Set(addr Address, value T) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
if entry := entryList.find(addr); entry != -1 {
entryList[entry].value = value
return
}
- a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
+ a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value})
}
// Delete removes addr from the map.
-func (a *AddressMap) Delete(addr Address) {
+func (a *AddressMapV2[T]) Delete(addr Address) {
addrKey := toMapKey(&addr)
entryList := a.m[addrKey]
entry := entryList.find(addr)
@@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) {
}
// Len returns the number of entries in the map.
-func (a *AddressMap) Len() int {
+func (a *AddressMapV2[T]) Len() int {
ret := 0
for _, entryList := range a.m {
ret += len(entryList)
@@ -116,7 +135,7 @@ func (a *AddressMap) Len() int {
}
// Keys returns a slice of all current map keys.
-func (a *AddressMap) Keys() []Address {
+func (a *AddressMapV2[T]) Keys() []Address {
ret := make([]Address, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
@@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address {
}
// Values returns a slice of all current map values.
-func (a *AddressMap) Values() []any {
- ret := make([]any, 0, a.Len())
+func (a *AddressMapV2[T]) Values() []T {
+ ret := make([]T, 0, a.Len())
for _, entryList := range a.m {
for _, entry := range entryList {
ret = append(ret, entry.value)
@@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any {
return ret
}
-type endpointNode struct {
- addrs map[string]struct{}
-}
-
-// Equal returns whether the unordered set of addrs are the same between the
-// endpoint nodes.
-func (en *endpointNode) Equal(en2 *endpointNode) bool {
- if len(en.addrs) != len(en2.addrs) {
- return false
- }
- for addr := range en.addrs {
- if _, ok := en2.addrs[addr]; !ok {
- return false
- }
- }
- return true
-}
-
-func toEndpointNode(endpoint Endpoint) endpointNode {
- en := make(map[string]struct{})
- for _, addr := range endpoint.Addresses {
- en[addr.Addr] = struct{}{}
- }
- return endpointNode{
- addrs: en,
- }
-}
+type endpointMapKey string
// EndpointMap is a map of endpoints to arbitrary values keyed on only the
// unordered set of address strings within an endpoint. This map is not thread
// safe, thus it is unsafe to access concurrently. Must be created via
// NewEndpointMap; do not construct directly.
-type EndpointMap struct {
- endpoints map[*endpointNode]any
+type EndpointMap[T any] struct {
+ endpoints map[endpointMapKey]endpointData[T]
+}
+
+type endpointData[T any] struct {
+ // decodedKey stores the original key to avoid decoding when iterating on
+ // EndpointMap keys.
+ decodedKey Endpoint
+ value T
}
// NewEndpointMap creates a new EndpointMap.
-func NewEndpointMap() *EndpointMap {
- return &EndpointMap{
- endpoints: make(map[*endpointNode]any),
+func NewEndpointMap[T any]() *EndpointMap[T] {
+ return &EndpointMap[T]{
+ endpoints: make(map[endpointMapKey]endpointData[T]),
}
}
+// encodeEndpoint returns a string that uniquely identifies the unordered set of
+// addresses within an endpoint.
+func encodeEndpoint(e Endpoint) endpointMapKey {
+ addrs := make([]string, 0, len(e.Addresses))
+ // base64 encoding the address strings restricts the characters present
+ // within the strings. This allows us to use a delimiter without the need for
+ // escape characters.
+ for _, addr := range e.Addresses {
+ addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
+ }
+ sort.Strings(addrs)
+ // " " should not appear in base64 encoded strings.
+ return endpointMapKey(strings.Join(addrs, " "))
+}
+
// Get returns the value for the address in the map, if present.
-func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
- en := toEndpointNode(e)
- if endpoint := em.find(en); endpoint != nil {
- return em.endpoints[endpoint], true
+func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) {
+ val, found := em.endpoints[encodeEndpoint(e)]
+ if found {
+ return val.value, true
}
- return nil, false
+ return value, false
}
// Set updates or adds the value to the address in the map.
-func (em *EndpointMap) Set(e Endpoint, value any) {
- en := toEndpointNode(e)
- if endpoint := em.find(en); endpoint != nil {
- em.endpoints[endpoint] = value
- return
+func (em *EndpointMap[T]) Set(e Endpoint, value T) {
+ en := encodeEndpoint(e)
+ em.endpoints[en] = endpointData[T]{
+ decodedKey: Endpoint{Addresses: e.Addresses},
+ value: value,
}
- em.endpoints[&en] = value
}
// Len returns the number of entries in the map.
-func (em *EndpointMap) Len() int {
+func (em *EndpointMap[T]) Len() int {
return len(em.endpoints)
}
@@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int {
// the unordered set of addresses. Thus, endpoint information returned is not
// the full endpoint data (drops duplicated addresses and attributes) but can be
// used for EndpointMap accesses.
-func (em *EndpointMap) Keys() []Endpoint {
+func (em *EndpointMap[T]) Keys() []Endpoint {
ret := make([]Endpoint, 0, len(em.endpoints))
- for en := range em.endpoints {
- var endpoint Endpoint
- for addr := range en.addrs {
- endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
- }
- ret = append(ret, endpoint)
+ for _, en := range em.endpoints {
+ ret = append(ret, en.decodedKey)
}
return ret
}
// Values returns a slice of all current map values.
-func (em *EndpointMap) Values() []any {
- ret := make([]any, 0, len(em.endpoints))
+func (em *EndpointMap[T]) Values() []T {
+ ret := make([]T, 0, len(em.endpoints))
for _, val := range em.endpoints {
- ret = append(ret, val)
+ ret = append(ret, val.value)
}
return ret
}
-// find returns a pointer to the endpoint node in em if the endpoint node is
-// already present. If not found, nil is returned. The comparisons are done on
-// the unordered set of addresses within an endpoint.
-func (em EndpointMap) find(e endpointNode) *endpointNode {
- for endpoint := range em.endpoints {
- if e.Equal(endpoint) {
- return endpoint
- }
- }
- return nil
-}
-
// Delete removes the specified endpoint from the map.
-func (em *EndpointMap) Delete(e Endpoint) {
- en := toEndpointNode(e)
- if entry := em.find(en); entry != nil {
- delete(em.endpoints, entry)
- }
+func (em *EndpointMap[T]) Delete(e Endpoint) {
+ en := encodeEndpoint(e)
+ delete(em.endpoints, en)
}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 945e24ff8..80e16a327 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -134,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
return nil
}
if s.Endpoints == nil {
- s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
- for _, a := range s.Addresses {
- ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
- ep.Addresses[0].BalancerAttributes = nil
- s.Endpoints = append(s.Endpoints, ep)
- }
+ s.Endpoints = addressesToEndpoints(s.Addresses)
}
ccr.addChannelzTraceEvent(s)
ccr.curState = s
@@ -172,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
ccr.cc.mu.Unlock()
return
}
- s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
+ s := resolver.State{
+ Addresses: addrs,
+ ServiceConfig: ccr.curState.ServiceConfig,
+ Endpoints: addressesToEndpoints(addrs),
+ }
ccr.addChannelzTraceEvent(s)
ccr.curState = s
ccr.mu.Unlock()
@@ -210,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
}
channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
}
+
+func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
+ endpoints := make([]resolver.Endpoint, 0, len(addrs))
+ for _, a := range addrs {
+ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+ ep.Addresses[0].BalancerAttributes = nil
+ endpoints = append(endpoints, ep)
+ }
+ return endpoints
+}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index a8ddb0af5..ad20e9dff 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -870,13 +870,19 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
}
- out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool)
+ // Read at most one byte more than the limit from the decompressor.
+ // Unless the limit is MaxInt64, in which case, that's impossible, so
+ // apply no limit.
+ if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
+ dcReader = io.LimitReader(dcReader, limit+1)
+ }
+ out, err := mem.ReadAll(dcReader, pool)
if err != nil {
out.Free()
return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
}
- if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) {
+ if out.Len() > maxReceiveMessageSize {
out.Free()
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
}
@@ -885,12 +891,6 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompress
return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
}
-// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF.
-func atEOF(dcReader io.Reader) bool {
- n, err := dcReader.Read(make([]byte, 1))
- return n == 0 && err == io.EOF
-}
-
type recvCompressor interface {
RecvCompress() string
}
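The decompress change above replaces the EOF probe with a read capped at limit+1 bytes, so an oversized message is detectable without reading the whole decompressed stream (the real code skips the cap when the limit is MaxInt64). A self-contained sketch of that pattern; the function and variable names here are illustrative and not part of the gRPC API:

// Illustrative size-check pattern: read at most limit+1 bytes, then compare.
package main

import (
	"bytes"
	"fmt"
	"io"
)

func readLimited(r io.Reader, limit int) ([]byte, error) {
	lr := io.LimitReader(r, int64(limit)+1)
	data, err := io.ReadAll(lr)
	if err != nil {
		return nil, err
	}
	if len(data) > limit {
		return nil, fmt.Errorf("message larger than max %d bytes", limit)
	}
	return data, nil
}

func main() {
	_, err := readLimited(bytes.NewReader(make([]byte, 10)), 8)
	fmt.Println(err) // message larger than max 8 bytes
}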
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 6f20d2d54..baf7740ef 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -36,7 +36,12 @@ type RPCStats interface {
IsClient() bool
}
-// Begin contains stats when an RPC attempt begins.
+// Begin contains stats for the start of an RPC attempt.
+//
+// - Server-side: Triggered after `InHeader`, as headers are processed
+// before the RPC lifecycle begins.
+// - Client-side: The first stats event recorded.
+//
// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
@@ -69,7 +74,7 @@ func (*PickerUpdated) IsClient() bool { return true }
func (*PickerUpdated) isRPCStats() {}
-// InPayload contains the information for an incoming payload.
+// InPayload contains stats about an incoming payload.
type InPayload struct {
// Client is true if this InPayload is from client side.
Client bool
@@ -98,7 +103,9 @@ func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
-// InHeader contains stats when a header is received.
+// InHeader contains stats about header reception.
+//
+// - Server-side: The first stats event after the RPC request is received.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
@@ -123,7 +130,7 @@ func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
-// InTrailer contains stats when a trailer is received.
+// InTrailer contains stats about trailer reception.
type InTrailer struct {
// Client is true if this InTrailer is from client side.
Client bool
@@ -139,7 +146,7 @@ func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
-// OutPayload contains the information for an outgoing payload.
+// OutPayload contains stats about an outgoing payload.
type OutPayload struct {
// Client is true if this OutPayload is from client side.
Client bool
@@ -166,7 +173,10 @@ func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
-// OutHeader contains stats when a header is sent.
+// OutHeader contains stats about header transmission.
+//
+// - Client-side: Only occurs after 'Begin', as headers are always the first
+// thing sent on a stream.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
@@ -189,14 +199,15 @@ func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
-// OutTrailer contains stats when a trailer is sent.
+// OutTrailer contains stats about trailer transmission.
type OutTrailer struct {
// Client is true if this OutTrailer is from client side.
Client bool
// WireLength is the wire length of trailer.
//
- // Deprecated: This field is never set. The length is not known when this message is
- // emitted because the trailer fields are compressed with hpack after that.
+ // Deprecated: This field is never set. The length is not known when this
+ // message is emitted because the trailer fields are compressed with hpack
+ // after that.
WireLength int
// Trailer contains the trailer metadata sent to the client. This
// field is only valid if this OutTrailer is from the server side.
@@ -208,7 +219,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
-// End contains stats when an RPC ends.
+// End contains stats about RPC completion.
type End struct {
// Client is true if this End is from client side.
Client bool
@@ -238,7 +249,7 @@ type ConnStats interface {
IsClient() bool
}
-// ConnBegin contains the stats of a connection when it is established.
+// ConnBegin contains stats about connection establishment.
type ConnBegin struct {
// Client is true if this ConnBegin is from client side.
Client bool
@@ -249,7 +260,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client }
func (s *ConnBegin) isConnStats() {}
-// ConnEnd contains the stats of a connection when it ends.
+// ConnEnd contains stats about connection termination.
type ConnEnd struct {
// Client is true if this ConnEnd is from client side.
Client bool
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 783c41f78..51da8ed59 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.71.0"
+const Version = "1.72.1"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e101ac4c6..80fd676ee 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -364,9 +364,9 @@ github.com/bytedance/sonic/loader/internal/abi
github.com/bytedance/sonic/loader/internal/iasm/expr
github.com/bytedance/sonic/loader/internal/iasm/x86_64
github.com/bytedance/sonic/loader/internal/rt
-# github.com/cenkalti/backoff/v4 v4.3.0
-## explicit; go 1.18
-github.com/cenkalti/backoff/v4
+# github.com/cenkalti/backoff/v5 v5.0.2
+## explicit; go 1.23
+github.com/cenkalti/backoff/v5
# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
@@ -586,8 +586,8 @@ github.com/gorilla/sessions
# github.com/gorilla/websocket v1.5.3
## explicit; go 1.12
github.com/gorilla/websocket
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1
-## explicit; go 1.22
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+## explicit; go 1.23.0
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -766,15 +766,15 @@ github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/promhttp/internal
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
-## explicit; go 1.21
+# github.com/prometheus/common v0.64.0
+## explicit; go 1.23.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.16.1
+## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -993,14 +993,14 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore
## explicit; go 1.22.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/contrib/bridges/prometheus v0.60.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/contrib/bridges/prometheus v0.61.0
+## explicit; go 1.23.0
go.opentelemetry.io/contrib/bridges/prometheus
-# go.opentelemetry.io/contrib/exporters/autoexport v0.60.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/contrib/exporters/autoexport v0.61.0
+## explicit; go 1.23.0
go.opentelemetry.io/contrib/exporters/autoexport
-# go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/contrib/instrumentation/runtime v0.61.0
+## explicit; go 1.23.0
go.opentelemetry.io/contrib/instrumentation/runtime
go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime
go.opentelemetry.io/contrib/instrumentation/runtime/internal/x
@@ -1021,64 +1021,64 @@ go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.24.0
go.opentelemetry.io/otel/semconv/v1.26.0
go.opentelemetry.io/otel/semconv/v1.7.0
-# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
-# go.opentelemetry.io/otel/exporters/prometheus v0.57.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/prometheus v0.58.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/prometheus
-# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/stdout/stdoutlog
-# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
-# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/stdout/stdouttrace
-# go.opentelemetry.io/otel/log v0.11.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/log v0.12.2
+## explicit; go 1.23.0
go.opentelemetry.io/otel/log
go.opentelemetry.io/otel/log/embedded
go.opentelemetry.io/otel/log/noop
@@ -1096,8 +1096,8 @@ go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
go.opentelemetry.io/otel/sdk/trace/tracetest
-# go.opentelemetry.io/otel/sdk/log v0.11.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/otel/sdk/log v0.12.2
+## explicit; go 1.23.0
go.opentelemetry.io/otel/sdk/log
# go.opentelemetry.io/otel/sdk/metric v1.36.0
## explicit; go 1.23.0
@@ -1113,8 +1113,8 @@ go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.5.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/proto/otlp v1.6.0
+## explicit; go 1.23.0
go.opentelemetry.io/proto/otlp/collector/logs/v1
go.opentelemetry.io/proto/otlp/collector/metrics/v1
go.opentelemetry.io/proto/otlp/collector/trace/v1
@@ -1248,15 +1248,15 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a
-## explicit; go 1.22
+# google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237
+## explicit; go 1.23.0
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a
-## explicit; go 1.22
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237
+## explicit; go 1.23.0
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.71.0
-## explicit; go 1.22.0
+# google.golang.org/grpc v1.72.1
+## explicit; go 1.23
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff