Diffstat (limited to 'vendor/go.opentelemetry.io/otel/sdk/trace')
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go | 122
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/doc.go | 3
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md | 35
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go | 63
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/provider.go | 40
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go | 8
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go | 6
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/span.go | 19
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go | 138
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go | 4
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go | 2
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/trace/version.go | 9
15 files changed, 414 insertions(+), 45 deletions(-)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index 6966ed861..9bc3e525d 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -6,24 +6,35 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"errors"
+ "fmt"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/env"
+ "go.opentelemetry.io/otel/sdk/trace/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
)
// Defaults for BatchSpanProcessorOptions.
const (
- DefaultMaxQueueSize = 2048
- DefaultScheduleDelay = 5000
+ DefaultMaxQueueSize = 2048
+ // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds.
+ DefaultScheduleDelay = 5000
+ // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds.
DefaultExportTimeout = 30000
DefaultMaxExportBatchSize = 512
)
+var queueFull = otelconv.ErrorTypeAttr("queue_full")
+
// BatchSpanProcessorOption configures a BatchSpanProcessor.
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
@@ -67,6 +78,11 @@ type batchSpanProcessor struct {
queue chan ReadOnlySpan
dropped uint32
+ selfObservabilityEnabled bool
+ callbackRegistration metric.Registration
+ spansProcessedCounter otelconv.SDKProcessorSpanProcessed
+ componentNameAttr attribute.KeyValue
+
batch []ReadOnlySpan
batchMutex sync.Mutex
timer *time.Timer
@@ -87,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
if maxExportBatchSize > maxQueueSize {
- if DefaultMaxExportBatchSize > maxQueueSize {
- maxExportBatchSize = maxQueueSize
- } else {
- maxExportBatchSize = DefaultMaxExportBatchSize
- }
+ maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize)
}
o := BatchSpanProcessorOptions{
@@ -112,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
stopCh: make(chan struct{}),
}
+ if x.SelfObservability.Enabled() {
+ bsp.selfObservabilityEnabled = true
+ bsp.componentNameAttr = componentName()
+
+ var err error
+ bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs(
+ bsp.componentNameAttr,
+ func() int64 { return int64(len(bsp.queue)) },
+ int64(bsp.o.MaxQueueSize),
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ }
+
bsp.stopWait.Add(1)
go func() {
defer bsp.stopWait.Done()
@@ -122,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
return bsp
}
+var processorIDCounter atomic.Int64
+
+// nextProcessorID returns an identifier for this batch span processor,
+// starting with 0 and incrementing by 1 each time it is called.
+func nextProcessorID() int64 {
+ return processorIDCounter.Add(1) - 1
+}
+
+func componentName() attribute.KeyValue {
+ id := nextProcessorID()
+ name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id)
+ return semconv.OTelComponentName(name)
+}
+
+// newBSPObs creates and returns a new set of metrics instruments and a
+// registration for a BatchSpanProcessor. It is the caller's responsibility
+// to unregister the registration when it is no longer needed.
+func newBSPObs(
+ cmpnt attribute.KeyValue,
+ qLen func() int64,
+ qMax int64,
+) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) {
+ meter := otel.GetMeterProvider().Meter(
+ selfObsScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
+
+ qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
+ err = errors.Join(err, e)
+
+ spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
+ err = errors.Join(err, e)
+
+ cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
+ attrs := metric.WithAttributes(cmpnt, cmpntT)
+
+ reg, e := meter.RegisterCallback(
+ func(_ context.Context, o metric.Observer) error {
+ o.ObserveInt64(qSize.Inst(), qLen(), attrs)
+ o.ObserveInt64(qCap.Inst(), qMax, attrs)
+ return nil
+ },
+ qSize.Inst(),
+ qCap.Inst(),
+ )
+ err = errors.Join(err, e)
+
+ return spansProcessed, reg, err
+}
+
// OnStart method does nothing.
-func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd method enqueues a ReadOnlySpan for later processing.
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
@@ -162,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
case <-ctx.Done():
err = ctx.Err()
}
+ if bsp.selfObservabilityEnabled {
+ err = errors.Join(err, bsp.callbackRegistration.Unregister())
+ }
})
return err
}
@@ -171,7 +254,7 @@ type forceFlushSpan struct {
flushed chan struct{}
}
-func (f forceFlushSpan) SpanContext() trace.SpanContext {
+func (forceFlushSpan) SpanContext() trace.SpanContext {
return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
}
@@ -274,6 +357,11 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, int64(l),
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor))
+ }
err := bsp.e.ExportSpans(ctx, bsp.batch)
// A new batch is always created after exporting, even if the batch failed to be exported.
@@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
case bsp.queue <- sd:
return true
case <-ctx.Done():
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, 1,
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
+ bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ }
return false
}
}
-func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() {
return false
}
@@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b
return true
default:
atomic.AddUint32(&bsp.dropped, 1)
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, 1,
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
+ bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ }
}
return false
}
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
-func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+func (bsp *batchSpanProcessor) MarshalLog() any {
return struct {
Type string
SpanExporter SpanExporter
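
The batch_span_processor.go changes above wire the processor's queue into self-observability metrics: `newBSPObs` registers a single metric callback that reports queue size and capacity on every collection, and the registration is unregistered during Shutdown. Below is a minimal sketch of that callback pattern written against the stable `go.opentelemetry.io/otel/metric` API rather than the generated otelconv helpers; the function name, the queue accessor, and the instrument names (taken from the semconv names referenced above) are illustrative assumptions, not the patch's actual code.

```go
// Sketch only: the observable-callback pattern newBSPObs relies on.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func registerQueueObservers(queueLen func() int64, queueCap int64) (metric.Registration, error) {
	meter := otel.GetMeterProvider().Meter("go.opentelemetry.io/otel/sdk/trace")

	qSize, err := meter.Int64ObservableUpDownCounter("otel.sdk.processor.span.queue.size")
	if err != nil {
		return nil, err
	}
	qCap, err := meter.Int64ObservableUpDownCounter("otel.sdk.processor.span.queue.capacity")
	if err != nil {
		return nil, err
	}

	// One callback observes both instruments on every metric collection.
	return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(qSize, queueLen())
		o.ObserveInt64(qCap, queueCap)
		return nil
	}, qSize, qCap)
}

func main() {
	queue := make(chan struct{}, 2048)
	reg, err := registerQueueObservers(func() int64 { return int64(len(queue)) }, int64(cap(queue)))
	if err != nil {
		fmt.Println("register:", err)
		return
	}
	defer reg.Unregister() // mirrors what the patch does in Shutdown
}
```
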
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
index 1f60524e3..e58e7f6ed 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing.
The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io.
+
+See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about
+the experimental features.
*/
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
index c8d3fb7e3..3649322a6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -32,7 +32,7 @@ type randomIDGenerator struct{}
var _ IDGenerator = &randomIDGenerator{}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID {
sid := trace.SpanID{}
for {
binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
@@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
// NewIDs returns a non-zero trace ID and a non-zero span ID from a
// randomly-chosen sequence.
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) {
tid := trace.TraceID{}
sid := trace.SpanID{}
for {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
new file mode 100644
index 000000000..feec16fa6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
@@ -0,0 +1,35 @@
+# Experimental Features
+
+The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Self-Observability](#self-observability)
+
+### Self-Observability
+
+The SDK provides a self-observability feature that allows you to monitor the SDK itself.
+
+To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
+
+When enabled, the SDK will create the following metrics using the global `MeterProvider`:
+
+- `otel.sdk.span.live`
+- `otel.sdk.span.started`
+
+Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.
+
+[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
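
The README above describes opting in via `OTEL_GO_X_SELF_OBSERVABILITY` and lists the metrics that appear. As a hedged end-to-end sketch (not part of the patch), the program below sets the flag, installs a global MeterProvider with a manual reader, starts one span, and prints the collected metric names; the reader setup and printing are just one way to surface `otel.sdk.span.started` and `otel.sdk.span.live`.

```go
// Sketch only: opt in to self-observability and collect the resulting metrics.
// The env var must be set before the tracer is created.
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

	// The SDK reports its own metrics through the global MeterProvider.
	reader := sdkmetric.NewManualReader()
	otel.SetMeterProvider(sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)))

	tp := sdktrace.NewTracerProvider()
	defer tp.Shutdown(context.Background())

	_, span := tp.Tracer("example").Start(context.Background(), "work")
	span.End()

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(context.Background(), &rm)
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			fmt.Println(m.Name) // expect otel.sdk.span.started and otel.sdk.span.live
		}
	}
}
```
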
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
new file mode 100644
index 000000000..2fcbbcc66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace].
+package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// SelfObservability is an experimental feature flag that determines if SDK
+// self-observability metrics are enabled.
+//
+// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled reports whether the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
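
The new x.go generalizes experimental-flag handling behind a generic `Feature[T]`: each flag only supplies an env-var suffix and a parse function, and empty values are treated like unset. Since internal packages cannot be imported from outside the SDK module, here is a self-contained sketch of the same pattern with illustrative names, assuming nothing beyond the standard library.

```go
// Sketch only: the Feature[T] flag pattern from internal/x, reproduced standalone.
package main

import (
	"fmt"
	"os"
	"strings"
)

type feature[T any] struct {
	key   string
	parse func(string) (T, bool)
}

func newFeature[T any](suffix string, parse func(string) (T, bool)) feature[T] {
	return feature[T]{key: "OTEL_GO_X_" + suffix, parse: parse}
}

func (f feature[T]) Enabled() bool {
	raw := os.Getenv(f.key)
	if raw == "" { // empty is treated the same as unset
		return false
	}
	_, ok := f.parse(raw)
	return ok
}

var selfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
	return v, strings.EqualFold(v, "true") // case-insensitive "true"
})

func main() {
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "True")
	fmt.Println(selfObservability.Enabled()) // true
}
```
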
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 0e2a2e7c6..37ce2ac87 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
+ "errors"
"fmt"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/sdk/trace/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
@@ -20,6 +26,7 @@ import (
const (
defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+ selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace"
)
// tracerProviderConfig.
@@ -45,7 +52,7 @@ type tracerProviderConfig struct {
}
// MarshalLog is the marshaling function used by the logging system to represent this Provider.
-func (cfg tracerProviderConfig) MarshalLog() interface{} {
+func (cfg tracerProviderConfig) MarshalLog() any {
return struct {
SpanProcessors []SpanProcessor
SamplerType string
@@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
t, ok := p.namedTracer[is]
if !ok {
t = &tracer{
- provider: p,
- instrumentationScope: is,
+ provider: p,
+ instrumentationScope: is,
+ selfObservabilityEnabled: x.SelfObservability.Enabled(),
+ }
+ if t.selfObservabilityEnabled {
+ var err error
+ t.spanLiveMetric, t.spanStartedMetric, err = newInst()
+ if err != nil {
+ msg := "failed to create self-observability metrics for tracer: %w"
+ err := fmt.Errorf(msg, err)
+ otel.Handle(err)
+ }
}
p.namedTracer[is] = t
}
@@ -184,6 +201,23 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
+func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) {
+ m := otel.GetMeterProvider().Meter(
+ selfObsScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ var err error
+ spanLiveMetric, e := otelconv.NewSDKSpanLive(m)
+ err = errors.Join(err, e)
+
+ spanStartedMetric, e := otelconv.NewSDKSpanStarted(m)
+ err = errors.Join(err, e)
+
+ return spanLiveMetric, spanStartedMetric, err
+}
+
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
// This check prevents calls during a shutdown.
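
In the provider.go changes, `newInst` obtains the per-tracer `otel.sdk.span.live` and `otel.sdk.span.started` instruments through generated `otelconv` constructors and joins any errors. A rough equivalent using only the stable metric API might look like the sketch below; the units and descriptions are assumptions inferred from the metric names, not copied from the generated code.

```go
// Sketch only: approximately what newInst builds, via the stable metric API.
package main

import (
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/sdk"
)

func newSpanInstruments() (metric.Int64UpDownCounter, metric.Int64Counter, error) {
	meter := otel.GetMeterProvider().Meter(
		"go.opentelemetry.io/otel/sdk/trace",
		metric.WithInstrumentationVersion(sdk.Version()),
	)

	spanLive, err1 := meter.Int64UpDownCounter(
		"otel.sdk.span.live",
		metric.WithUnit("{span}"),
		metric.WithDescription("Spans that have been started but not yet ended"),
	)
	spanStarted, err2 := meter.Int64Counter(
		"otel.sdk.span.started",
		metric.WithUnit("{span}"),
		metric.WithDescription("Spans that have been started"),
	)
	return spanLive, spanStarted, errors.Join(err1, err2)
}

func main() {
	if _, _, err := newSpanInstruments(); err != nil {
		otel.Handle(err) // report, as the patch does, but keep going
	}
}
```
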
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index aa7b262d0..689663d48 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler {
type alwaysOnSampler struct{}
-func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
return SamplingResult{
Decision: RecordAndSample,
Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
}
}
-func (as alwaysOnSampler) Description() string {
+func (alwaysOnSampler) Description() string {
return "AlwaysOnSampler"
}
@@ -131,14 +131,14 @@ func AlwaysSample() Sampler {
type alwaysOffSampler struct{}
-func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
return SamplingResult{
Decision: Drop,
Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
}
}
-func (as alwaysOffSampler) Description() string {
+func (alwaysOffSampler) Description() string {
return "AlwaysOffSampler"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
index 664e13e03..411d9ccdd 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
}
// OnStart does nothing.
-func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd immediately exports a ReadOnlySpan.
func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
@@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
}
// ForceFlush does nothing as there is no data to flush.
-func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+func (*simpleSpanProcessor) ForceFlush(context.Context) error {
return nil
}
// MarshalLog is the marshaling function used by the logging system to represent
// this Span Processor.
-func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+func (ssp *simpleSpanProcessor) MarshalLog() any {
return struct {
Type string
Exporter SpanExporter
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
index d511d0f27..63aa33780 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -35,7 +35,7 @@ type snapshot struct {
var _ ReadOnlySpan = snapshot{}
-func (s snapshot) private() {}
+func (snapshot) private() {}
// Name returns the name of the span.
func (s snapshot) Name() string {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 1785a4bbb..b376051fb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -61,6 +61,7 @@ type ReadOnlySpan interface {
InstrumentationScope() instrumentation.Scope
// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
+ //
// Deprecated: please use InstrumentationScope instead.
InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
// Resource returns information about the entity that produced the span.
@@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext {
return s.spanContext
}
-// IsRecording returns if this span is being recorded. If this span has ended
+// IsRecording reports whether this span is being recorded. If this span has ended
// this will return false.
func (s *recordingSpan) IsRecording() bool {
if s == nil {
@@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool {
return s.isRecording()
}
-// isRecording returns if this span is being recorded. If this span has ended
+// isRecording reports whether this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
@@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
s.mu.Unlock()
+ if s.tracer.selfObservabilityEnabled {
+ defer func() {
+ // Add the span to the context to ensure the metric is recorded
+ // with the correct span context.
+ ctx := trace.ContextWithSpan(context.Background(), s)
+ set := spanLiveSet(s.spanContext.IsSampled())
+ s.tracer.spanLiveMetric.AddSet(ctx, -1, set)
+ }()
+ }
+
sps := s.tracer.provider.getSpanProcessors()
if len(sps) == 0 {
return
@@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
s.addEvent(semconv.ExceptionEventName, opts...)
}
-func typeStr(i interface{}) string {
+func typeStr(i any) string {
t := reflect.TypeOf(i)
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
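
The `End` hunk above decrements the live-span count in a deferred call, attaching the ending span to a fresh context so the measurement is recorded with the correct span context. A small hedged sketch of that decrement using the stable API follows; `AddSet` is a generated otelconv helper, so `metric.WithAttributeSet` stands in for it here, and the parameter names are illustrative.

```go
// Sketch only: the live-span decrement performed in recordingSpan.End.
package selfobs

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/trace"
)

// onSpanEnd mirrors the deferred block in End: attach the ending span to a
// fresh context, then decrement otel.sdk.span.live with the cached
// sampling-result attribute set.
func onSpanEnd(spanLive metric.Int64UpDownCounter, s trace.Span, sampledSet attribute.Set) {
	ctx := trace.ContextWithSpan(context.Background(), s)
	spanLive.Add(ctx, -1, metric.WithAttributeSet(sampledSet))
}
```
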
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index 0b65ae9ab..e965c4cce 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -7,7 +7,9 @@ import (
"context"
"time"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -17,6 +19,10 @@ type tracer struct {
provider *TracerProvider
instrumentationScope instrumentation.Scope
+
+ selfObservabilityEnabled bool
+ spanLiveMetric otelconv.SDKSpanLive
+ spanStartedMetric otelconv.SDKSpanStarted
}
var _ trace.Tracer = &tracer{}
@@ -46,17 +52,25 @@ func (tr *tracer) Start(
}
s := tr.newSpan(ctx, name, &config)
+ newCtx := trace.ContextWithSpan(ctx, s)
+ if tr.selfObservabilityEnabled {
+ psc := trace.SpanContextFromContext(ctx)
+ set := spanStartedSet(psc, s)
+ tr.spanStartedMetric.AddSet(newCtx, 1, set)
+ }
+
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
sps := tr.provider.getSpanProcessors()
for _, sp := range sps {
+ // Use original context.
sp.sp.OnStart(ctx, rw)
}
}
if rtt, ok := s.(runtimeTracer); ok {
- ctx = rtt.runtimeTrace(ctx)
+ newCtx = rtt.runtimeTrace(newCtx)
}
- return trace.ContextWithSpan(ctx, s), s
+ return newCtx, s
}
type runtimeTracer interface {
@@ -112,11 +126,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo
if !isRecording(samplingResult) {
return tr.newNonRecordingSpan(sc)
}
- return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+ return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config)
}
// newRecordingSpan returns a new configured recordingSpan.
func (tr *tracer) newRecordingSpan(
+ ctx context.Context,
psc, sc trace.SpanContext,
name string,
sr SamplingResult,
@@ -153,6 +168,14 @@ func (tr *tracer) newRecordingSpan(
s.SetAttributes(sr.Attributes...)
s.SetAttributes(config.Attributes()...)
+ if tr.selfObservabilityEnabled {
+ // Propagate any existing values from the context with the new span to
+ // the measurement context.
+ ctx = trace.ContextWithSpan(ctx, s)
+ set := spanLiveSet(s.spanContext.IsSampled())
+ tr.spanLiveMetric.AddSet(ctx, 1, set)
+ }
+
return s
}
@@ -160,3 +183,112 @@ func (tr *tracer) newRecordingSpan(
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
return nonRecordingSpan{tracer: tr, sc: sc}
}
+
+type parentState int
+
+const (
+ parentStateNoParent parentState = iota
+ parentStateLocalParent
+ parentStateRemoteParent
+)
+
+type samplingState int
+
+const (
+ samplingStateDrop samplingState = iota
+ samplingStateRecordOnly
+ samplingStateRecordAndSample
+)
+
+type spanStartedSetKey struct {
+ parent parentState
+ sampling samplingState
+}
+
+var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{
+ {parentStateNoParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+ {parentStateLocalParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+ {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+
+ {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+ {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+ {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+
+ {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+ {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+ {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+}
+
+func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set {
+ key := spanStartedSetKey{
+ parent: parentStateNoParent,
+ sampling: samplingStateDrop,
+ }
+
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ key.parent = parentStateRemoteParent
+ } else {
+ key.parent = parentStateLocalParent
+ }
+ }
+
+ if span.IsRecording() {
+ if span.SpanContext().IsSampled() {
+ key.sampling = samplingStateRecordAndSample
+ } else {
+ key.sampling = samplingStateRecordOnly
+ }
+ }
+
+ return spanStartedSetCache[key]
+}
+
+type spanLiveSetKey struct {
+ sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+ {true}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ ),
+ {false}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ ),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+ key := spanLiveSetKey{sampled: sampled}
+ return spanLiveSetCache[key]
+}
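
The tracer.go block above precomputes every attribute set the span.started counter can emit (three parent origins times three sampling results), so the span-start hot path only does a map lookup instead of allocating attributes per span. Below is a self-contained sketch of the same precomputation trick using plain attribute keys; the key and value strings are illustrative, not the semconv constants used by the patch.

```go
// Sketch only: precomputing attribute.Sets so the hot path never allocates them.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

type startKey struct {
	parent   string // "none", "local", "remote"
	sampling string // "drop", "record_only", "record_and_sample"
}

// Built once at init time; Start only performs a map lookup afterwards.
var startedSets = func() map[startKey]attribute.Set {
	m := make(map[startKey]attribute.Set)
	for _, p := range []string{"none", "local", "remote"} {
		for _, s := range []string{"drop", "record_only", "record_and_sample"} {
			m[startKey{p, s}] = attribute.NewSet(
				attribute.String("otel.span.parent.origin", p),
				attribute.String("otel.span.sampling_result", s),
			)
		}
	}
	return m
}()

func main() {
	set := startedSets[startKey{"remote", "record_and_sample"}]
	fmt.Println(set.Encoded(attribute.DefaultEncoder()))
}
```
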
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
index 07117495a..e12fa67e6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
@@ -25,10 +25,10 @@ func NewNoopExporter() *NoopExporter {
type NoopExporter struct{}
// ExportSpans handles export of spans by dropping them.
-func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil }
+func (*NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil }
// Shutdown stops the exporter by doing nothing.
-func (nsb *NoopExporter) Shutdown(context.Context) error { return nil }
+func (*NoopExporter) Shutdown(context.Context) error { return nil }
var _ trace.SpanExporter = (*InMemoryExporter)(nil)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
index 732669a17..ca63038f3 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
@@ -47,14 +47,14 @@ func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) {
// Shutdown does nothing.
//
// This method is safe to be called concurrently.
-func (sr *SpanRecorder) Shutdown(context.Context) error {
+func (*SpanRecorder) Shutdown(context.Context) error {
return nil
}
// ForceFlush does nothing.
//
// This method is safe to be called concurrently.
-func (sr *SpanRecorder) ForceFlush(context.Context) error {
+func (*SpanRecorder) ForceFlush(context.Context) error {
return nil
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
index cd2cc30ca..12b384b08 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go
@@ -37,7 +37,7 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan {
}
ro := make([]tracesdk.ReadOnlySpan, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
ro[i] = s[i].Snapshot()
}
return ro
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
deleted file mode 100644
index b84dd2c5e..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-// version is the current release version of the metric SDK in use.
-func version() string {
- return "1.16.0-rc.1"
-}