Diffstat (limited to 'vendor/go.opentelemetry.io/otel/sdk/metric/internal')
15 files changed, 897 insertions, 37 deletions
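The diffs below thread two new capabilities through every aggregate function: exemplar reservoir sampling and a cardinality limit on measurement attributes. As a rough sketch of how the new `Builder` fields are intended to be wired together (hypothetical caller code; these packages are internal to the SDK module, so this only compiles from within it):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	b := aggregate.Builder[float64]{
		Temporality: metricdata.CumulativeTemporality,
		// Sample up to 4 exemplars per attribute set.
		ReservoirFunc: func() exemplar.Reservoir[float64] {
			return exemplar.FixedSize[float64](4)
		},
		// Admit at most 100 attribute sets; later distinct sets are
		// folded into the otel.metric.overflow stream.
		AggregationLimit: 100,
	}

	measure, collect := b.Sum(true)
	measure(context.Background(), 1.5, attribute.NewSet(attribute.String("host", "a")))

	var agg metricdata.Aggregation = metricdata.Sum[float64]{}
	fmt.Println(collect(&agg), "data point(s)") // 1 data point(s)
}
```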
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index 8dec14237..4060a2f76 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -19,6 +19,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -44,17 +45,43 @@ type Builder[N int64 | float64] struct { // Filter is the attribute filter the aggregate function will use on the // input of measurements. Filter attribute.Filter + // ReservoirFunc is the factory function used by aggregate functions to + // create new exemplar reservoirs for a newly seen attribute set. + // + // If this is not provided, a default factory function that returns an + // exemplar.Drop reservoir will be used. + ReservoirFunc func() exemplar.Reservoir[N] + // AggregationLimit is the cardinality limit of measurement attributes. Once + // the limit has been reached, any measurement for new attributes will be + // aggregated into a single aggregate for the "otel.metric.overflow" + // attribute. + // + // If AggregationLimit is less than or equal to zero, there will not be an + // aggregation limit imposed (i.e. unlimited attribute sets). + AggregationLimit int +} + +func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] { + if b.ReservoirFunc != nil { + return b.ReservoirFunc + } + + return exemplar.Drop[N] } -func (b Builder[N]) filter(f Measure[N]) Measure[N] { +type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) + +func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { if b.Filter != nil { fltr := b.Filter // Copy to make it immutable after assignment. return func(ctx context.Context, n N, a attribute.Set) { - fAttr, _ := a.Filter(fltr) - f(ctx, n, fAttr) + fAttr, dropped := a.Filter(fltr) + f(ctx, n, fAttr, dropped) } } - return f + return func(ctx context.Context, n N, a attribute.Set) { + f(ctx, n, a, nil) + } } // LastValue returns a last-value aggregate function input and output. @@ -63,7 +90,7 @@ func (b Builder[N]) filter(f Measure[N]) Measure[N] { func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { // Delta temporality is the only temporality that makes semantic sense for // a last-value aggregate. - lv := newLastValue[N]() + lv := newLastValue[N](b.AggregationLimit, b.resFunc()) return b.filter(lv.measure), func(dest *metricdata.Aggregation) int { // Ignore if dest is not a metricdata.Gauge. The chance for memory @@ -79,7 +106,7 @@ func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { // PrecomputedSum returns a sum aggregate function input and output. The // arguments passed to the input are expected to be the precomputed sum values. func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) { - s := newPrecomputedSum[N](monotonic) + s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(s.measure), s.delta @@ -90,7 +117,7 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati // Sum returns a sum aggregate function input and output.
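The rewritten `filter` above now forwards the attributes removed by the view's filter to the measure function, so reservoirs can attach them to exemplars as filtered attributes. The split it relies on is the public `attribute.Set.Filter`; a small demonstration with assumed example attributes:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("http.method", "GET"),
		attribute.String("http.target", "/api/v1/items"),
	)
	// Keep only http.method; everything else is reported as dropped.
	keep := attribute.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key == "http.method"
	})

	fAttr, dropped := set.Filter(keep)
	fmt.Println(fAttr.Len(), len(dropped)) // 1 1
}
```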
func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { - s := newSum[N](monotonic) + s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(s.measure), s.delta @@ -102,7 +129,7 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { // ExplicitBucketHistogram returns a histogram aggregate function input and // output. func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { - h := newHistogram[N](boundaries, noMinMax, noSum) + h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(h.measure), h.delta @@ -114,7 +141,7 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu // ExponentialBucketHistogram returns a histogram aggregate function input and // output. func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { - h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum) + h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(h.measure), h.delta diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index 98b7dc1e0..4139a6d15 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -23,6 +23,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -40,6 +41,8 @@ const ( // expoHistogramDataPoint is a single data point in an exponential histogram. type expoHistogramDataPoint[N int64 | float64] struct { + res exemplar.Reservoir[N] + count uint64 min N max N @@ -288,13 +291,15 @@ func (b *expoBuckets) downscale(delta int) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, maxSize: int(maxSize), maxScale: int(maxScale), + newRes: r, + limit: newLimiter[*expoHistogramDataPoint[N]](limit), values: make(map[attribute.Set]*expoHistogramDataPoint[N]), start: now(), @@ -309,27 +314,35 @@ type expoHistogram[N int64 | float64] struct { maxSize int maxScale int + newRes func() exemplar.Reservoir[N] + limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Set]*expoHistogramDataPoint[N] valuesMu sync.Mutex start time.Time } -func (e *expoHistogram[N]) measure(_ context.Context, value N, attr attribute.Set) { +func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { // Ignore NaN and infinity. 
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { return } + t := now() + e.valuesMu.Lock() defer e.valuesMu.Unlock() + attr := e.limit.Attributes(fltrAttr, e.values) v, ok := e.values[attr] if !ok { v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes() + e.values[attr] = v } v.record(value) + v.res.Offer(ctx, t, value, droppedAttr) } func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { @@ -362,6 +375,7 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin) hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts) if !e.noSum { hDPts[i].Sum = b.sum @@ -371,6 +385,8 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(b.max) } + b.res.Collect(&hDPts[i].Exemplars) + delete(e.values, a) i++ } @@ -410,6 +426,7 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin) hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts) if !e.noSum { hDPts[i].Sum = b.sum @@ -419,6 +436,8 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(b.max) } + b.res.Collect(&hDPts[i].Exemplars) + i++ // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 62ec51e1f..a9a4706bf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -21,10 +21,13 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type buckets[N int64 | float64] struct { + res exemplar.Reservoir[N] + counts []uint64 count uint64 total N @@ -54,11 +57,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 + newRes func() exemplar.Reservoir[N] + limit limiter[*buckets[N]] values map[attribute.Set]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -69,13 +74,15 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[ return &histValues[N]{ noSum: noSum, bounds: b, + newRes: r, + limit: newLimiter[*buckets[N]](limit), values: make(map[attribute.Set]*buckets[N]), } } // Aggregate records the measurement value, scoped by attr, and aggregates it // into a histogram. 
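The `histValues.measure` implementation that follows keys buckets off `sort.SearchFloat64s`. Its index convention, shown in isolation with assumed example bounds:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	bounds := []float64{0, 5, 10} // N bounds define N+1 buckets
	for _, v := range []float64{-1, 0, 3, 10, 12} {
		// Index i selects bucket i of (-inf,0], (0,5], (5,10], (10,+inf).
		fmt.Println(v, "-> bucket", sort.SearchFloat64s(bounds, v))
	}
	// Prints: -1 -> 0, 0 -> 0, 3 -> 1, 10 -> 2, 12 -> 3.
}
```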
-func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) { +func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { // This search will return an index in the range [0, len(s.bounds)], where // it will return len(s.bounds) if value is greater than the last element // of s.bounds. This aligns with the buckets in that the length of buckets @@ -83,9 +90,12 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) // (s.bounds[len(s.bounds)-1], +∞). idx := sort.SearchFloat64s(s.bounds, float64(value)) + t := now() + s.valuesMu.Lock() defer s.valuesMu.Unlock() + attr := s.limit.Attributes(fltrAttr, s.values) b, ok := s.values[attr] if !ok { // N+1 buckets. For example: @@ -96,6 +106,8 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) b = newBuckets[N](len(s.bounds) + 1) + b.res = s.newRes() + // Ensure min and max are recorded values (not zero), for new buckets. b.min, b.max = value, value s.values[attr] = b @@ -104,13 +116,14 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) if !s.noSum { b.sum(value) } + b.res.Offer(ctx, t, value, droppedAttr) } // newHistogram returns an Aggregator that summarizes a set of measurements as // a histogram. -func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] { return &histogram[N]{ - histValues: newHistValues[N](boundaries, noSum), + histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, start: now(), } } @@ -161,6 +174,8 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(b.max) } + b.res.Collect(&hDPts[i].Exemplars) + // Unused attribute sets do not report. delete(s.values, a) i++ @@ -217,6 +232,9 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].Min = metricdata.NewExtrema(b.min) hDPts[i].Max = metricdata.NewExtrema(b.max) } + + b.res.Collect(&hDPts[i].Exemplars) + i++ // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 6af2d6061..5699e728f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -20,6 +20,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -27,24 +28,43 @@ import ( type datapoint[N int64 | float64] struct { timestamp time.Time value N + res exemplar.Reservoir[N] } -func newLastValue[N int64 | float64]() *lastValue[N] { - return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])} +func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] { + return &lastValue[N]{ + newRes: r, + limit: newLimiter[datapoint[N]](limit), + values: make(map[attribute.Set]datapoint[N]), + } } // lastValue summarizes a set of measurements as the last one made.
type lastValue[N int64 | float64] struct { sync.Mutex + newRes func() exemplar.Reservoir[N] + limit limiter[datapoint[N]] values map[attribute.Set]datapoint[N] } -func (s *lastValue[N]) measure(ctx context.Context, value N, attr attribute.Set) { - d := datapoint[N]{timestamp: now(), value: value} +func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + t := now() + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + d, ok := s.values[attr] + if !ok { + d.res = s.newRes() + } + + d.timestamp = t + d.value = value + d.res.Offer(ctx, t, value, droppedAttr) + s.values[attr] = d - s.Unlock() } func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) { @@ -61,6 +81,7 @@ func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) { // ignored. (*dest)[i].Time = v.timestamp (*dest)[i].Value = v.value + v.res.Collect(&(*dest)[i].Exemplars) // Do not report stale values. delete(s.values, a) i++ diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go new file mode 100644 index 000000000..d3de84272 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import "go.opentelemetry.io/otel/attribute" + +// overflowSet is the attribute set used to record a measurement when adding +// another distinct attribute set to the aggregate would exceed the aggregate +// limit. +var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true)) + +// limiter limits aggregate values. +type limiter[V any] struct { + // aggLimit is the maximum number of metric streams that can be aggregated. + // + // Any metric stream with attributes distinct from those already + // aggregated will, once the aggLimit has been met, instead be aggregated + // into an "overflow" metric stream. That stream will only contain the + // "otel.metric.overflow"=true attribute. + aggLimit int +} + +// newLimiter returns a new limiter with the provided aggregation limit. +func newLimiter[V any](aggregation int) limiter[V] { + return limiter[V]{aggLimit: aggregation} +} + +// Attributes checks if adding a measurement for attrs will exceed the +// aggregation cardinality limit for the existing measurements. If it will, +// overflowSet is returned. Otherwise, if it will not exceed the limit, or the +// limit is not set (limit <= 0), attrs is returned.
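The `Attributes` implementation follows directly below. Note that its rule reserves one slot for the overflow stream: with a limit of 3, only two distinct user attribute sets are admitted before new sets divert to overflow. A runnable mirror of that rule using only public attribute APIs (`attributesFor` is a hypothetical stand-in for the internal method, not SDK API):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))

// attributesFor mirrors limiter.Attributes: admit attrs while fewer than
// limit-1 distinct sets are stored, otherwise divert new sets to overflow.
func attributesFor(limit int, attrs attribute.Set, seen map[attribute.Set]int) attribute.Set {
	if limit > 0 {
		if _, ok := seen[attrs]; !ok && len(seen) >= limit-1 {
			return overflowSet
		}
	}
	return attrs
}

func main() {
	seen := map[attribute.Set]int{}
	for _, host := range []string{"a", "b", "c", "d"} {
		s := attribute.NewSet(attribute.String("host", host))
		got := attributesFor(3, s, seen)
		seen[got]++
		fmt.Println(host, "->", got.Encoded(attribute.DefaultEncoder()))
	}
	// a and b keep their own sets; c and d share otel.metric.overflow=true.
}
```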
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Set]V) attribute.Set { + if l.aggLimit > 0 { + _, exists := measurements[attrs] + if !exists && len(measurements) >= l.aggLimit-1 { + return overflowSet + } + } + + return attrs +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 1e52ff0d1..02de2483f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -20,31 +20,55 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) +type sumValue[N int64 | float64] struct { + n N + res exemplar.Reservoir[N] +} + // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex - values map[attribute.Set]N + newRes func() exemplar.Reservoir[N] + limit limiter[sumValue[N]] + values map[attribute.Set]sumValue[N] } -func newValueMap[N int64 | float64]() *valueMap[N] { - return &valueMap[N]{values: make(map[attribute.Set]N)} +func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] { + return &valueMap[N]{ + newRes: r, + limit: newLimiter[sumValue[N]](limit), + values: make(map[attribute.Set]sumValue[N]), + } } -func (s *valueMap[N]) measure(_ context.Context, value N, attr attribute.Set) { +func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + t := now() + s.Lock() - s.values[attr] += value - s.Unlock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + v, ok := s.values[attr] + if !ok { + v.res = s.newRes() + } + + v.n += value + v.res.Offer(ctx, t, value, droppedAttr) + + s.values[attr] = v } // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -func newSum[N int64 | float64](monotonic bool) *sum[N] { +func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] { return &sum[N]{ - valueMap: newValueMap[N](), + valueMap: newValueMap[N](limit, r), monotonic: monotonic, start: now(), } @@ -74,11 +98,12 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int { dPts := reset(sData.DataPoints, n, n) var i int - for attr, value := range s.values { + for attr, val := range s.values { dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = value + dPts[i].Value = val.n + val.res.Collect(&dPts[i].Exemplars) // Do not report stale values. delete(s.values, attr) i++ @@ -112,7 +137,8 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = value + dPts[i].Value = value.n + value.res.Collect(&dPts[i].Exemplars) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not @@ -129,9 +155,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { // newPrecomputedSum returns an aggregator that summarizes a set of // observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] { return &precomputedSum[N]{ - valueMap: newValueMap[N](), + valueMap: newValueMap[N](limit, r), monotonic: monotonic, start: now(), } @@ -165,14 +191,15 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { var i int for attr, value := range s.values { - delta := value - s.reported[attr] + delta := value.n - s.reported[attr] dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta + value.res.Collect(&dPts[i].Exemplars) - newReported[attr] = value + newReported[attr] = value.n // Unused attribute sets do not report. delete(s.values, attr) i++ @@ -204,11 +231,12 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { dPts := reset(sData.DataPoints, n, n) var i int - for attr, value := range s.values { + for attr, val := range s.values { dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = value + dPts[i].Value = val.n + val.res.Collect(&dPts[i].Exemplars) // Unused attribute sets do not report. delete(s.values, attr) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go new file mode 100644 index 000000000..3caeb542c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go new file mode 100644 index 000000000..39bf37b9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Drop returns a [Reservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} } + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing; all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) { + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go new file mode 100644 index 000000000..4f5946fb9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// SampledFilter returns a [Reservoir] wrapping r that will only offer measurements +// to r if the passed context associated with the measurement contains a sampled +// [go.opentelemetry.io/otel/trace.SpanContext]. +func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] { + return filtered[N]{Reservoir: r} +} + +type filtered[N int64 | float64] struct { + Reservoir[N] +} + +func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { + if trace.SpanContextFromContext(ctx).IsSampled() { + f.Reservoir.Offer(ctx, t, n, a) + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go new file mode 100644 index 000000000..6f4fe5524 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
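`SampledFilter` above gates `Offer` on nothing more than `IsSampled` of the span context carried in ctx. A minimal check of that predicate using only public trace APIs (the trace and span IDs are placeholder values):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    trace.TraceID{0x01},
		SpanID:     trace.SpanID{0x01},
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	// SampledFilter forwards a measurement only when this reports true.
	fmt.Println(trace.SpanContextFromContext(ctx).IsSampled())                   // true
	fmt.Println(trace.SpanContextFromContext(context.Background()).IsSampled()) // false
}
```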
+ +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "sort" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Histogram returns a [Reservoir] that samples the last measurement that falls +// within a histogram bucket. The histogram bucket upper-boundaries are defined +// by bounds. +// +// The passed bounds will be sorted by this function. +func Histogram[N int64 | float64](bounds []float64) Reservoir[N] { + sort.Float64s(bounds) + return &histRes[N]{ + bounds: bounds, + storage: newStorage[N](len(bounds) + 1), + } +} + +type histRes[N int64 | float64] struct { + *storage[N] + + // bounds are bucket bounds in ascending order. + bounds []float64 +} + +func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { + r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go new file mode 100644 index 000000000..7f9fda5b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "math" + "math/rand" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// rng is used to make sampling decisions. +// +// Do not use crypto/rand. There is no reason for the decrease in performance +// given this is not a security-sensitive decision. +var rng = rand.New(rand.NewSource(time.Now().UnixNano())) + +// random returns, as a float64, a uniform pseudo-random number in the open +// interval (0.0,1.0). +func random() float64 { + // TODO: This does not return a uniform number. rng.Float64 returns a + // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it + // returns multiples of 2^-53, and not all floating point numbers between 0 + // and 1 (i.e. for values less than 2^-4 the last 4 bits of the significand + // are always going to be 0). + // + // An alternative algorithm should be considered that will actually return + // a uniform number in the interval (0,1). For example, since the default + // rand source provides a uniform distribution for Int63, this can be + // converted following the prototypical code of Mersenne Twister 64 (Takuji + // Nishimura and Makoto Matsumoto: + // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) + // + // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) + // + // There are likely many other methods to explore here as well. + + f := rng.Float64() + for f == 0 { + f = rng.Float64() + } + return f +} + +// FixedSize returns a [Reservoir] that samples at most k exemplars.
If there +// are k or fewer measurements made, the Reservoir will sample each one. If +// there are more than k, the Reservoir will then randomly sample each +// additional measurement with a decreasing probability. +func FixedSize[N int64 | float64](k int) Reservoir[N] { + r := &randRes[N]{storage: newStorage[N](k)} + r.reset() + return r +} + +type randRes[N int64 | float64] struct { + *storage[N] + + // count is the number of measurements seen. + count int64 + // next is the next count that will store a measurement at a random index + // once the reservoir has been filled. + next int64 + // w is the largest random number in a distribution that is used to compute + // the next value of next. + w float64 +} + +func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { + // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December + // 1994). "Reservoir-Sampling Algorithms of Time Complexity + // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): + // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). + // + // A high-level overview of "Algorithm L": + // 0) Pre-calculate the random count greater than the storage size at + // which an exemplar will be replaced. + // 1) Accept all measurements offered until the configured storage size is + // reached. + // 2) Loop: + // a) When the pre-calculated count is reached, replace a random + // existing exemplar with the offered measurement. + // b) Calculate the next random count greater than the existing one + // that will replace another exemplar. + // + // The way a "replacement" count is computed is by looking at `n` number of + // independent random numbers each corresponding to an offered measurement. + // Of these numbers, the smallest `k` (the same size as the storage + // capacity) are kept as a subset. The maximum value in this + // subset, called `w`, is used to weight another random number generation + // for the next count that will be considered. + // + // By weighting the next count computation as described, it is able to + // perform a uniformly-weighted sampling algorithm based on the number of + // samples the reservoir has seen so far. The sampling will "slow down" as + // more and more samples are offered so as to reduce a bias towards those + // offered just prior to the end of the collection. + // + // This algorithm is preferred because of its balance of simplicity and + // performance. It will compute three random numbers (the bulk of + // computation time) for each item that becomes part of the reservoir, but + // it does not spend any time on items that do not. In particular it has an + // asymptotic runtime of O(k(1 + log(n/k))) where n is the number of + // measurements offered and k is the reservoir size. + // + // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of + // this and other reservoir sampling algorithms. See + // https://github.com/MrAlias/reservoir-sampling for a performance + // comparison of reservoir sampling algorithms. + + if int(r.count) < cap(r.store) { + r.store[r.count] = newMeasurement(ctx, t, n, a) + } else { + if r.count == r.next { + // Overwrite a random existing measurement with the one offered. + idx := int(rng.Int63n(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() + } + } + r.count++ +} + +// reset resets r to the initial state. +func (r *randRes[N]) reset() { + // This resets the number of exemplars known.
+ r.count = 0 + // Random index inserts should only happen after the storage is full. + r.next = int64(cap(r.store)) + + // Initial random number in the series used to generate r.next. + // + // This is set before r.advance to reset or initialize the random number + // series. Without doing so it would always be 0 or never restart a new + // random number series. + // + // This maps the uniform random number in (0,1) to a geometric distribution + // over the same interval. The mean of the distribution is inversely + // proportional to the storage capacity. + r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) + + r.advance() +} + +// advance updates the count at which the offered measurement will overwrite an +// existing exemplar. +func (r *randRes[N]) advance() { + // Calculate the next value in the random number series. + // + // The current value of r.w is based on the max of a distribution of random + // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity + // of the storage and each `u` in the interval (0,w)). To calculate the + // next r.w we use the fact that when the next exemplar is selected to be + // included in the storage an existing one will be dropped, and the + // corresponding random number in the set used to calculate r.w will also + // be replaced. The replacement random number will also be within (0,w); + // therefore, the next r.w will be based on the same distribution (i.e. + // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by + // computing the next random number `u` and taking r.w as `w * u^(1/k)`. + r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) + // Use the new random number in the series to calculate the count of the + // next measurement that will be stored. + // + // Given 0 < r.w < 1, each iteration will result in subsequent r.w being + // smaller. This translates here into the next value of next being selected + // against a distribution with a higher mean (i.e. the expected value will + // increase and replacements become less likely). + // + // Important to note, the new r.next will always be at least 1 more than + // the last r.next. + r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 +} + +func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) { + r.storage.Collect(dest) + // Call reset here even though it will reset r.count and restart the random + // number series. This will persist any old exemplars as long as no new + // measurements are offered, but it will also prioritize those new + // measurements that are made over the older collection cycle ones. + r.reset() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go new file mode 100644 index 000000000..7d5276a34 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
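The skip-ahead sampling in `randRes` above can be hard to follow interleaved with reservoir state. A compact standalone rendition of Algorithm L under the same open-interval random-number convention (`sample` is a hypothetical helper for illustration, not part of the SDK):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// sample draws k indices uniformly from the stream 0..n-1 (n >= k) using
// Algorithm L: fill the reservoir, then jump between replacements with
// geometrically growing gaps.
func sample(k, n int, rng *rand.Rand) []int {
	// u returns a random float64 in the open interval (0,1), mirroring
	// random() in rand.go so math.Log never sees 0.
	u := func() float64 {
		f := rng.Float64()
		for f == 0 {
			f = rng.Float64()
		}
		return f
	}

	res := make([]int, k)
	for i := range res {
		res[i] = i // fill phase: accept the first k items
	}

	w := math.Exp(math.Log(u()) / float64(k))
	next := k + int(math.Log(u())/math.Log(1-w)) + 1
	for i := k; i < n; i++ {
		if i == next {
			res[rng.Intn(k)] = i // replace a random slot
			w *= math.Exp(math.Log(u()) / float64(k))
			next += int(math.Log(u())/math.Log(1-w)) + 1
		}
	}
	return res
}

func main() {
	rng := rand.New(rand.NewSource(1))
	fmt.Println(sample(4, 1000, rng)) // four indices, uniform over 0..999
}
```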
+ +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]metricdata.Exemplar[N]) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go new file mode 100644 index 000000000..e2c2b90a3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage[N int64 | float64] struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement[N] +} + +func newStorage[N int64 | float64](n int) *storage[N] { + return &storage[N]{store: make([]measurement[N], n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.Exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement[N int64 | float64] struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value N + // SpanContext is the SpanContext active when a measurement was made. 
+ SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty measurement. +func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] { + return measurement[N]{ + FilteredAttributes: droppedAttr, + Time: ts, + Value: v, + SpanContext: trace.SpanContextFromContext(ctx), + valid: true, + } +} + +// Exemplar returns m as a [metricdata.Exemplar]. +func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) { + dest.FilteredAttributes = m.FilteredAttributes + dest.Time = m.Time + dest.Value = m.Value + + if m.SpanContext.HasTraceID() { + traceID := m.SpanContext.TraceID() + dest.TraceID = traceID[:] + } else { + dest.TraceID = dest.TraceID[:0] + } + + if m.SpanContext.HasSpanID() { + spanID := m.SpanContext.SpanID() + dest.SpanID = spanID[:] + } else { + dest.SpanID = dest.SpanID[:0] + } +} + +func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md new file mode 100644 index 000000000..aba69d654 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md @@ -0,0 +1,112 @@ +# Experimental Features + +The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards-incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Cardinality Limit](#cardinality-limit) +- [Exemplars](#exemplars) + +### Cardinality Limit + +The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument. + +This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable. +The value must be an integer. +All other values are ignored. + +If the value set is less than or equal to `0`, no limit will be applied. + +#### Examples + +Set the cardinality limit to 2000. + +```console +export OTEL_GO_X_CARDINALITY_LIMIT=2000 +``` + +Set an infinite cardinality limit (functionally equivalent to disabling the feature). + +```console +export OTEL_GO_X_CARDINALITY_LIMIT=-1 +``` + +Disable the cardinality limit. + +```console +unset OTEL_GO_X_CARDINALITY_LIMIT +``` + +### Exemplars + +A sample of measurements made may be exported directly as a set of exemplars. + +This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable. +The value must be the case-insensitive string `"true"` to enable the feature. +All other values are ignored. + +Exemplar filters are supported. +The exemplar filter applies to all measurements made. +It filters these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir. + +To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable. +The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+ +- `"always_on"`: allows all measurements +- `"always_off"`: denies all measurements +- `"trace_based"`: allows only sampled measurements + +All values other than these will result in the default, `"trace_based"`, exemplar filter being used. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar + +#### Examples + +Enable exemplars to be exported. + +```console +export OTEL_GO_X_EXEMPLAR=true +``` + +Disable exemplars from being exported. + +```console +unset OTEL_GO_X_EXEMPLAR +``` + +Set the exemplar filter to allow all measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=always_on +``` + +Set the exemplar filter to deny all measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=always_off +``` + +Set the exemplar filter to only allow sampled measurements. + +```console +export OTEL_METRICS_EXEMPLAR_FILTER=trace_based +``` + +Revert to the default exemplar filter (`"trace_based"`) + +```console +unset OTEL_METRICS_EXEMPLAR_FILTER +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go new file mode 100644 index 000000000..541160f94 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package x contains support for OTel metric SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas. +package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" + +import ( + "os" + "strconv" + "strings" +) + +var ( + // Exemplars is an experimental feature flag that defines if exemplars + // should be recorded for metric data-points. + // + // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable + // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" + // will also enable this). 
+ Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false + }) + + // CardinalityLimit is an experimental feature flag that defines if + // cardinality limits should be applied to the recorded metric data-points. + // + // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment + // variable to the integer limit value you want to use. + // + // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 + // will disable the cardinality limits. + CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true + }) +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} |
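Putting the flag plumbing together, consuming both experimental flags looks roughly like this (again assuming code inside the SDK module, since package x is internal):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/internal/x"
)

func main() {
	// Reads OTEL_GO_X_EXEMPLAR; only the case-insensitive "true" enables it.
	if x.Exemplars.Enabled() {
		fmt.Println("exemplars enabled via", x.Exemplars.Key())
	}

	// Reads OTEL_GO_X_CARDINALITY_LIMIT; any integer parses, <= 0 disables
	// the limit.
	if limit, ok := x.CardinalityLimit.Lookup(); ok {
		fmt.Println("cardinality limit:", limit)
	}
}
```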
