Diffstat (limited to 'vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate')
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go             | 45
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go | 23
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go             | 26
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go             | 31
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go                 | 53
-rw-r--r--  vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go                   | 62
6 files changed, 203 insertions(+), 37 deletions(-)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
index 8dec14237..4060a2f76 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -19,6 +19,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -44,17 +45,43 @@ type Builder[N int64 | float64] struct {
// Filter is the attribute filter the aggregate function will use on the
// input of measurements.
Filter attribute.Filter
+ // ReservoirFunc is the factory function used by aggregate functions to
+ // create new exemplar reservoirs for a newly seen attribute set.
+ //
+ // If this is not provided, a default factory function that returns an
+ // exemplar.Drop reservoir will be used.
+ ReservoirFunc func() exemplar.Reservoir[N]
+ // AggregationLimit is the cardinality limit of measurement attributes. Once
+ // the limit has been reached, any measurement for a new attribute set will
+ // be aggregated into a single aggregate for the "otel.metric.overflow"
+ // attribute.
+ //
+ // If AggregationLimit is less than or equal to zero, no aggregation limit
+ // is imposed (i.e. unlimited attribute sets).
+ AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] {
+ if b.ReservoirFunc != nil {
+ return b.ReservoirFunc
+ }
+
+ return exemplar.Drop[N]
}
-func (b Builder[N]) filter(f Measure[N]) Measure[N] {
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
if b.Filter != nil {
fltr := b.Filter // Copy to make it immutable after assignment.
return func(ctx context.Context, n N, a attribute.Set) {
- fAttr, _ := a.Filter(fltr)
- f(ctx, n, fAttr)
+ fAttr, dropped := a.Filter(fltr)
+ f(ctx, n, fAttr, dropped)
}
}
- return f
+ return func(ctx context.Context, n N, a attribute.Set) {
+ f(ctx, n, a, nil)
+ }
}
// LastValue returns a last-value aggregate function input and output.
@@ -63,7 +90,7 @@ func (b Builder[N]) filter(f Measure[N]) Measure[N] {
func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
// Delta temporality is the only temporality that makes semantic sense for
// a last-value aggregate.
- lv := newLastValue[N]()
+ lv := newLastValue[N](b.AggregationLimit, b.resFunc())
return b.filter(lv.measure), func(dest *metricdata.Aggregation) int {
// Ignore if dest is not a metricdata.Gauge. The chance for memory
@@ -79,7 +106,7 @@ func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
// PrecomputedSum returns a sum aggregate function input and output. The
// arguments passed to the input are expected to be the precomputed sum values.
func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newPrecomputedSum[N](monotonic)
+ s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(s.measure), s.delta
@@ -90,7 +117,7 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati
// Sum returns a sum aggregate function input and output.
func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newSum[N](monotonic)
+ s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(s.measure), s.delta
@@ -102,7 +129,7 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
// ExplicitBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
- h := newHistogram[N](boundaries, noMinMax, noSum)
+ h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(h.measure), h.delta
@@ -114,7 +141,7 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu
// ExponentialBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
- h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum)
+ h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
return b.filter(h.measure), h.delta
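The filter change above is easier to see in isolation: attribute.Set.Filter already returns both the kept set and the dropped key-values, and the patch stops discarding that second return value so it can be handed to exemplar reservoirs. A minimal standalone sketch using only the public attribute package (a hypothetical program, not part of the patch):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// An attribute set as it might arrive with a measurement.
	set := attribute.NewSet(
		attribute.String("host", "node-1"),
		attribute.String("user", "alice"),
	)

	// A view-style filter that keeps only the "host" attribute.
	fltr := attribute.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key == "host"
	})

	// Filter returns the kept set and the dropped attributes. The new
	// fltrMeasure signature threads the dropped slice through to
	// Reservoir.Offer so exemplars can retain the filtered-out values.
	kept, dropped := set.Filter(fltr)
	fmt.Println(kept.Encoded(attribute.DefaultEncoder())) // host=node-1
	for _, kv := range dropped {
		fmt.Println(kv.Key, kv.Value.Emit()) // user alice
	}
}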
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index 98b7dc1e0..4139a6d15 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -40,6 +41,8 @@ const (
// expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct {
+ res exemplar.Reservoir[N]
+
count uint64
min N
max N
@@ -288,13 +291,15 @@ func (b *expoBuckets) downscale(delta int) {
// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool) *expoHistogram[N] {
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] {
return &expoHistogram[N]{
noSum: noSum,
noMinMax: noMinMax,
maxSize: int(maxSize),
maxScale: int(maxScale),
+ newRes: r,
+ limit: newLimiter[*expoHistogramDataPoint[N]](limit),
values: make(map[attribute.Set]*expoHistogramDataPoint[N]),
start: now(),
@@ -309,27 +314,35 @@ type expoHistogram[N int64 | float64] struct {
maxSize int
maxScale int
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[*expoHistogramDataPoint[N]]
values map[attribute.Set]*expoHistogramDataPoint[N]
valuesMu sync.Mutex
start time.Time
}
-func (e *expoHistogram[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
// Ignore NaN and infinity.
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
return
}
+ t := now()
+
e.valuesMu.Lock()
defer e.valuesMu.Unlock()
+ attr := e.limit.Attributes(fltrAttr, e.values)
v, ok := e.values[attr]
if !ok {
v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+ v.res = e.newRes()
+
e.values[attr] = v
}
v.record(value)
+ v.res.Offer(ctx, t, value, droppedAttr)
}
func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
@@ -362,6 +375,7 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
if !e.noSum {
hDPts[i].Sum = b.sum
@@ -371,6 +385,8 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+ b.res.Collect(&hDPts[i].Exemplars)
+
delete(e.values, a)
i++
}
@@ -410,6 +426,7 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
+ copy(hDPts[i].NegativeBucket.Counts, b.negBuckets.counts)
if !e.noSum {
hDPts[i].Sum = b.sum
@@ -419,6 +436,8 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+ b.res.Collect(&hDPts[i].Exemplars)
+
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
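Both copy additions above fix the same latent bug: reset resizes hDPts[i].NegativeBucket.Counts but leaves its contents zeroed or stale, so negative-bucket counts were never actually exported until the copy was added. The shape of the pattern, with reset sketched here as an assumption about the package's internal helper:

// reset returns a slice of the given length and capacity, reusing s's
// backing array when it is large enough (a sketch of the internal helper;
// the real body may differ).
func reset[T any](s []T, length, capacity int) []T {
	if cap(s) < capacity {
		return make([]T, length, capacity)
	}
	return s[:length]
}

func exportCounts(dst, src []uint64) []uint64 {
	dst = reset(dst, len(src), len(src))
	// Without this copy, dst holds zeros or a previous cycle's counts --
	// the omission the patch corrects for NegativeBucket.Counts.
	copy(dst, src)
	return dst
}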
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
index 62ec51e1f..a9a4706bf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -21,10 +21,13 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
type buckets[N int64 | float64] struct {
+ res exemplar.Reservoir[N]
+
counts []uint64
count uint64
total N
@@ -54,11 +57,13 @@ type histValues[N int64 | float64] struct {
noSum bool
bounds []float64
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[*buckets[N]]
values map[attribute.Set]*buckets[N]
valuesMu sync.Mutex
}
-func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] {
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] {
// The responsibility of keeping all buckets correctly associated with the
// passed boundaries is ultimately this type's responsibility. Make a copy
// here so we can always guarantee this. Or, in the case of failure, have
@@ -69,13 +74,15 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[
return &histValues[N]{
noSum: noSum,
bounds: b,
+ newRes: r,
+ limit: newLimiter[*buckets[N]](limit),
values: make(map[attribute.Set]*buckets[N]),
}
}
// Aggregate records the measurement value, scoped by attr, and aggregates it
// into a histogram.
-func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
// This search will return an index in the range [0, len(s.bounds)], where
// it will return len(s.bounds) if value is greater than the last element
// of s.bounds. This aligns with the buckets in that the length of buckets
@@ -83,9 +90,12 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
// (s.bounds[len(s.bounds)-1], +∞).
idx := sort.SearchFloat64s(s.bounds, float64(value))
+ t := now()
+
s.valuesMu.Lock()
defer s.valuesMu.Unlock()
+ attr := s.limit.Attributes(fltrAttr, s.values)
b, ok := s.values[attr]
if !ok {
// N+1 buckets. For example:
@@ -96,6 +106,8 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
//
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
b = newBuckets[N](len(s.bounds) + 1)
+ b.res = s.newRes()
+
// Ensure min and max are recorded values (not zero), for new buckets.
b.min, b.max = value, value
s.values[attr] = b
@@ -104,13 +116,14 @@ func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set)
if !s.noSum {
b.sum(value)
}
+ b.res.Offer(ctx, t, value, droppedAttr)
}
// newHistogram returns an Aggregator that summarizes a set of measurements as
// a histogram.
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] {
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] {
return &histogram[N]{
- histValues: newHistValues[N](boundaries, noSum),
+ histValues: newHistValues[N](boundaries, noSum, limit, r),
noMinMax: noMinMax,
start: now(),
}
@@ -161,6 +174,8 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+ b.res.Collect(&hDPts[i].Exemplars)
+
// Unused attribute sets do not report.
delete(s.values, a)
i++
@@ -217,6 +232,9 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
hDPts[i].Min = metricdata.NewExtrema(b.min)
hDPts[i].Max = metricdata.NewExtrema(b.max)
}
+
+ b.res.Collect(&hDPts[i].Exemplars)
+
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
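The bucket-index comment in measure deserves a worked example; this standalone program exercises sort.SearchFloat64s exactly the way measure does, with nothing SDK-specific:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// With bounds [0, 5, 10] there are len(bounds)+1 = 4 buckets:
	//   idx 0: (-inf, 0]   idx 1: (0, 5]   idx 2: (5, 10]   idx 3: (10, +inf)
	bounds := []float64{0, 5, 10}
	for _, v := range []float64{-1, 0, 2, 7, 25} {
		idx := sort.SearchFloat64s(bounds, v)
		fmt.Printf("value %v -> bucket index %d\n", v, idx)
	}
	// Prints indexes 0, 0, 1, 2, 3 for the five values.
}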
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
index 6af2d6061..5699e728f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -20,6 +20,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
@@ -27,24 +28,43 @@ import (
type datapoint[N int64 | float64] struct {
timestamp time.Time
value N
+ res exemplar.Reservoir[N]
}
-func newLastValue[N int64 | float64]() *lastValue[N] {
- return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])}
+func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] {
+ return &lastValue[N]{
+ newRes: r,
+ limit: newLimiter[datapoint[N]](limit),
+ values: make(map[attribute.Set]datapoint[N]),
+ }
}
// lastValue summarizes a set of measurements as the last one made.
type lastValue[N int64 | float64] struct {
sync.Mutex
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[datapoint[N]]
values map[attribute.Set]datapoint[N]
}
-func (s *lastValue[N]) measure(ctx context.Context, value N, attr attribute.Set) {
- d := datapoint[N]{timestamp: now(), value: value}
+func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ d, ok := s.values[attr]
+ if !ok {
+ d.res = s.newRes()
+ }
+
+ d.timestamp = t
+ d.value = value
+ d.res.Offer(ctx, t, value, droppedAttr)
+
s.values[attr] = d
- s.Unlock()
}
func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
@@ -61,6 +81,7 @@ func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
// ignored.
(*dest)[i].Time = v.timestamp
(*dest)[i].Value = v.value
+ v.res.Collect(&(*dest)[i].Exemplars)
// Do not report stale values.
delete(s.values, a)
i++
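Since values maps attribute sets to datapoint structs rather than pointers, measure mutates a local copy and has to store it back, which is why the rewritten function now ends with s.values[attr] = d. A hypothetical minimal illustration of that read-modify-write pattern:

type point struct{ value int64 }

func record(values map[string]point, key string, v int64) {
	d := values[key] // zero value when the key is new
	d.value = v      // mutates the copy, not the map entry
	values[key] = d  // write-back is required to persist the change
}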
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
new file mode 100644
index 000000000..d3de84272
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// overflowSet is the attribute set used to record a measurement when adding
+// another distinct attribute set to the aggregate would exceed the aggregate
+// limit.
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+ // aggLimit is the maximum number of metric streams that can be aggregated.
+ //
+ // Once aggLimit has been met, any metric stream with attributes distinct
+ // from those already aggregated will instead be aggregated into an
+ // "overflow" metric stream. That stream contains only the
+ // "otel.metric.overflow"=true attribute.
+ aggLimit int
+}
+
+// newLimiter returns a new limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+ return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if the limit will not be exceeded or
+// the limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Set]V) attribute.Set {
+ if l.aggLimit > 0 {
+ _, exists := measurements[attrs]
+ if !exists && len(measurements) >= l.aggLimit-1 {
+ return overflowSet
+ }
+ }
+
+ return attrs
+}
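Note the len(measurements) >= l.aggLimit-1 comparison: the budget includes the overflow stream itself, so with a limit of 3 only two distinct attribute sets get their own streams. limiter is unexported, so this standalone sketch (hypothetical helper name) reimplements the same check to show the behavior:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))

// attributesFor mirrors limiter.Attributes: divert new attribute sets to
// the overflow set once limit-1 distinct sets have been stored.
func attributesFor(limit int, attrs attribute.Set, measurements map[attribute.Set]int64) attribute.Set {
	if limit > 0 {
		if _, ok := measurements[attrs]; !ok && len(measurements) >= limit-1 {
			return overflowSet
		}
	}
	return attrs
}

func main() {
	values := map[attribute.Set]int64{}
	for _, user := range []string{"a", "b", "c", "d"} {
		set := attributesFor(3, attribute.NewSet(attribute.String("user", user)), values)
		values[set]++
	}
	// "a" and "b" keep their own streams; "c" and "d" collapse into the
	// overflow stream, so three streams exist in total.
	fmt.Println(len(values)) // 3
}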
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
index 1e52ff0d1..02de2483f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -20,31 +20,55 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
+type sumValue[N int64 | float64] struct {
+ n N
+ res exemplar.Reservoir[N]
+}
+
// valueMap is the storage for sums.
type valueMap[N int64 | float64] struct {
sync.Mutex
- values map[attribute.Set]N
+ newRes func() exemplar.Reservoir[N]
+ limit limiter[sumValue[N]]
+ values map[attribute.Set]sumValue[N]
}
-func newValueMap[N int64 | float64]() *valueMap[N] {
- return &valueMap[N]{values: make(map[attribute.Set]N)}
+func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] {
+ return &valueMap[N]{
+ newRes: r,
+ limit: newLimiter[sumValue[N]](limit),
+ values: make(map[attribute.Set]sumValue[N]),
+ }
}
-func (s *valueMap[N]) measure(_ context.Context, value N, attr attribute.Set) {
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ t := now()
+
s.Lock()
- s.values[attr] += value
- s.Unlock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ v, ok := s.values[attr]
+ if !ok {
+ v.res = s.newRes()
+ }
+
+ v.n += value
+ v.res.Offer(ctx, t, value, droppedAttr)
+
+ s.values[attr] = v
}
// newSum returns an aggregator that summarizes a set of measurements as their
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
// the measurements were made in.
-func newSum[N int64 | float64](monotonic bool) *sum[N] {
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] {
return &sum[N]{
- valueMap: newValueMap[N](),
+ valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
start: now(),
}
@@ -74,11 +98,12 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
dPts := reset(sData.DataPoints, n, n)
var i int
- for attr, value := range s.values {
+ for attr, val := range s.values {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = value
+ dPts[i].Value = val.n
+ val.res.Collect(&dPts[i].Exemplars)
// Do not report stale values.
delete(s.values, attr)
i++
@@ -112,7 +137,8 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = value
+ dPts[i].Value = value.n
+ value.res.Collect(&dPts[i].Exemplars)
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
// sets that become "stale" need to be forgotten so this will not
@@ -129,9 +155,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
// newPrecomputedSum returns an aggregator that summarizes a set of
// observations as their arithmetic sum. Each sum is scoped by attributes and
// the aggregation cycle the measurements were made in.
-func newPrecomputedSum[N int64 | float64](monotonic bool) *precomputedSum[N] {
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] {
return &precomputedSum[N]{
- valueMap: newValueMap[N](),
+ valueMap: newValueMap[N](limit, r),
monotonic: monotonic,
start: now(),
}
@@ -165,14 +191,15 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
var i int
for attr, value := range s.values {
- delta := value - s.reported[attr]
+ delta := value.n - s.reported[attr]
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = delta
+ value.res.Collect(&dPts[i].Exemplars)
- newReported[attr] = value
+ newReported[attr] = value.n
// Unused attribute sets do not report.
delete(s.values, attr)
i++
@@ -204,11 +231,12 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
dPts := reset(sData.DataPoints, n, n)
var i int
- for attr, value := range s.values {
+ for attr, val := range s.values {
dPts[i].Attributes = attr
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = value
+ dPts[i].Value = val.n
+ val.res.Collect(&dPts[i].Exemplars)
// Unused attribute sets do not report.
delete(s.values, attr)
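For precomputed sums the instrument observes running totals, so delta temporality reports the difference from the last collection, as delta := value.n - s.reported[attr] computes above. A hypothetical standalone sketch of that bookkeeping:

// deltas converts running totals into per-cycle deltas and records what
// was reported (mirroring the idea of precomputedSum.delta, not its code).
func deltas(current, reported map[string]int64) map[string]int64 {
	out := make(map[string]int64, len(current))
	for k, total := range current {
		out[k] = total - reported[k] // e.g. total 42 after reporting 30 -> delta 12
		reported[k] = total
	}
	return out
}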