mirror of
https://github.com/open-telemetry/opentelemetry-go.git
synced 2025-04-25 12:04:40 +02:00
Part of addressing https://github.com/open-telemetry/opentelemetry-go/issues/5542. ### Motivation This removes the `time.Now()` call from filtered-out Exemplars by only invoking `time.Now()` after the filtering decision is made. This improvement is especially noticeable for measurements without any attributes. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/metric cpu: AMD EPYC 7B12 │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ SyncMeasure/NoView/Int64Counter/Attributes/0-24 158.20n ± 4% 99.83n ± 1% -36.90% (p=0.000 n=10) SyncMeasure/NoView/Int64Counter/Attributes/1-24 333.3n ± 4% 274.8n ± 1% -17.55% (p=0.000 n=10) SyncMeasure/NoView/Int64Counter/Attributes/10-24 1.640µ ± 1% 1.600µ ± 1% -2.41% (p=0.000 n=10) SyncMeasure/NoView/Float64Counter/Attributes/0-24 159.0n ± 3% 101.3n ± 0% -36.27% (p=0.000 n=10) SyncMeasure/NoView/Float64Counter/Attributes/1-24 340.0n ± 2% 272.0n ± 1% -20.00% (p=0.000 n=10) SyncMeasure/NoView/Float64Counter/Attributes/10-24 1.661µ ± 1% 1.597µ ± 0% -3.85% (p=0.000 n=10) SyncMeasure/NoView/Int64UpDownCounter/Attributes/0-24 159.8n ± 1% 103.1n ± 0% -35.50% (p=0.000 n=10) SyncMeasure/NoView/Int64UpDownCounter/Attributes/1-24 339.5n ± 1% 273.1n ± 0% -19.57% (p=0.000 n=10) SyncMeasure/NoView/Int64UpDownCounter/Attributes/10-24 1.656µ ± 0% 1.589µ ± 0% -4.05% (p=0.000 n=10) SyncMeasure/NoView/Float64UpDownCounter/Attributes/0-24 159.3n ± 2% 100.8n ± 0% -36.74% (p=0.000 n=10) SyncMeasure/NoView/Float64UpDownCounter/Attributes/1-24 337.9n ± 2% 271.8n ± 1% -19.55% (p=0.000 n=10) SyncMeasure/NoView/Float64UpDownCounter/Attributes/10-24 1.657µ ± 0% 1.593µ ± 1% -3.83% (p=0.000 n=10) SyncMeasure/NoView/Int64Histogram/Attributes/0-24 144.65n ± 4% 89.38n ± 0% -38.21% (p=0.000 n=10) SyncMeasure/NoView/Int64Histogram/Attributes/1-24 235.7n ± 2% 183.5n ± 0% -22.15% (p=0.000 n=10) SyncMeasure/NoView/Int64Histogram/Attributes/10-24 900.8n ± 1% 836.8n ± 0% -7.10% (p=0.000 n=10) SyncMeasure/NoView/Float64Histogram/Attributes/0-24 145.60n ± 5% 
93.48n ± 1% -35.80% (p=0.000 n=10) SyncMeasure/NoView/Float64Histogram/Attributes/1-24 240.9n ± 1% 183.0n ± 0% -24.06% (p=0.000 n=10) SyncMeasure/NoView/Float64Histogram/Attributes/10-24 905.6n ± 1% 826.3n ± 0% -8.76% (p=0.000 n=10) SyncMeasure/DropView/Int64Counter/Attributes/0-24 20.33n ± 0% 20.35n ± 0% ~ (p=0.302 n=10) SyncMeasure/DropView/Int64Counter/Attributes/1-24 26.46n ± 0% 26.45n ± 1% ~ (p=0.868 n=10) SyncMeasure/DropView/Int64Counter/Attributes/10-24 26.50n ± 0% 26.47n ± 0% ~ (p=0.208 n=10) SyncMeasure/DropView/Float64Counter/Attributes/0-24 20.34n ± 1% 20.27n ± 0% -0.34% (p=0.009 n=10) SyncMeasure/DropView/Float64Counter/Attributes/1-24 26.55n ± 0% 26.60n ± 1% ~ (p=0.109 n=10) SyncMeasure/DropView/Float64Counter/Attributes/10-24 26.59n ± 1% 26.57n ± 1% ~ (p=0.926 n=10) SyncMeasure/DropView/Int64UpDownCounter/Attributes/0-24 20.38n ± 1% 20.38n ± 0% ~ (p=0.725 n=10) SyncMeasure/DropView/Int64UpDownCounter/Attributes/1-24 26.39n ± 0% 26.44n ± 0% ~ (p=0.238 n=10) SyncMeasure/DropView/Int64UpDownCounter/Attributes/10-24 26.52n ± 0% 26.42n ± 0% -0.36% (p=0.049 n=10) SyncMeasure/DropView/Float64UpDownCounter/Attributes/0-24 20.30n ± 0% 20.25n ± 0% ~ (p=0.196 n=10) SyncMeasure/DropView/Float64UpDownCounter/Attributes/1-24 26.57n ± 0% 26.54n ± 1% ~ (p=0.540 n=10) SyncMeasure/DropView/Float64UpDownCounter/Attributes/10-24 26.57n ± 0% 26.51n ± 1% ~ (p=0.643 n=10) SyncMeasure/DropView/Int64Histogram/Attributes/0-24 20.37n ± 0% 20.36n ± 1% ~ (p=1.000 n=10) SyncMeasure/DropView/Int64Histogram/Attributes/1-24 26.41n ± 0% 26.50n ± 0% +0.32% (p=0.007 n=10) SyncMeasure/DropView/Int64Histogram/Attributes/10-24 26.44n ± 0% 26.55n ± 1% +0.42% (p=0.012 n=10) SyncMeasure/DropView/Float64Histogram/Attributes/0-24 20.30n ± 0% 20.45n ± 0% +0.74% (p=0.000 n=10) SyncMeasure/DropView/Float64Histogram/Attributes/1-24 26.52n ± 0% 26.48n ± 0% ~ (p=0.127 n=10) SyncMeasure/DropView/Float64Histogram/Attributes/10-24 26.55n ± 0% 26.48n ± 0% -0.26% (p=0.002 n=10) 
SyncMeasure/AttrFilterView/Int64Counter/Attributes/0-24 170.5n ± 2% 110.8n ± 0% -35.03% (p=0.000 n=10) SyncMeasure/AttrFilterView/Int64Counter/Attributes/1-24 402.5n ± 1% 331.5n ± 1% -17.64% (p=0.000 n=10) SyncMeasure/AttrFilterView/Int64Counter/Attributes/10-24 1.363µ ± 1% 1.281µ ± 1% -6.02% (p=0.000 n=10) SyncMeasure/AttrFilterView/Float64Counter/Attributes/0-24 170.6n ± 1% 111.5n ± 1% -34.64% (p=0.000 n=10) SyncMeasure/AttrFilterView/Float64Counter/Attributes/1-24 397.1n ± 1% 335.9n ± 0% -15.41% (p=0.000 n=10) SyncMeasure/AttrFilterView/Float64Counter/Attributes/10-24 1.371µ ± 1% 1.279µ ± 1% -6.71% (p=0.000 n=10) SyncMeasure/AttrFilterView/Int64UpDownCounter/Attributes/0-24 170.1n ± 1% 112.2n ± 0% -34.09% (p=0.000 n=10) SyncMeasure/AttrFilterView/Int64UpDownCounter/Attributes/1-24 397.5n ± 1% 330.2n ± 0% -16.93% (p=0.000 n=10) SyncMeasure/AttrFilterView/Int64UpDownCounter/Attributes/10-24 1.371µ ± 1% 1.289µ ± 1% -5.95% (p=0.000 n=10) SyncMeasure/AttrFilterView/Float64UpDownCounter/Attributes/0-24 171.4n ± 2% 112.9n ± 0% -34.13% (p=0.000 n=10) SyncMeasure/AttrFilterView/Float64UpDownCounter/Attributes/1-24 397.0n ± 3% 336.4n ± 0% -15.24% (p=0.000 n=10) SyncMeasure/AttrFilterView/Float64UpDownCounter/Attributes/10-24 1.383µ ± 1% 1.305µ ± 1% -5.61% (p=0.000 n=10) SyncMeasure/AttrFilterView/Int64Histogram/Attributes/0-24 157.30n ± 2% 98.58n ± 1% -37.33% (p=0.000 n=6+10) ``` ### Changes * Introduce `exemplar.Filter`, which is a filter function based on the context. It will not be user-facing, so we can always add other parameters later if needed. * Introduce `exemplar.FilteredReservoir`, which is similar to a reservoir, except it does not receive a timestamp. It gets the current time after the filter decision has been made. It uses generics to avoid the call to exemplar.NewValue(), since it is internal-only. * The `exemplar.Reservoir` is left as-is, so that it can be made public when exemplars are stable. It still includes a timestamp argument. 
* Unit tests are updated to expect a much lower number of calls to time.Now * `exemplar.Drop` is now an `exemplar.FilteredReservoir` instead of a `Reservoir`, since we don't need a Reservoir to store things in if the measurement is always dropped. Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
443 lines
12 KiB
Go
443 lines
12 KiB
Go
// Copyright The OpenTelemetry Authors
|
|
// SPDX-License-Identifier: Apache-2.0
|
|
|
|
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"math"
|
|
"sync"
|
|
"time"
|
|
|
|
"go.opentelemetry.io/otel"
|
|
"go.opentelemetry.io/otel/attribute"
|
|
"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
|
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
|
)
|
|
|
|
const (
	// expoMaxScale and expoMinScale bound the scale of an exponential
	// histogram data point. Larger scales give finer bucket resolution.
	expoMaxScale = 20
	expoMinScale = -10

	// smallestNonZeroNormalFloat64 is the smallest positive normal
	// (non-subnormal) float64 value, 2^-1022.
	smallestNonZeroNormalFloat64 = 0x1p-1022

	// These redefine the Math constants with a type, so the compiler won't coerce
	// them into an int on 32 bit platforms.
	maxInt64 int64 = math.MaxInt64
	minInt64 int64 = math.MinInt64
)
|
|
|
|
// expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct {
	// attrs is the attribute set this data point aggregates.
	attrs attribute.Set
	// res holds exemplars offered for measurements of this attribute set.
	res exemplar.FilteredReservoir[N]

	count uint64 // number of measurements recorded.
	min   N      // smallest measurement recorded (if tracked).
	max   N      // largest measurement recorded (if tracked).
	sum   N      // sum of measurements recorded (if tracked).

	maxSize  int  // maximum number of buckets per sign.
	noMinMax bool // when true, min/max are not tracked.
	noSum    bool // when true, sum is not tracked.

	// scale is the current bucket resolution; it is lowered (downscaled)
	// when measurements no longer fit within maxSize buckets.
	scale int

	posBuckets expoBuckets // buckets for positive measurements.
	negBuckets expoBuckets // buckets for negative measurements.
	zeroCount  uint64      // count of measurements equal to zero.
}
|
|
|
|
func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
|
|
f := math.MaxFloat64
|
|
max := N(f) // if N is int64, max will overflow to -9223372036854775808
|
|
min := N(-f)
|
|
if N(maxInt64) > N(f) {
|
|
max = N(maxInt64)
|
|
min = N(minInt64)
|
|
}
|
|
return &expoHistogramDataPoint[N]{
|
|
attrs: attrs,
|
|
min: max,
|
|
max: min,
|
|
maxSize: maxSize,
|
|
noMinMax: noMinMax,
|
|
noSum: noSum,
|
|
scale: maxScale,
|
|
}
|
|
}
|
|
|
|
// record adds a new measurement to the histogram. It will rescale the buckets if needed.
|
|
func (p *expoHistogramDataPoint[N]) record(v N) {
|
|
p.count++
|
|
|
|
if !p.noMinMax {
|
|
if v < p.min {
|
|
p.min = v
|
|
}
|
|
if v > p.max {
|
|
p.max = v
|
|
}
|
|
}
|
|
if !p.noSum {
|
|
p.sum += v
|
|
}
|
|
|
|
absV := math.Abs(float64(v))
|
|
|
|
if float64(absV) == 0.0 {
|
|
p.zeroCount++
|
|
return
|
|
}
|
|
|
|
bin := p.getBin(absV)
|
|
|
|
bucket := &p.posBuckets
|
|
if v < 0 {
|
|
bucket = &p.negBuckets
|
|
}
|
|
|
|
// If the new bin would make the counts larger than maxScale, we need to
|
|
// downscale current measurements.
|
|
if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
|
|
if p.scale-scaleDelta < expoMinScale {
|
|
// With a scale of -10 there is only two buckets for the whole range of float64 values.
|
|
// This can only happen if there is a max size of 1.
|
|
otel.Handle(errors.New("exponential histogram scale underflow"))
|
|
return
|
|
}
|
|
// Downscale
|
|
p.scale -= scaleDelta
|
|
p.posBuckets.downscale(scaleDelta)
|
|
p.negBuckets.downscale(scaleDelta)
|
|
|
|
bin = p.getBin(absV)
|
|
}
|
|
|
|
bucket.record(bin)
|
|
}
|
|
|
|
// getBin returns the bin v should be recorded into.
|
|
func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
|
|
frac, exp := math.Frexp(v)
|
|
if p.scale <= 0 {
|
|
// Because of the choice of fraction is always 1 power of two higher than we want.
|
|
correction := 1
|
|
if frac == .5 {
|
|
// If v is an exact power of two the frac will be .5 and the exp
|
|
// will be one higher than we want.
|
|
correction = 2
|
|
}
|
|
return (exp - correction) >> (-p.scale)
|
|
}
|
|
return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
|
|
}
|
|
|
|
// scaleFactors are constants used in calculating the logarithm index. They are
// equivalent to 2^index/log(2), indexed by scale (0 through expoMaxScale).
var scaleFactors = [21]float64{
	math.Ldexp(math.Log2E, 0),
	math.Ldexp(math.Log2E, 1),
	math.Ldexp(math.Log2E, 2),
	math.Ldexp(math.Log2E, 3),
	math.Ldexp(math.Log2E, 4),
	math.Ldexp(math.Log2E, 5),
	math.Ldexp(math.Log2E, 6),
	math.Ldexp(math.Log2E, 7),
	math.Ldexp(math.Log2E, 8),
	math.Ldexp(math.Log2E, 9),
	math.Ldexp(math.Log2E, 10),
	math.Ldexp(math.Log2E, 11),
	math.Ldexp(math.Log2E, 12),
	math.Ldexp(math.Log2E, 13),
	math.Ldexp(math.Log2E, 14),
	math.Ldexp(math.Log2E, 15),
	math.Ldexp(math.Log2E, 16),
	math.Ldexp(math.Log2E, 17),
	math.Ldexp(math.Log2E, 18),
	math.Ldexp(math.Log2E, 19),
	math.Ldexp(math.Log2E, 20),
}
|
|
|
|
// scaleChange returns the magnitude of the scale change needed to fit bin in
|
|
// the bucket. If no scale change is needed 0 is returned.
|
|
func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
|
|
if length == 0 {
|
|
// No need to rescale if there are no buckets.
|
|
return 0
|
|
}
|
|
|
|
low := startBin
|
|
high := bin
|
|
if startBin >= bin {
|
|
low = bin
|
|
high = startBin + length - 1
|
|
}
|
|
|
|
count := 0
|
|
for high-low >= p.maxSize {
|
|
low = low >> 1
|
|
high = high >> 1
|
|
count++
|
|
if count > expoMaxScale-expoMinScale {
|
|
return count
|
|
}
|
|
}
|
|
return count
|
|
}
|
|
|
|
// expoBuckets is a set of buckets in an exponential histogram.
type expoBuckets struct {
	// startBin is the bin index of counts[0].
	startBin int
	// counts holds the measurement count per bin; counts[i] corresponds to
	// bin startBin+i.
	counts []uint64
}
|
|
|
|
// record increments the count for the given bin, and expands the buckets if needed.
// Size changes must be done before calling this function.
func (b *expoBuckets) record(bin int) {
	// First measurement: anchor the bucket range at bin.
	if len(b.counts) == 0 {
		b.counts = []uint64{1}
		b.startBin = bin
		return
	}

	endBin := b.startBin + len(b.counts) - 1

	// if the new bin is inside the current range
	if bin >= b.startBin && bin <= endBin {
		b.counts[bin-b.startBin]++
		return
	}
	// if the new bin is before the current start add spaces to the counts
	if bin < b.startBin {
		origLen := len(b.counts)
		newLength := endBin - bin + 1
		shift := b.startBin - bin

		if newLength > cap(b.counts) {
			b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
		}

		// Slide the existing counts toward the end, zero the gap between
		// the new first bucket and the shifted counts, then record the
		// measurement in the new first bucket.
		copy(b.counts[shift:origLen+shift], b.counts[:])
		b.counts = b.counts[:newLength]
		for i := 1; i < shift; i++ {
			b.counts[i] = 0
		}
		b.startBin = bin
		b.counts[0] = 1
		return
	}
	// if the new bin is after the end add spaces to the end
	if bin > endBin {
		// Reuse spare capacity when possible, zeroing the newly exposed
		// tail before recording.
		if bin-b.startBin < cap(b.counts) {
			b.counts = b.counts[:bin-b.startBin+1]
			for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
				b.counts[i] = 0
			}
			b.counts[bin-b.startBin] = 1
			return
		}

		// Otherwise grow the slice with zeroed buckets up to bin.
		end := make([]uint64, bin-b.startBin-len(b.counts)+1)
		b.counts = append(b.counts, end...)
		b.counts[bin-b.startBin] = 1
	}
}
|
|
|
|
// downscale shrinks a bucket by a factor of 2^delta. It will sum counts into the
// correct lower resolution bucket.
func (b *expoBuckets) downscale(delta int) {
	// Example
	// delta = 2
	// Original offset: -6
	// Counts: [ 3,  1,  2,  3,  4,  5, 6, 7, 8, 9, 10]
	// bins:    -6  -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
	// new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
	// new Offset: -2
	// new Counts: [4, 14, 30, 10]

	// With zero or one bucket (or no change) only the start bin moves.
	if len(b.counts) <= 1 || delta < 1 {
		b.startBin = b.startBin >> delta
		return
	}

	steps := 1 << delta
	offset := b.startBin % steps
	offset = (offset + steps) % steps // to make offset positive
	// Fold every fine-grained bucket into its coarser destination in place.
	// Index 0 already holds its own count; a destination is overwritten
	// (not accumulated) the first time it is reached (idx%steps == 0).
	for i := 1; i < len(b.counts); i++ {
		idx := i + offset
		if idx%steps == 0 {
			b.counts[idx/steps] = b.counts[i]
			continue
		}
		b.counts[idx/steps] += b.counts[i]
	}

	// Truncate to the merged length and rescale the start bin.
	lastIdx := (len(b.counts) - 1 + offset) / steps
	b.counts = b.counts[:lastIdx+1]
	b.startBin = b.startBin >> delta
}
|
|
|
|
// newExponentialHistogram returns an Aggregator that summarizes a set of
|
|
// measurements as an exponential histogram. Each histogram is scoped by attributes
|
|
// and the aggregation cycle the measurements were made in.
|
|
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] {
|
|
return &expoHistogram[N]{
|
|
noSum: noSum,
|
|
noMinMax: noMinMax,
|
|
maxSize: int(maxSize),
|
|
maxScale: int(maxScale),
|
|
|
|
newRes: r,
|
|
limit: newLimiter[*expoHistogramDataPoint[N]](limit),
|
|
values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]),
|
|
|
|
start: now(),
|
|
}
|
|
}
|
|
|
|
// expoHistogram summarizes a set of measurements as an histogram with exponentially
// defined buckets.
type expoHistogram[N int64 | float64] struct {
	noSum    bool // when true, data points do not track a sum.
	noMinMax bool // when true, data points do not track min/max.
	maxSize  int  // maximum number of buckets per data point per sign.
	maxScale int  // initial scale for new data points.

	// newRes creates the exemplar reservoir for each new data point.
	newRes func() exemplar.FilteredReservoir[N]
	// limit enforces the cardinality limit on aggregated attribute sets.
	limit limiter[*expoHistogramDataPoint[N]]
	// values maps each distinct attribute set to its data point.
	values map[attribute.Distinct]*expoHistogramDataPoint[N]
	// valuesMu guards values and the data points it holds.
	valuesMu sync.Mutex

	// start is the beginning of the current aggregation cycle.
	start time.Time
}
|
|
|
|
func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
|
|
// Ignore NaN and infinity.
|
|
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
|
|
return
|
|
}
|
|
|
|
e.valuesMu.Lock()
|
|
defer e.valuesMu.Unlock()
|
|
|
|
attr := e.limit.Attributes(fltrAttr, e.values)
|
|
v, ok := e.values[attr.Equivalent()]
|
|
if !ok {
|
|
v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
|
|
v.res = e.newRes()
|
|
|
|
e.values[attr.Equivalent()] = v
|
|
}
|
|
v.record(value)
|
|
v.res.Offer(ctx, value, droppedAttr)
|
|
}
|
|
|
|
// delta writes the delta-temporality exponential histogram of measurements
// made since the last collection into dest, resets the aggregation state, and
// returns the number of data points produced.
func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
	t := now()

	// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
	// In that case, use the zero-value h and hope for better alignment next cycle.
	h, _ := (*dest).(metricdata.ExponentialHistogram[N])
	h.Temporality = metricdata.DeltaTemporality

	e.valuesMu.Lock()
	defer e.valuesMu.Unlock()

	// Reuse the existing DataPoints slice where possible.
	n := len(e.values)
	hDPts := reset(h.DataPoints, n, n)

	var i int
	for _, val := range e.values {
		hDPts[i].Attributes = val.attrs
		hDPts[i].StartTime = e.start
		hDPts[i].Time = t
		hDPts[i].Count = val.count
		hDPts[i].Scale = int32(val.scale)
		hDPts[i].ZeroCount = val.zeroCount
		hDPts[i].ZeroThreshold = 0.0

		// Copy bucket counts out so resetting e.values does not alias
		// the exported data.
		hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
		copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)

		hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
		copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)

		if !e.noSum {
			hDPts[i].Sum = val.sum
		}
		if !e.noMinMax {
			hDPts[i].Min = metricdata.NewExtrema(val.min)
			hDPts[i].Max = metricdata.NewExtrema(val.max)
		}

		collectExemplars(&hDPts[i].Exemplars, val.res.Collect)

		i++
	}
	// Unused attribute sets do not report.
	clear(e.values)

	// The next delta cycle starts where this one ended.
	e.start = t
	h.DataPoints = hDPts
	*dest = h
	return n
}
|
|
|
|
// cumulative writes the cumulative-temporality exponential histogram of all
// measurements made since the histogram's start into dest and returns the
// number of data points produced. Aggregation state is retained.
func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
	t := now()

	// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
	// In that case, use the zero-value h and hope for better alignment next cycle.
	h, _ := (*dest).(metricdata.ExponentialHistogram[N])
	h.Temporality = metricdata.CumulativeTemporality

	e.valuesMu.Lock()
	defer e.valuesMu.Unlock()

	// Reuse the existing DataPoints slice where possible.
	n := len(e.values)
	hDPts := reset(h.DataPoints, n, n)

	var i int
	for _, val := range e.values {
		hDPts[i].Attributes = val.attrs
		hDPts[i].StartTime = e.start
		hDPts[i].Time = t
		hDPts[i].Count = val.count
		hDPts[i].Scale = int32(val.scale)
		hDPts[i].ZeroCount = val.zeroCount
		hDPts[i].ZeroThreshold = 0.0

		// Copy bucket counts out so the retained aggregation state does
		// not alias the exported data.
		hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin)
		hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
		copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)

		hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin)
		hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
		copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)

		if !e.noSum {
			hDPts[i].Sum = val.sum
		}
		if !e.noMinMax {
			hDPts[i].Min = metricdata.NewExtrema(val.min)
			hDPts[i].Max = metricdata.NewExtrema(val.max)
		}

		collectExemplars(&hDPts[i].Exemplars, val.res.Collect)

		i++
		// TODO (#3006): This will use an unbounded amount of memory if there
		// are unbounded number of attribute sets being aggregated. Attribute
		// sets that become "stale" need to be forgotten so this will not
		// overload the system.
	}

	h.DataPoints = hDPts
	*dest = h
	return n
}
|