Mirror of https://github.com/open-telemetry/opentelemetry-go.git, synced 2025-01-03 22:52:30 +02:00
Do not export aggregations without any data points (#3436)
* Return nil aggs if no measurements were made
* Update tests with the new expected behavior
* Add change to changelog
* Set PR number in changelog
* Run lint
* Fix pipeline_test
* Scope changelog entry to the pkg
* Clean up init of agg types
This commit is contained in:
parent acd87773e2
commit d091ba88e4
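The refactor is identical across every aggregator touched below: `Aggregation()` previously returned a zero-value `metricdata` aggregation when nothing had been measured, and now returns nil, so callers can tell "no measurements" apart from an aggregation that happens to carry zero data points. A minimal, self-contained sketch of the new contract — it uses the real `metricdata` package, but the `lastValue` type here is a toy stand-in, not the SDK's internal implementation:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// lastValue is a toy stand-in for the SDK's internal last-value aggregator,
// trimmed down to the nil-on-empty contract this commit introduces.
type lastValue struct {
	values map[attribute.Set]float64
}

func (s *lastValue) Aggregation() metricdata.Aggregation {
	if len(s.values) == 0 {
		return nil // no measurements recorded: report no aggregation at all
	}
	// The aggregation value is only built once we know there is data.
	g := metricdata.Gauge[float64]{
		DataPoints: make([]metricdata.DataPoint[float64], 0, len(s.values)),
	}
	for a, v := range s.values {
		g.DataPoints = append(g.DataPoints, metricdata.DataPoint[float64]{
			Attributes: a,
			Value:      v,
		})
	}
	return g
}

func main() {
	s := &lastValue{values: map[attribute.Set]float64{}}
	fmt.Println(s.Aggregation() == nil) // true: there is nothing to export
}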
@@ -42,6 +42,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 - Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
 - Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
 - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
 - Reenabled Attribute Filters in the Metric SDK. (#3396)
 - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
 - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
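The changelog entry for #3394/#3436 means consumers can no longer assume every aggregator yields an aggregation: a nil result now signals "nothing measured, export nothing". A minimal sketch of caller-side handling, with func-typed placeholders standing in for the SDK's internal aggregators:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// collect honors the new contract: aggregators with no data points return a
// nil metricdata.Aggregation and are skipped entirely.
func collect(aggs []func() metricdata.Aggregation) []metricdata.Aggregation {
	var out []metricdata.Aggregation
	for _, a := range aggs {
		if agg := a(); agg != nil {
			out = append(out, agg)
		}
	}
	return out
}

func main() {
	empty := func() metricdata.Aggregation { return nil }
	fmt.Println(len(collect([]func() metricdata.Aggregation{empty}))) // 0
}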
@@ -130,20 +130,21 @@ type deltaHistogram[N int64 | float64] struct {
 }
 
 func (s *deltaHistogram[N]) Aggregation() metricdata.Aggregation {
-	h := metricdata.Histogram{Temporality: metricdata.DeltaTemporality}
-
 	s.valuesMu.Lock()
 	defer s.valuesMu.Unlock()
 
 	if len(s.values) == 0 {
-		return h
+		return nil
 	}
 
+	t := now()
 	// Do not allow modification of our copy of bounds.
 	bounds := make([]float64, len(s.bounds))
 	copy(bounds, s.bounds)
-	t := now()
-	h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(s.values))
+	h := metricdata.Histogram{
+		Temporality: metricdata.DeltaTemporality,
+		DataPoints:  make([]metricdata.HistogramDataPoint, 0, len(s.values)),
+	}
 	for a, b := range s.values {
 		hdp := metricdata.HistogramDataPoint{
 			Attributes: a,
@@ -192,20 +193,21 @@ type cumulativeHistogram[N int64 | float64] struct {
 }
 
 func (s *cumulativeHistogram[N]) Aggregation() metricdata.Aggregation {
-	h := metricdata.Histogram{Temporality: metricdata.CumulativeTemporality}
-
 	s.valuesMu.Lock()
 	defer s.valuesMu.Unlock()
 
 	if len(s.values) == 0 {
-		return h
+		return nil
 	}
 
+	t := now()
 	// Do not allow modification of our copy of bounds.
 	bounds := make([]float64, len(s.bounds))
 	copy(bounds, s.bounds)
-	t := now()
-	h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(s.values))
+	h := metricdata.Histogram{
+		Temporality: metricdata.CumulativeTemporality,
+		DataPoints:  make([]metricdata.HistogramDataPoint, 0, len(s.values)),
+	}
 	for a, b := range s.values {
 		// The HistogramDataPoint field values returned need to be copies of
 		// the buckets value as we will keep updating them.
@@ -169,17 +169,17 @@ func TestCumulativeHistogramImutableCounts(t *testing.T) {
 func TestDeltaHistogramReset(t *testing.T) {
 	t.Cleanup(mockTime(now))
 
-	expect := metricdata.Histogram{Temporality: metricdata.DeltaTemporality}
 	a := NewDeltaHistogram[int64](histConf)
-	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+	assert.Nil(t, a.Aggregation())
 
 	a.Aggregate(1, alice)
+	expect := metricdata.Histogram{Temporality: metricdata.DeltaTemporality}
 	expect.DataPoints = []metricdata.HistogramDataPoint{hPoint(alice, 1, 1)}
 	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
 
 	// The attr set should be forgotten once Aggregations is called.
 	expect.DataPoints = nil
-	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+	assert.Nil(t, a.Aggregation())
 
 	// Aggregating another set should not affect the original (alice).
 	a.Aggregate(1, bob)
@@ -187,6 +187,13 @@ func TestDeltaHistogramReset(t *testing.T) {
 	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
 }
 
+func TestEmptyHistogramNilAggregation(t *testing.T) {
+	assert.Nil(t, NewCumulativeHistogram[int64](histConf).Aggregation())
+	assert.Nil(t, NewCumulativeHistogram[float64](histConf).Aggregation())
+	assert.Nil(t, NewDeltaHistogram[int64](histConf).Aggregation())
+	assert.Nil(t, NewDeltaHistogram[float64](histConf).Aggregation())
+}
+
 func BenchmarkHistogram(b *testing.B) {
 	b.Run("Int64", benchmarkHistogram[int64])
 	b.Run("Float64", benchmarkHistogram[float64])
@@ -49,16 +49,16 @@ func (s *lastValue[N]) Aggregate(value N, attr attribute.Set) {
 }
 
 func (s *lastValue[N]) Aggregation() metricdata.Aggregation {
-	gauge := metricdata.Gauge[N]{}
-
 	s.Lock()
 	defer s.Unlock()
 
 	if len(s.values) == 0 {
-		return gauge
+		return nil
 	}
 
-	gauge.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
+	gauge := metricdata.Gauge[N]{
+		DataPoints: make([]metricdata.DataPoint[N], 0, len(s.values)),
+	}
 	for a, v := range s.values {
 		gauge.DataPoints = append(gauge.DataPoints, metricdata.DataPoint[N]{
 			Attributes: a,
@@ -17,6 +17,8 @@ package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
 )
@@ -52,20 +54,21 @@ func testLastValueReset[N int64 | float64](t *testing.T) {
 	t.Cleanup(mockTime(now))
 
 	a := NewLastValue[N]()
-	expect := metricdata.Gauge[N]{}
-	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+	assert.Nil(t, a.Aggregation())
 
 	a.Aggregate(1, alice)
-	expect.DataPoints = []metricdata.DataPoint[N]{{
-		Attributes: alice,
-		Time:       now(),
-		Value:      1,
-	}}
+	expect := metricdata.Gauge[N]{
+		DataPoints: []metricdata.DataPoint[N]{{
+			Attributes: alice,
+			Time:       now(),
+			Value:      1,
+		}},
+	}
 	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
 
 	// The attr set should be forgotten once Aggregations is called.
 	expect.DataPoints = nil
-	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+	assert.Nil(t, a.Aggregation())
 
 	// Aggregating another set should not affect the original (alice).
 	a.Aggregate(1, bob)
@@ -82,6 +85,11 @@ func TestLastValueReset(t *testing.T) {
 	t.Run("Float64", testLastValueReset[float64])
 }
 
+func TestEmptyLastValueNilAggregation(t *testing.T) {
+	assert.Nil(t, NewLastValue[int64]().Aggregation())
+	assert.Nil(t, NewLastValue[float64]().Aggregation())
+}
+
 func BenchmarkLastValue(b *testing.B) {
 	b.Run("Int64", benchmarkAggregator(NewLastValue[int64]))
 	b.Run("Float64", benchmarkAggregator(NewLastValue[float64]))
@@ -76,20 +76,19 @@ type deltaSum[N int64 | float64] struct {
 }
 
 func (s *deltaSum[N]) Aggregation() metricdata.Aggregation {
-	out := metricdata.Sum[N]{
-		Temporality: metricdata.DeltaTemporality,
-		IsMonotonic: s.monotonic,
-	}
-
 	s.Lock()
 	defer s.Unlock()
 
 	if len(s.values) == 0 {
-		return out
+		return nil
 	}
 
 	t := now()
-	out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
+	out := metricdata.Sum[N]{
+		Temporality: metricdata.DeltaTemporality,
+		IsMonotonic: s.monotonic,
+		DataPoints:  make([]metricdata.DataPoint[N], 0, len(s.values)),
+	}
 	for attr, value := range s.values {
 		out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
 			Attributes: attr,
@@ -137,20 +136,19 @@ type cumulativeSum[N int64 | float64] struct {
 }
 
 func (s *cumulativeSum[N]) Aggregation() metricdata.Aggregation {
-	out := metricdata.Sum[N]{
-		Temporality: metricdata.CumulativeTemporality,
-		IsMonotonic: s.monotonic,
-	}
-
 	s.Lock()
 	defer s.Unlock()
 
 	if len(s.values) == 0 {
-		return out
+		return nil
 	}
 
 	t := now()
-	out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
+	out := metricdata.Sum[N]{
+		Temporality: metricdata.CumulativeTemporality,
+		IsMonotonic: s.monotonic,
+		DataPoints:  make([]metricdata.DataPoint[N], 0, len(s.values)),
+	}
 	for attr, value := range s.values {
 		out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
 			Attributes: attr,
@@ -204,20 +202,19 @@ func (s *precomputedDeltaSum[N]) Aggregate(value N, attr attribute.Set) {
 }
 
 func (s *precomputedDeltaSum[N]) Aggregation() metricdata.Aggregation {
-	out := metricdata.Sum[N]{
-		Temporality: metricdata.DeltaTemporality,
-		IsMonotonic: s.monotonic,
-	}
-
 	s.Lock()
 	defer s.Unlock()
 
 	if len(s.recorded) == 0 {
-		return out
+		return nil
 	}
 
 	t := now()
-	out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.recorded))
+	out := metricdata.Sum[N]{
+		Temporality: metricdata.DeltaTemporality,
+		IsMonotonic: s.monotonic,
+		DataPoints:  make([]metricdata.DataPoint[N], 0, len(s.recorded)),
+	}
 	for attr, recorded := range s.recorded {
 		value := recorded - s.reported[attr]
 		out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
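For the precomputed (asynchronous) delta sum above, each collection reports the difference between the latest observed cumulative value (`recorded`) and what was already sent downstream (`reported`). A self-contained sketch of that bookkeeping — field and method names echo the diff, but this is illustrative, not the SDK's `precomputedDeltaSum` verbatim:

package main

import "fmt"

// precomputedDelta tracks cumulative observations and the portion of them
// already reported, so each collection can emit only the change.
type precomputedDelta struct {
	recorded map[string]int64 // latest cumulative values observed by callbacks
	reported map[string]int64 // values already sent in earlier collections
}

// deltas returns the change since the last collection, or nil when nothing
// was recorded, matching the commit's nil-on-empty contract.
func (p *precomputedDelta) deltas() map[string]int64 {
	if len(p.recorded) == 0 {
		return nil
	}
	out := make(map[string]int64, len(p.recorded))
	for attr, rec := range p.recorded {
		out[attr] = rec - p.reported[attr] // value := recorded - s.reported[attr]
		p.reported[attr] = rec             // advance the reported baseline
	}
	return out
}

func main() {
	p := &precomputedDelta{
		recorded: map[string]int64{"alice": 5},
		reported: map[string]int64{"alice": 2},
	}
	fmt.Println(p.deltas()["alice"]) // 3
}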
@@ -17,6 +17,8 @@ package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
@@ -138,17 +140,17 @@ func point[N int64 | float64](a attribute.Set, v N) metricdata.DataPoint[N] {
 func testDeltaSumReset[N int64 | float64](t *testing.T) {
 	t.Cleanup(mockTime(now))
 
-	expect := metricdata.Sum[N]{Temporality: metricdata.DeltaTemporality}
 	a := NewDeltaSum[N](false)
-	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+	assert.Nil(t, a.Aggregation())
 
 	a.Aggregate(1, alice)
+	expect := metricdata.Sum[N]{Temporality: metricdata.DeltaTemporality}
 	expect.DataPoints = []metricdata.DataPoint[N]{point[N](alice, 1)}
 	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
 
 	// The attr set should be forgotten once Aggregations is called.
 	expect.DataPoints = nil
-	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+	assert.Nil(t, a.Aggregation())
 
 	// Aggregating another set should not affect the original (alice).
 	a.Aggregate(1, bob)
@@ -161,6 +163,25 @@ func TestDeltaSumReset(t *testing.T) {
 	t.Run("Float64", testDeltaSumReset[float64])
 }
 
+func TestEmptySumNilAggregation(t *testing.T) {
+	assert.Nil(t, NewCumulativeSum[int64](true).Aggregation())
+	assert.Nil(t, NewCumulativeSum[int64](false).Aggregation())
+	assert.Nil(t, NewCumulativeSum[float64](true).Aggregation())
+	assert.Nil(t, NewCumulativeSum[float64](false).Aggregation())
+	assert.Nil(t, NewDeltaSum[int64](true).Aggregation())
+	assert.Nil(t, NewDeltaSum[int64](false).Aggregation())
+	assert.Nil(t, NewDeltaSum[float64](true).Aggregation())
+	assert.Nil(t, NewDeltaSum[float64](false).Aggregation())
+	assert.Nil(t, NewPrecomputedCumulativeSum[int64](true).Aggregation())
+	assert.Nil(t, NewPrecomputedCumulativeSum[int64](false).Aggregation())
+	assert.Nil(t, NewPrecomputedCumulativeSum[float64](true).Aggregation())
+	assert.Nil(t, NewPrecomputedCumulativeSum[float64](false).Aggregation())
+	assert.Nil(t, NewPrecomputedDeltaSum[int64](true).Aggregation())
+	assert.Nil(t, NewPrecomputedDeltaSum[int64](false).Aggregation())
+	assert.Nil(t, NewPrecomputedDeltaSum[float64](true).Aggregation())
+	assert.Nil(t, NewPrecomputedDeltaSum[float64](false).Aggregation())
+}
+
 func BenchmarkSum(b *testing.B) {
 	b.Run("Int64", benchmarkSum[int64])
 	b.Run("Float64", benchmarkSum[float64])
@@ -167,6 +167,9 @@ func testDefaultViewImplicit[N int64 | float64]() func(t *testing.T) {
 			got, err := i.Instrument(inst)
 			require.NoError(t, err)
 			assert.Len(t, got, 1, "default view not applied")
+			for _, a := range got {
+				a.Aggregate(1, *attribute.EmptySet())
+			}
 
 			out, err := test.pipe.produce(context.Background())
 			require.NoError(t, err)
@@ -180,6 +183,7 @@ func testDefaultViewImplicit[N int64 | float64]() func(t *testing.T) {
 				Data: metricdata.Sum[N]{
 					Temporality: metricdata.CumulativeTemporality,
 					IsMonotonic: true,
+					DataPoints:  []metricdata.DataPoint[N]{{Value: N(1)}},
 				},
 			}, sm.Metrics[0], metricdatatest.IgnoreTimestamp())
 		})
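The two pipeline_test hunks above follow from the same contract: an instrument that records nothing now yields a nil aggregation, so produce emits no metric for it at all, which is why the test records one measurement per aggregator and expects its data point in the output. A toy sketch of that omission — produce here is a stand-in for the pipeline's real method, not its actual code:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// produce mimics the pipeline's produce step: instruments whose aggregators
// return nil contribute no metricdata.Metrics entry to the output.
func produce(insts map[string]func() metricdata.Aggregation) []metricdata.Metrics {
	var out []metricdata.Metrics
	for name, agg := range insts {
		data := agg()
		if data == nil {
			continue // no measurements were made for this instrument
		}
		out = append(out, metricdata.Metrics{Name: name, Data: data})
	}
	return out
}

func main() {
	silent := func() metricdata.Aggregation { return nil }
	fmt.Println(len(produce(map[string]func() metricdata.Aggregation{
		"requests": silent,
	}))) // 0: the instrument is absent from the output entirely
}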