
Utilize the new slices package in sdk/metric (#4982)

Tyler Yahn, 2024-02-26 23:00:29 -08:00, committed by GitHub
parent 561714acb2
commit b302227390
5 changed files with 20 additions and 24 deletions
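
For context, the change replaces the manual make-plus-copy idiom and sort.Float64s with the generic helpers from the slices package added to the Go 1.21 standard library. A minimal, standalone sketch of the before/after pattern (illustrative only, not code from this repository):

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	bounds := []float64{10, 5, 25}

	// Before: allocate a new slice, copy the elements, then sort in place.
	before := make([]float64, len(bounds))
	copy(before, bounds)
	sort.Float64s(before)

	// After: slices.Clone and slices.Sort express the same intent directly.
	after := slices.Clone(bounds)
	slices.Sort(after)

	fmt.Println(before, after) // [5 10 25] [5 10 25]
}

Either form leaves the caller's slice untouched; the slices variants just drop the explicit length bookkeeping.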

View File

@@ -17,6 +17,7 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
 import (
 	"errors"
 	"fmt"
+	"slices"
 )
 
 // errAgg is wrapped by misconfigured aggregations.
@@ -141,10 +142,8 @@ func (h AggregationExplicitBucketHistogram) err() error {
 // copy returns a deep copy of h.
 func (h AggregationExplicitBucketHistogram) copy() Aggregation {
-	b := make([]float64, len(h.Boundaries))
-	copy(b, h.Boundaries)
 	return AggregationExplicitBucketHistogram{
-		Boundaries: b,
+		Boundaries: slices.Clone(h.Boundaries),
 		NoMinMax:   h.NoMinMax,
 	}
 }
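
A hedged aside on why this remains the "deep copy" the doc comment promises: for an element type like float64, the shallow element copy done by slices.Clone is equivalent to the previous make/copy pair, so mutating the clone cannot affect the original Boundaries. A small illustration (plain standard library, nothing from this repository):

package main

import (
	"fmt"
	"slices"
)

func main() {
	boundaries := []float64{0, 5, 10, 25}

	clone := slices.Clone(boundaries)
	clone[0] = 99 // write to the clone only

	// The original is unchanged because the backing arrays are distinct.
	fmt.Println(boundaries[0], clone[0]) // 0 99
}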

View File

@@ -17,6 +17,7 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
 import (
 	"os"
 	"runtime"
+	"slices"
 
 	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
 	"go.opentelemetry.io/otel/sdk/metric/internal/x"
@@ -40,8 +41,7 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir
 	// use AlignedHistogramBucketExemplarReservoir.
 	a, ok := agg.(AggregationExplicitBucketHistogram)
 	if ok && len(a.Boundaries) > 0 {
-		cp := make([]float64, len(a.Boundaries))
-		copy(cp, a.Boundaries)
+		cp := slices.Clone(a.Boundaries)
 		return func() exemplar.Reservoir[N] {
 			bounds := cp
 			return exemplar.Histogram[N](bounds)

View File

@@ -16,6 +16,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg
 import (
 	"context"
+	"slices"
 	"sort"
 	"sync"
 	"time"
@@ -68,9 +69,8 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r
 	// passed boundaries is ultimately this type's responsibility. Make a copy
 	// here so we can always guarantee this. Or, in the case of failure, have
 	// complete control over the fix.
-	b := make([]float64, len(bounds))
-	copy(b, bounds)
-	sort.Float64s(b)
+	b := slices.Clone(bounds)
+	slices.Sort(b)
 	return &histValues[N]{
 		noSum:  noSum,
 		bounds: b,
@@ -150,8 +150,7 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
 	defer s.valuesMu.Unlock()
 
 	// Do not allow modification of our copy of bounds.
-	bounds := make([]float64, len(s.bounds))
-	copy(bounds, s.bounds)
+	bounds := slices.Clone(s.bounds)
 
 	n := len(s.values)
 	hDPts := reset(h.DataPoints, n, n)
@@ -201,28 +200,25 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
 	defer s.valuesMu.Unlock()
 
 	// Do not allow modification of our copy of bounds.
-	bounds := make([]float64, len(s.bounds))
-	copy(bounds, s.bounds)
+	bounds := slices.Clone(s.bounds)
 
 	n := len(s.values)
 	hDPts := reset(h.DataPoints, n, n)
 
 	var i int
 	for a, b := range s.values {
-		// The HistogramDataPoint field values returned need to be copies of
-		// the buckets value as we will keep updating them.
-		//
-		// TODO (#3047): Making copies for bounds and counts incurs a large
-		// memory allocation footprint. Alternatives should be explored.
-		counts := make([]uint64, len(b.counts))
-		copy(counts, b.counts)
-
 		hDPts[i].Attributes = a
 		hDPts[i].StartTime = s.start
 		hDPts[i].Time = t
 		hDPts[i].Count = b.count
 		hDPts[i].Bounds = bounds
-		hDPts[i].BucketCounts = counts
+
+		// The HistogramDataPoint field values returned need to be copies of
+		// the buckets value as we will keep updating them.
+		//
+		// TODO (#3047): Making copies for bounds and counts incurs a large
+		// memory allocation footprint. Alternatives should be explored.
+		hDPts[i].BucketCounts = slices.Clone(b.counts)
 
 		if !s.noSum {
 			hDPts[i].Sum = b.total

View File

@@ -16,6 +16,7 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exempla
 import (
 	"context"
+	"slices"
 	"sort"
 	"time"
@@ -28,7 +29,7 @@ import (
 //
 // The passed bounds will be sorted by this function.
 func Histogram[N int64 | float64](bounds []float64) Reservoir[N] {
-	sort.Float64s(bounds)
+	slices.Sort(bounds)
 	return &histRes[N]{
 		bounds:  bounds,
 		storage: newStorage[N](len(bounds) + 1),
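
One more hedged note, tying this file to the reservoirFunc change above: slices.Sort, like sort.Float64s before it, sorts its argument in place, which is exactly why the caller clones the boundaries before passing them to Histogram. A standalone sketch of that interaction (plain standard library, no repository code):

package main

import (
	"fmt"
	"slices"
)

func main() {
	bounds := []float64{25, 5, 10}

	// Passing a clone keeps the caller's slice in its original order...
	sorted := slices.Clone(bounds)
	slices.Sort(sorted)
	fmt.Println(bounds, sorted) // [25 5 10] [5 10 25]

	// ...while sorting the original reorders it in place.
	slices.Sort(bounds)
	fmt.Println(bounds) // [5 10 25]
}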

View File

@@ -17,7 +17,7 @@ package exemplar
 import (
 	"context"
 	"math"
-	"sort"
+	"slices"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -43,7 +43,7 @@ func TestFixedSizeSamplingCorrectness(t *testing.T) {
 		data[i] = (-1.0 / intensity) * math.Log(random())
 	}
 	// Sort to test position bias.
-	sort.Float64s(data)
+	slices.Sort(data)
 
 	r := FixedSize[float64](sampleSize)
 	for _, value := range data {