Instead of using a global random number generator for all `randRes` values, have each value use its own. This removes the need for locking and managing concurrent-safe access to the global. Additionally, given that the `Reservoir` type is not concurrent safe and the metric pipeline guards access to it, the field does not need a `sync.Mutex` to guard it.

Supersedes https://github.com/open-telemetry/opentelemetry-go/pull/5815

Fix #5814

### Performance Analysis

Based on existing benchmarks, this change performs approximately the same as the existing code.

```terminal
goos: linux
goarch: amd64
pkg: go.opentelemetry.io/otel/sdk/metric
cpu: Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz
                           │   old.txt   │               new.txt               │
                           │   sec/op    │    sec/op     vs base               │
Exemplars/Int64Counter/8-8   14.00µ ± 3%   13.44µ ± 4%  -3.98% (p=0.001 n=10)

                           │   old.txt    │               new.txt                │
                           │     B/op     │     B/op      vs base                │
Exemplars/Int64Counter/8-8   3.791Ki ± 0%   3.791Ki ± 0%  ~ (p=1.000 n=10) ¹
¹ all samples are equal

                           │  old.txt   │              new.txt               │
                           │ allocs/op  │  allocs/op   vs base               │
Exemplars/Int64Counter/8-8   84.00 ± 0%   84.00 ± 0%  ~ (p=1.000 n=10) ¹
¹ all samples are equal
```
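For reference, output like the above comes from `benchstat`; an invocation along these lines would reproduce it (the exact benchmark name and flags are assumptions, not taken from this PR):

```terminal
# Before the change:
go test -run='^$' -bench=BenchmarkExemplars -count=10 ./sdk/metric > old.txt
# After applying the change:
go test -run='^$' -bench=BenchmarkExemplars -count=10 ./sdk/metric > new.txt
# Compare the two runs:
benchstat old.txt new.txt
```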
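To make the design concrete, here is a minimal sketch of the per-reservoir generator described above. The real `randRes` lives in the SDK's internal exemplar package and its fields are unexported; every name below is illustrative, not the actual implementation:

```go
// Sketch only: illustrates per-reservoir RNG ownership. All names are
// assumptions, not the SDK's actual unexported types.
package exemplar

import (
	"math/rand"
	"time"
)

// randRes is a simplified fixed-size reservoir. Previously all reservoirs
// shared one package-level generator guarded by a lock; here each value
// owns its own *rand.Rand, so no synchronization is needed.
type randRes struct {
	storage []int64    // sampled measurement values (simplified)
	count   int64      // measurements offered so far
	rng     *rand.Rand // per-reservoir generator; never shared
}

func newRandRes(k int) *randRes {
	return &randRes{
		storage: make([]int64, 0, k),
		// Each reservoir seeds its own generator; no shared state.
		rng: rand.New(rand.NewSource(time.Now().UnixNano())),
	}
}

// Offer performs classic reservoir sampling with the local generator.
// The metric pipeline serializes calls, so no mutex guards the fields.
func (r *randRes) Offer(v int64) {
	r.count++
	if len(r.storage) < cap(r.storage) {
		r.storage = append(r.storage, v)
		return
	}
	if j := r.rng.Int63n(r.count); j < int64(len(r.storage)) {
		r.storage[j] = v
	}
}
```

Because each reservoir owns its generator and the pipeline already serializes `Offer` calls, the global lock (and the per-field `sync.Mutex`) can be dropped without sacrificing safety.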
```go
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"context"
	"runtime"
	"sync"
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func TestFixedSizeExemplarConcurrentSafe(t *testing.T) {
	// Tests https://github.com/open-telemetry/opentelemetry-go/issues/5814

	t.Setenv("OTEL_METRICS_EXEMPLAR_FILTER", "always_on")

	r := NewManualReader()
	m := NewMeterProvider(WithReader(r)).Meter("exemplar-concurrency")
	// Use two instruments to get concurrent access to any shared globals.
	i0, err := m.Int64Counter("counter.0")
	require.NoError(t, err)
	i1, err := m.Int64Counter("counter.1")
	require.NoError(t, err)

	ctx, cancel := context.WithCancel(context.Background())

	add := func() {
		i0.Add(ctx, 1)
		i1.Add(ctx, 2)
	}

	// Continuously record measurements from multiple goroutines until the
	// context is canceled.
	goRoutines := max(10, runtime.NumCPU())

	var wg sync.WaitGroup
	for n := 0; n < goRoutines; n++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				default:
					require.NotPanics(t, add)
				}
			}
		}()
	}

	// Collect concurrently with the measurement goroutines to exercise the
	// exemplar reservoirs under contention.
	const collections = 100
	var rm metricdata.ResourceMetrics
	for c := 0; c < collections; c++ {
		require.NotPanics(t, func() { _ = r.Collect(ctx, &rm) })
	}

	cancel()
	wg.Wait()
}
```