1
0
mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2025-11-29 23:07:45 +02:00

refactor: replace context.Background() with t.Context()/b.Context() in tests (#7352)

The Go version this module currently requires is already 1.24+, which
allows using `t.Context()` and `b.Context()` in unit tests and
benchmarks respectively.

- Enable `context-background` and `context-todo` in
[`usetesting`](https://golangci-lint.run/docs/linters/configuration/#usetesting)
- Adjust the code so it passes the enabled linter checks

---------

Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
Co-authored-by: Tyler Yahn <codingalias@gmail.com>
Co-authored-by: Damien Mathieu <42@dmathieu.com>
This commit is contained in:
Flc゛
2025-09-23 15:52:45 +08:00
committed by GitHub
parent 2389f4488f
commit 80cb909774
106 changed files with 763 additions and 778 deletions

View File

@@ -44,7 +44,7 @@ func (b *concurrentBuffer) String() string {
func TestEmptyBatchConfig(t *testing.T) {
assert.NotPanics(t, func() {
var bp BatchProcessor
ctx := context.Background()
ctx := t.Context()
record := new(Record)
assert.NoError(t, bp.OnEmit(ctx, record), "OnEmit")
assert.NoError(t, bp.ForceFlush(ctx), "ForceFlush")
@@ -191,7 +191,7 @@ func TestNewBatchConfig(t *testing.T) {
}
func TestBatchProcessor(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
t.Run("NilExporter", func(t *testing.T) {
assert.NotPanics(t, func() { NewBatchProcessor(nil) })
@@ -326,7 +326,7 @@ func TestBatchProcessor(t *testing.T) {
t.Cleanup(func() { close(e.ExportTrigger) })
b := NewBatchProcessor(e)
ctx := context.Background()
ctx := t.Context()
c, cancel := context.WithCancel(ctx)
cancel()
@@ -649,7 +649,7 @@ func BenchmarkBatchProcessorOnEmit(b *testing.B) {
r.SetBody(body)
rSize := unsafe.Sizeof(r) + unsafe.Sizeof(body)
ctx := context.Background()
ctx := b.Context()
bp := NewBatchProcessor(
defaultNoopExporter,
WithMaxQueueSize(b.N+1),

View File

@@ -104,7 +104,10 @@ func BenchmarkProcessor(b *testing.B) {
} {
b.Run(tc.name, func(b *testing.B) {
provider := NewLoggerProvider(tc.f()...)
b.Cleanup(func() { assert.NoError(b, provider.Shutdown(context.Background())) })
b.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(b, provider.Shutdown(context.Background()))
})
logger := provider.Logger(b.Name())
b.ReportAllocs()
@@ -119,7 +122,7 @@ func BenchmarkProcessor(b *testing.B) {
log.Int("int", 123),
log.Bool("bool", true),
)
logger.Emit(context.Background(), r)
logger.Emit(b.Context(), r)
}
})
})

View File

@@ -136,7 +136,7 @@ func TestChunker(t *testing.T) {
t.Cleanup(exp.Stop)
c := newChunkExporter(exp, 0)
const size = 100
_ = c.Export(context.Background(), make([]Record, size))
_ = c.Export(t.Context(), make([]Record, size))
assert.Equal(t, 1, exp.ExportN())
records := exp.Records()
@@ -148,7 +148,7 @@ func TestChunker(t *testing.T) {
exp := newTestExporter(nil)
t.Cleanup(exp.Stop)
c := newChunkExporter(exp, 0)
_ = c.ForceFlush(context.Background())
_ = c.ForceFlush(t.Context())
assert.Equal(t, 1, exp.ForceFlushN(), "ForceFlush not passed through")
})
@@ -156,7 +156,7 @@ func TestChunker(t *testing.T) {
exp := newTestExporter(nil)
t.Cleanup(exp.Stop)
c := newChunkExporter(exp, 0)
_ = c.Shutdown(context.Background())
_ = c.Shutdown(t.Context())
assert.Equal(t, 1, exp.ShutdownN(), "Shutdown not passed through")
})
@@ -164,8 +164,8 @@ func TestChunker(t *testing.T) {
exp := newTestExporter(nil)
t.Cleanup(exp.Stop)
c := newChunkExporter(exp, 10)
assert.NoError(t, c.Export(context.Background(), make([]Record, 5)))
assert.NoError(t, c.Export(context.Background(), make([]Record, 25)))
assert.NoError(t, c.Export(t.Context(), make([]Record, 5)))
assert.NoError(t, c.Export(t.Context(), make([]Record, 25)))
wantLens := []int{5, 10, 10, 5}
records := exp.Records()
@@ -179,7 +179,7 @@ func TestChunker(t *testing.T) {
exp := newTestExporter(assert.AnError)
t.Cleanup(exp.Stop)
c := newChunkExporter(exp, 0)
ctx := context.Background()
ctx := t.Context()
records := make([]Record, 25)
err := c.Export(ctx, records)
assert.ErrorIs(t, err, assert.AnError, "no chunking")
@@ -224,7 +224,7 @@ func TestExportSync(t *testing.T) {
defer wg.Done()
in <- exportData{
ctx: context.Background(),
ctx: t.Context(),
records: make([]Record, 1),
}
}()
@@ -254,7 +254,7 @@ func TestExportSync(t *testing.T) {
resp := make(chan error, 1)
in <- exportData{
ctx: context.Background(),
ctx: t.Context(),
records: []Record{r},
respCh: resp,
}
@@ -264,7 +264,7 @@ func TestExportSync(t *testing.T) {
}
// Empty records should be ignored.
in <- exportData{ctx: context.Background()}
in <- exportData{ctx: t.Context()}
wg.Wait()
@@ -307,7 +307,7 @@ func TestTimeoutExporter(t *testing.T) {
out := make(chan error, 1)
go func() {
out <- e.Export(context.Background(), make([]Record, 1))
out <- e.Export(t.Context(), make([]Record, 1))
}()
var err error
@@ -333,7 +333,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, goRoutines)
ctx := context.Background()
ctx := t.Context()
records := make([]Record, 10)
stop := make(chan struct{})
@@ -370,10 +370,10 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
assert.NoError(t, e.Shutdown(context.Background()))
assert.NoError(t, e.Shutdown(t.Context()))
assert.Equal(t, 1, exp.ShutdownN(), "first Shutdown")
assert.NoError(t, e.Shutdown(context.Background()))
assert.NoError(t, e.Shutdown(t.Context()))
assert.Equal(t, 1, exp.ShutdownN(), "second Shutdown")
})
@@ -396,7 +396,7 @@ func TestBufferExporter(t *testing.T) {
// Make sure there is something to flush.
require.True(t, e.EnqueueExport(make([]Record, 1)))
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
cancel()
err := e.Shutdown(ctx)
@@ -409,7 +409,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
assert.ErrorIs(t, e.Shutdown(context.Background()), assert.AnError)
assert.ErrorIs(t, e.Shutdown(t.Context()), assert.AnError)
})
})
@@ -419,7 +419,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 2)
ctx := context.Background()
ctx := t.Context()
records := make([]Record, 1)
require.NoError(t, e.enqueue(ctx, records, nil), "enqueue")
@@ -442,7 +442,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(func() { close(trigger) })
e := newBufferExporter(exp, 1)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
require.True(t, e.EnqueueExport(make([]Record, 1)))
got := make(chan error, 1)
@@ -465,7 +465,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
assert.ErrorIs(t, e.ForceFlush(context.Background()), assert.AnError)
assert.ErrorIs(t, e.ForceFlush(t.Context()), assert.AnError)
})
t.Run("Stopped", func(t *testing.T) {
@@ -474,7 +474,7 @@ func TestBufferExporter(t *testing.T) {
e := newBufferExporter(exp, 1)
ctx := context.Background()
ctx := t.Context()
_ = e.Shutdown(ctx)
assert.NoError(t, e.ForceFlush(ctx))
})
@@ -486,7 +486,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
assert.NoError(t, e.Export(context.Background(), nil))
assert.NoError(t, e.Export(t.Context(), nil))
assert.Equal(t, 0, exp.ExportN())
})
@@ -495,7 +495,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
ctx := context.Background()
ctx := t.Context()
records := make([]Record, 1)
records[0].SetBody(log.BoolValue(true))
@@ -520,7 +520,7 @@ func TestBufferExporter(t *testing.T) {
e := newBufferExporter(exp, 1)
records := make([]Record, 1)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
got := make(chan error, 1)
go func() { got <- e.Export(ctx, records) }()
@@ -542,7 +542,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
ctx, records := context.Background(), make([]Record, 1)
ctx, records := t.Context(), make([]Record, 1)
assert.ErrorIs(t, e.Export(ctx, records), assert.AnError)
})
@@ -552,7 +552,7 @@ func TestBufferExporter(t *testing.T) {
e := newBufferExporter(exp, 1)
ctx := context.Background()
ctx := t.Context()
_ = e.Shutdown(ctx)
assert.NoError(t, e.Export(ctx, make([]Record, 1)))
assert.Equal(t, 0, exp.ExportN(), "Export called")
@@ -566,7 +566,7 @@ func TestBufferExporter(t *testing.T) {
e := newBufferExporter(exp, 1)
assert.True(t, e.EnqueueExport(nil))
e.ForceFlush(context.Background())
e.ForceFlush(t.Context())
assert.Equal(t, 0, exp.ExportN(), "empty batch enqueued")
})
@@ -580,7 +580,7 @@ func TestBufferExporter(t *testing.T) {
assert.True(t, e.EnqueueExport(records))
assert.True(t, e.EnqueueExport(records))
e.ForceFlush(context.Background())
e.ForceFlush(t.Context())
n := exp.ExportN()
assert.Equal(t, 2, n, "Export number")
@@ -592,7 +592,7 @@ func TestBufferExporter(t *testing.T) {
t.Cleanup(exp.Stop)
e := newBufferExporter(exp, 1)
_ = e.Shutdown(context.Background())
_ = e.Shutdown(t.Context())
assert.True(t, e.EnqueueExport(make([]Record, 1)))
})
})

View File

@@ -4,7 +4,6 @@
package log // import "go.opentelemetry.io/otel/sdk/log"
import (
"context"
"testing"
"time"
@@ -51,7 +50,7 @@ func BenchmarkLoggerEmit(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Emit(context.Background(), r)
logger.Emit(b.Context(), r)
}
})
})
@@ -60,7 +59,7 @@ func BenchmarkLoggerEmit(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Emit(context.Background(), r10)
logger.Emit(b.Context(), r10)
}
})
})
@@ -81,7 +80,7 @@ func BenchmarkLoggerEmitObservability(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
logger.Emit(context.Background(), r)
logger.Emit(b.Context(), r)
}
})
}
@@ -99,14 +98,14 @@ func BenchmarkLoggerEmitObservability(b *testing.B) {
})
var rm metricdata.ResourceMetrics
err := reader.Collect(context.Background(), &rm)
err := reader.Collect(b.Context(), &rm)
require.NoError(b, err)
require.Len(b, rm.ScopeMetrics, 1)
}
func BenchmarkLoggerEnabled(b *testing.B) {
logger := newTestLogger(b)
ctx := context.Background()
ctx := b.Context()
param := log.EnabledParameters{Severity: log.SeverityDebug}
var enabled bool

View File

@@ -73,7 +73,7 @@ func TestLoggerEmit(t *testing.T) {
))
contextWithSpanContext := trace.ContextWithSpanContext(
context.Background(),
t.Context(),
trace.NewSpanContext(trace.SpanContextConfig{
TraceID: trace.TraceID{0o1},
SpanID: trace.SpanID{0o2},
@@ -91,7 +91,7 @@ func TestLoggerEmit(t *testing.T) {
{
name: "NoProcessors",
logger: newLogger(NewLoggerProvider(), instrumentation.Scope{}),
ctx: context.Background(),
ctx: t.Context(),
record: r,
},
{
@@ -103,7 +103,7 @@ func TestLoggerEmit(t *testing.T) {
WithAttributeCountLimit(2),
WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
record: r,
expectedRecords: []Record{
{
@@ -133,7 +133,7 @@ func TestLoggerEmit(t *testing.T) {
WithAttributeCountLimit(2),
WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
},
{
name: "WithTraceSpanInContext",
@@ -178,7 +178,7 @@ func TestLoggerEmit(t *testing.T) {
WithAttributeCountLimit(2),
WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
record: r,
expectedRecords: []Record{
{
@@ -209,7 +209,7 @@ func TestLoggerEmit(t *testing.T) {
WithAttributeCountLimit(2),
WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
record: rWithNoObservedTimestamp,
expectedRecords: []Record{
{
@@ -241,7 +241,7 @@ func TestLoggerEmit(t *testing.T) {
WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
WithAllowKeyDuplication(),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
record: rWithAllowKeyDuplication,
expectedRecords: []Record{
{
@@ -274,7 +274,7 @@ func TestLoggerEmit(t *testing.T) {
WithAttributeCountLimit(5),
WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
record: rWithDuplicatesInBody,
expectedRecords: []Record{
{
@@ -332,7 +332,7 @@ func TestLoggerEnabled(t *testing.T) {
{
name: "NoProcessors",
logger: newLogger(NewLoggerProvider(), instrumentation.Scope{}),
ctx: context.Background(),
ctx: t.Context(),
expected: false,
},
{
@@ -341,7 +341,7 @@ func TestLoggerEnabled(t *testing.T) {
WithProcessor(p0),
WithProcessor(p1),
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
ctx: t.Context(),
param: log.EnabledParameters{
Severity: log.SeverityInfo,
EventName: "test_event",
@@ -359,7 +359,7 @@ func TestLoggerEnabled(t *testing.T) {
logger: newLogger(NewLoggerProvider(
WithProcessor(p2WithDisabled),
), instrumentation.Scope{}),
ctx: context.Background(),
ctx: t.Context(),
expected: false,
expectedP2Params: []EnabledParameters{{}},
},
@@ -369,7 +369,7 @@ func TestLoggerEnabled(t *testing.T) {
WithProcessor(p2WithDisabled),
WithProcessor(p0),
), instrumentation.Scope{}),
ctx: context.Background(),
ctx: t.Context(),
expected: true,
expectedP2Params: []EnabledParameters{{}},
expectedP0Params: []EnabledParameters{{}},
@@ -436,11 +436,11 @@ func TestLoggerObservability(t *testing.T) {
l := newLogger(NewLoggerProvider(), instrumentation.Scope{})
for _, record := range tc.records {
l.Emit(context.Background(), record)
l.Emit(t.Context(), record)
}
gotMetrics := new(metricdata.ResourceMetrics)
assert.NoError(t, r.Collect(context.Background(), gotMetrics))
assert.NoError(t, r.Collect(t.Context(), gotMetrics))
if tc.wantLogRecordCount == 0 {
assert.Empty(t, gotMetrics.ScopeMetrics)
return

View File

@@ -248,7 +248,7 @@ func TestWithResource(t *testing.T) {
}
}
func TestLoggerProviderConcurrentSafe(*testing.T) {
func TestLoggerProviderConcurrentSafe(t *testing.T) {
const goRoutineN = 10
var wg sync.WaitGroup
@@ -256,7 +256,7 @@ func TestLoggerProviderConcurrentSafe(*testing.T) {
p := NewLoggerProvider(WithProcessor(newProcessor("0")))
const name = "testLogger"
ctx := context.Background()
ctx := t.Context()
for range goRoutineN {
go func() {
defer wg.Done()
@@ -301,7 +301,7 @@ func TestLoggerProviderLogger(t *testing.T) {
})
t.Run("Stopped", func(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
p := NewLoggerProvider()
_ = p.Shutdown(ctx)
l := p.Logger("testing")
@@ -344,7 +344,7 @@ func TestLoggerProviderShutdown(t *testing.T) {
proc := newProcessor("")
p := NewLoggerProvider(WithProcessor(proc))
ctx := context.Background()
ctx := t.Context()
require.NoError(t, p.Shutdown(ctx))
require.Equal(t, 1, proc.shutdownCalls, "processor Shutdown not called")
@@ -357,7 +357,7 @@ func TestLoggerProviderShutdown(t *testing.T) {
proc.Err = assert.AnError
p := NewLoggerProvider(WithProcessor(proc))
ctx := context.Background()
ctx := t.Context()
assert.ErrorIs(t, p.Shutdown(ctx), assert.AnError, "processor error not returned")
})
}
@@ -367,7 +367,7 @@ func TestLoggerProviderForceFlush(t *testing.T) {
proc := newProcessor("")
p := NewLoggerProvider(WithProcessor(proc))
ctx := context.Background()
ctx := t.Context()
require.NoError(t, p.ForceFlush(ctx))
require.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush not called")
@@ -381,7 +381,7 @@ func TestLoggerProviderForceFlush(t *testing.T) {
proc := newProcessor("")
p := NewLoggerProvider(WithProcessor(proc))
ctx := context.Background()
ctx := t.Context()
require.NoError(t, p.ForceFlush(ctx))
require.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush not called")
@@ -394,7 +394,7 @@ func TestLoggerProviderForceFlush(t *testing.T) {
proc.Err = assert.AnError
p := NewLoggerProvider(WithProcessor(proc))
ctx := context.Background()
ctx := t.Context()
assert.ErrorIs(t, p.ForceFlush(ctx), assert.AnError, "processor error not returned")
})
}
@@ -415,5 +415,5 @@ func BenchmarkLoggerProviderLogger(b *testing.B) {
}
b.StopTimer()
loggers[0].Enabled(context.Background(), log.EnabledParameters{})
loggers[0].Enabled(b.Context(), log.EnabledParameters{})
}

View File

@@ -46,7 +46,7 @@ func TestSimpleProcessorOnEmit(t *testing.T) {
r := new(log.Record)
r.SetSeverityText("test")
_ = s.OnEmit(context.Background(), r)
_ = s.OnEmit(t.Context(), r)
require.True(t, e.exportCalled, "exporter Export not called")
assert.Equal(t, []log.Record{*r}, e.records)
@@ -55,14 +55,14 @@ func TestSimpleProcessorOnEmit(t *testing.T) {
func TestSimpleProcessorShutdown(t *testing.T) {
e := new(exporter)
s := log.NewSimpleProcessor(e)
_ = s.Shutdown(context.Background())
_ = s.Shutdown(t.Context())
require.True(t, e.shutdownCalled, "exporter Shutdown not called")
}
func TestSimpleProcessorForceFlush(t *testing.T) {
e := new(exporter)
s := log.NewSimpleProcessor(e)
_ = s.ForceFlush(context.Background())
_ = s.ForceFlush(t.Context())
require.True(t, e.forceFlushCalled, "exporter ForceFlush not called")
}
@@ -88,7 +88,7 @@ func (*writerExporter) ForceFlush(context.Context) error {
func TestSimpleProcessorEmpty(t *testing.T) {
assert.NotPanics(t, func() {
var s log.SimpleProcessor
ctx := context.Background()
ctx := t.Context()
record := new(log.Record)
assert.NoError(t, s.OnEmit(ctx, record), "OnEmit")
assert.NoError(t, s.ForceFlush(ctx), "ForceFlush")
@@ -96,7 +96,7 @@ func TestSimpleProcessorEmpty(t *testing.T) {
})
}
func TestSimpleProcessorConcurrentSafe(*testing.T) {
func TestSimpleProcessorConcurrentSafe(t *testing.T) {
const goRoutineN = 10
var wg sync.WaitGroup
@@ -104,7 +104,7 @@ func TestSimpleProcessorConcurrentSafe(*testing.T) {
r := new(log.Record)
r.SetSeverityText("test")
ctx := context.Background()
ctx := t.Context()
e := &writerExporter{new(strings.Builder)}
s := log.NewSimpleProcessor(e)
for range goRoutineN {
@@ -123,7 +123,7 @@ func TestSimpleProcessorConcurrentSafe(*testing.T) {
func BenchmarkSimpleProcessorOnEmit(b *testing.B) {
r := new(log.Record)
r.SetSeverityText("test")
ctx := context.Background()
ctx := b.Context()
s := log.NewSimpleProcessor(nil)
b.ReportAllocs()

View File

@@ -53,7 +53,6 @@ func exponentialAggregationSelector(ik InstrumentKind) Aggregation {
}
func benchSyncViews(views ...View) func(*testing.B) {
ctx := context.Background()
rdr := NewManualReader()
provider := NewMeterProvider(WithReader(rdr), WithView(views...))
meter := provider.Meter("benchSyncViews")
@@ -66,7 +65,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("Int64Counter", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.AddOption{metric.WithAttributeSet(s)}
return func() { iCtr.Add(ctx, 1, o...) }
return func() { iCtr.Add(b.Context(), 1, o...) }
}
}()))
@@ -75,7 +74,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("Float64Counter", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.AddOption{metric.WithAttributeSet(s)}
return func() { fCtr.Add(ctx, 1, o...) }
return func() { fCtr.Add(b.Context(), 1, o...) }
}
}()))
@@ -84,7 +83,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("Int64UpDownCounter", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.AddOption{metric.WithAttributeSet(s)}
return func() { iUDCtr.Add(ctx, 1, o...) }
return func() { iUDCtr.Add(b.Context(), 1, o...) }
}
}()))
@@ -93,7 +92,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("Float64UpDownCounter", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.AddOption{metric.WithAttributeSet(s)}
return func() { fUDCtr.Add(ctx, 1, o...) }
return func() { fUDCtr.Add(b.Context(), 1, o...) }
}
}()))
@@ -102,7 +101,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("Int64Histogram", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.RecordOption{metric.WithAttributeSet(s)}
return func() { iHist.Record(ctx, 1, o...) }
return func() { iHist.Record(b.Context(), 1, o...) }
}
}()))
@@ -111,7 +110,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("Float64Histogram", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.RecordOption{metric.WithAttributeSet(s)}
return func() { fHist.Record(ctx, 1, o...) }
return func() { fHist.Record(b.Context(), 1, o...) }
}
}()))
@@ -120,7 +119,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("ExponentialInt64Histogram", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.RecordOption{metric.WithAttributeSet(s)}
return func() { expIHist.Record(ctx, 1, o...) }
return func() { expIHist.Record(b.Context(), 1, o...) }
}
}()))
@@ -129,7 +128,7 @@ func benchSyncViews(views ...View) func(*testing.B) {
b.Run("ExponentialFloat64Histogram", benchMeasAttrs(func() measF {
return func(s attribute.Set) func() {
o := []metric.RecordOption{metric.WithAttributeSet(s)}
return func() { expFHist.Record(ctx, 1, o...) }
return func() { expFHist.Record(b.Context(), 1, o...) }
}
}()))
}
@@ -184,13 +183,12 @@ func benchCollectViews(views ...View) func(*testing.B) {
mp := NewMeterProvider(WithReader(r), WithView(views...))
return mp.Meter(name), r
}
ctx := context.Background()
return func(b *testing.B) {
b.Run("Int64Counter/1", benchCollectAttrs(func(s attribute.Set) Reader {
m, r := setup("benchCollectViews/Int64Counter")
i, err := m.Int64Counter("int64-counter")
assert.NoError(b, err)
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
return r
}))
b.Run("Int64Counter/10", benchCollectAttrs(func(s attribute.Set) Reader {
@@ -198,7 +196,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
i, err := m.Int64Counter("int64-counter")
assert.NoError(b, err)
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
}
return r
}))
@@ -207,7 +205,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Float64Counter")
i, err := m.Float64Counter("float64-counter")
assert.NoError(b, err)
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
return r
}))
b.Run("Float64Counter/10", benchCollectAttrs(func(s attribute.Set) Reader {
@@ -215,7 +213,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
i, err := m.Float64Counter("float64-counter")
assert.NoError(b, err)
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
}
return r
}))
@@ -224,7 +222,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Int64UpDownCounter")
i, err := m.Int64UpDownCounter("int64-up-down-counter")
assert.NoError(b, err)
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
return r
}))
b.Run("Int64UpDownCounter/10", benchCollectAttrs(func(s attribute.Set) Reader {
@@ -232,7 +230,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
i, err := m.Int64UpDownCounter("int64-up-down-counter")
assert.NoError(b, err)
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
}
return r
}))
@@ -241,7 +239,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Float64UpDownCounter")
i, err := m.Float64UpDownCounter("float64-up-down-counter")
assert.NoError(b, err)
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
return r
}))
b.Run("Float64UpDownCounter/10", benchCollectAttrs(func(s attribute.Set) Reader {
@@ -249,7 +247,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
i, err := m.Float64UpDownCounter("float64-up-down-counter")
assert.NoError(b, err)
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
i.Add(b.Context(), 1, metric.WithAttributeSet(s))
}
return r
}))
@@ -258,7 +256,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Int64Histogram")
i, err := m.Int64Histogram("int64-histogram")
assert.NoError(b, err)
i.Record(ctx, 1, metric.WithAttributeSet(s))
i.Record(b.Context(), 1, metric.WithAttributeSet(s))
return r
}))
b.Run("Int64Histogram/10", benchCollectAttrs(func(s attribute.Set) Reader {
@@ -266,7 +264,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
i, err := m.Int64Histogram("int64-histogram")
assert.NoError(b, err)
for range 10 {
i.Record(ctx, 1, metric.WithAttributeSet(s))
i.Record(b.Context(), 1, metric.WithAttributeSet(s))
}
return r
}))
@@ -275,7 +273,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Float64Histogram")
i, err := m.Float64Histogram("float64-histogram")
assert.NoError(b, err)
i.Record(ctx, 1, metric.WithAttributeSet(s))
i.Record(b.Context(), 1, metric.WithAttributeSet(s))
return r
}))
b.Run("Float64Histogram/10", benchCollectAttrs(func(s attribute.Set) Reader {
@@ -283,7 +281,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
i, err := m.Float64Histogram("float64-histogram")
assert.NoError(b, err)
for range 10 {
i.Record(ctx, 1, metric.WithAttributeSet(s))
i.Record(b.Context(), 1, metric.WithAttributeSet(s))
}
return r
}))
@@ -367,13 +365,12 @@ func float64Cback(s attribute.Set) metric.Float64Callback {
}
func benchCollectAttrs(setup func(attribute.Set) Reader) func(*testing.B) {
ctx := context.Background()
out := new(metricdata.ResourceMetrics)
run := func(reader Reader) func(b *testing.B) {
return func(b *testing.B) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
_ = reader.Collect(ctx, out)
_ = reader.Collect(b.Context(), out)
}
}
}
@@ -396,7 +393,7 @@ func BenchmarkExemplars(b *testing.B) {
TraceID: trace.TraceID{0o1},
TraceFlags: trace.FlagsSampled,
})
ctx := trace.ContextWithSpanContext(context.Background(), sc)
ctx := trace.ContextWithSpanContext(b.Context(), sc)
attr := attribute.NewSet(
attribute.String("user", "Alice"),

View File

@@ -57,7 +57,7 @@ func TestConfigReaderSignalsEmpty(t *testing.T) {
require.NotNil(t, f)
require.NotNil(t, s)
ctx := context.Background()
ctx := t.Context()
assert.NoError(t, f(ctx))
assert.NoError(t, s(ctx))
assert.ErrorIs(t, s(ctx), ErrReaderShutdown)
@@ -81,7 +81,7 @@ func TestConfigReaderSignalsForwarded(t *testing.T) {
require.NotNil(t, f)
require.NotNil(t, s)
ctx := context.Background()
ctx := t.Context()
assert.NoError(t, f(ctx))
assert.NoError(t, f(ctx))
assert.NoError(t, s(ctx))
@@ -102,7 +102,7 @@ func TestConfigReaderSignalsForwardedErrors(t *testing.T) {
require.NotNil(t, f)
require.NotNil(t, s)
ctx := context.Background()
ctx := t.Context()
assert.ErrorIs(t, f(ctx), assert.AnError)
assert.ErrorIs(t, s(ctx), assert.AnError)
assert.ErrorIs(t, s(ctx), ErrReaderShutdown)
@@ -118,7 +118,7 @@ func TestUnifyMultiError(t *testing.T) {
func(context.Context) error { return e0 },
func(context.Context) error { return e1 },
func(context.Context) error { return e2 },
})(context.Background())
})(t.Context())
assert.ErrorIs(t, err, e0)
assert.ErrorIs(t, err, e1)
assert.ErrorIs(t, err, e2)
@@ -299,8 +299,8 @@ func TestWithExemplarFilterOff(t *testing.T) {
}
c := newConfig(tc.opts)
assert.NotNil(t, c.exemplarFilter)
assert.Equal(t, tc.expectFilterNotSampled, c.exemplarFilter(context.Background()))
assert.Equal(t, tc.expectFilterSampled, c.exemplarFilter(sample(context.Background())))
assert.Equal(t, tc.expectFilterNotSampled, c.exemplarFilter(t.Context()))
assert.Equal(t, tc.expectFilterSampled, c.exemplarFilter(sample(t.Context())))
})
}
}

View File

@@ -18,7 +18,7 @@ func TestTraceBasedFilter(t *testing.T) {
}
func testTraceBasedFilter[N int64 | float64](t *testing.T) {
ctx := context.Background()
ctx := t.Context()
assert.False(t, TraceBasedFilter(ctx), "non-sampled context should not be offered")
assert.True(t, TraceBasedFilter(sample(ctx)), "sampled context should be offered")
@@ -39,7 +39,7 @@ func TestAlwaysOnFilter(t *testing.T) {
}
func testAlwaysOnFiltered[N int64 | float64](t *testing.T) {
ctx := context.Background()
ctx := t.Context()
assert.True(t, AlwaysOnFilter(ctx), "non-sampled context should not be offered")
assert.True(t, AlwaysOnFilter(sample(ctx)), "sampled context should be offered")

View File

@@ -4,7 +4,6 @@
package exemplar
import (
"context"
"math"
"math/rand/v2"
"slices"
@@ -42,7 +41,7 @@ func TestNewFixedSizeReservoirSamplingCorrectness(t *testing.T) {
r := NewFixedSizeReservoir(sampleSize)
for _, value := range data {
r.Offer(context.Background(), staticTime, NewValue(value), nil)
r.Offer(t.Context(), staticTime, NewValue(value), nil)
}
var sum float64

View File

@@ -4,7 +4,6 @@
package exemplar
import (
"context"
"testing"
"time"
@@ -24,7 +23,7 @@ func ReservoirTest[N int64 | float64](f factory) func(*testing.T) {
return func(t *testing.T) {
t.Helper()
ctx := context.Background()
ctx := t.Context()
t.Run("CaptureSpanContext", func(t *testing.T) {
t.Helper()
@@ -137,7 +136,7 @@ func ReservoirTest[N int64 | float64](f factory) func(*testing.T) {
}
r := rp(*attribute.EmptySet())
r.Offer(context.Background(), staticTime, NewValue(N(10)), nil)
r.Offer(t.Context(), staticTime, NewValue(N(10)), nil)
dest := []Exemplar{{}} // Should be reset to empty.
r.Collect(&dest)

View File

@@ -27,7 +27,7 @@ func TestFixedSizeExemplarConcurrentSafe(t *testing.T) {
i1, err := m.Int64Counter("counter.1")
require.NoError(t, err)
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
add := func() {
i0.Add(ctx, 1)

View File

@@ -4,7 +4,6 @@
package metric
import (
"context"
"testing"
"go.opentelemetry.io/otel/attribute"
@@ -42,7 +41,7 @@ func BenchmarkInstrument(b *testing.B) {
meas = append(meas, in)
inst := int64Inst{measures: meas}
ctx := context.Background()
ctx := b.Context()
b.ReportAllocs()
b.ResetTimer()

View File

@@ -95,7 +95,7 @@ func testBuilderFilter[N int64 | float64]() func(t *testing.T) {
assert.Equal(t, wantF, f, "measured incorrect filtered attributes")
assert.ElementsMatch(t, wantD, d, "measured incorrect dropped attributes")
})
meas(context.Background(), value, attr)
meas(t.Context(), value, attr)
}
}
@@ -152,7 +152,7 @@ func benchmarkAggregate[N int64 | float64](factory func() (Measure[N], ComputeAg
var bmarkRes metricdata.Aggregation
func benchmarkAggregateN[N int64 | float64](b *testing.B, factory func() (Measure[N], ComputeAggregation), count int) {
ctx := context.Background()
ctx := b.Context()
attrs := make([]attribute.Set, count)
for i := range attrs {
attrs[i] = attribute.NewSet(attribute.Int("value", i))

View File

@@ -174,7 +174,7 @@ func testExpoHistogramMinMaxSumInt64(t *testing.T) {
h := newExponentialHistogram[int64](4, 20, false, false, 0, dropExemplars[int64])
for _, v := range tt.values {
h.measure(context.Background(), v, alice, nil)
h.measure(t.Context(), v, alice, nil)
}
dp := h.values[alice.Equivalent()]
@@ -216,7 +216,7 @@ func testExpoHistogramMinMaxSumFloat64(t *testing.T) {
h := newExponentialHistogram[float64](4, 20, false, false, 0, dropExemplars[float64])
for _, v := range tt.values {
h.measure(context.Background(), v, alice, nil)
h.measure(t.Context(), v, alice, nil)
}
dp := h.values[alice.Equivalent()]

View File

@@ -329,7 +329,7 @@ func TestHistogramImmutableBounds(t *testing.T) {
b[0] = 10
assert.Equal(t, cpB, h.bounds, "modifying the bounds argument should not change the bounds")
h.measure(context.Background(), 5, alice, nil)
h.measure(t.Context(), 5, alice, nil)
var data metricdata.Aggregation = metricdata.Histogram[int64]{}
h.cumulative(&data)
@@ -340,7 +340,7 @@ func TestHistogramImmutableBounds(t *testing.T) {
func TestCumulativeHistogramImmutableCounts(t *testing.T) {
h := newHistogram[int64](bounds, noMinMax, false, 0, dropExemplars[int64])
h.measure(context.Background(), 5, alice, nil)
h.measure(t.Context(), 5, alice, nil)
var data metricdata.Aggregation = metricdata.Histogram[int64]{}
h.cumulative(&data)
@@ -370,7 +370,7 @@ func TestDeltaHistogramReset(t *testing.T) {
require.Equal(t, 0, h.delta(&data))
require.Empty(t, data.(metricdata.Histogram[int64]).DataPoints)
h.measure(context.Background(), 1, alice, nil)
h.measure(t.Context(), 1, alice, nil)
expect := metricdata.Histogram[int64]{Temporality: metricdata.DeltaTemporality}
expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](alice, 1, 1, now(), now())}
@@ -383,7 +383,7 @@ func TestDeltaHistogramReset(t *testing.T) {
assert.Empty(t, data.(metricdata.Histogram[int64]).DataPoints)
// Aggregating another set should not affect the original (alice).
h.measure(context.Background(), 1, bob, nil)
h.measure(t.Context(), 1, bob, nil)
expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](bob, 1, 1, now(), now())}
h.delta(&data)
metricdatatest.AssertAggregationsEqual(t, expect, data)

View File

@@ -73,7 +73,7 @@ func TestManualReaderTemporality(t *testing.T) {
}
func TestManualReaderCollect(t *testing.T) {
expiredCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1))
expiredCtx, cancel := context.WithDeadline(t.Context(), time.Now().Add(-1))
defer cancel()
tests := []struct {
@@ -83,7 +83,7 @@ func TestManualReaderCollect(t *testing.T) {
}{
{
name: "with a valid context",
ctx: context.Background(),
ctx: t.Context(),
expectedErr: nil,
},
{

View File

@@ -162,7 +162,7 @@ func TestCallbackUnregisterConcurrency(t *testing.T) {
// Instruments should produce correct ResourceMetrics.
func TestMeterCreatesInstruments(t *testing.T) {
// The synchronous measurement methods must ignore the context cancellation.
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
cancel()
alice := attribute.NewSet(
@@ -391,7 +391,7 @@ func TestMeterCreatesInstruments(t *testing.T) {
c, ok := ctr.(x.EnabledInstrument)
require.True(t, ok)
assert.True(t, c.Enabled(context.Background()))
assert.True(t, c.Enabled(t.Context()))
ctr.Add(ctx, 3)
},
want: metricdata.Metrics{
@@ -413,7 +413,7 @@ func TestMeterCreatesInstruments(t *testing.T) {
c, ok := ctr.(x.EnabledInstrument)
require.True(t, ok)
assert.True(t, c.Enabled(context.Background()))
assert.True(t, c.Enabled(t.Context()))
ctr.Add(ctx, 11)
},
want: metricdata.Metrics{
@@ -464,7 +464,7 @@ func TestMeterCreatesInstruments(t *testing.T) {
c, ok := ctr.(x.EnabledInstrument)
require.True(t, ok)
assert.True(t, c.Enabled(context.Background()))
assert.True(t, c.Enabled(t.Context()))
ctr.Add(ctx, 3)
},
want: metricdata.Metrics{
@@ -486,7 +486,7 @@ func TestMeterCreatesInstruments(t *testing.T) {
c, ok := ctr.(x.EnabledInstrument)
require.True(t, ok)
assert.True(t, c.Enabled(context.Background()))
assert.True(t, c.Enabled(t.Context()))
ctr.Add(ctx, 11)
},
want: metricdata.Metrics{
@@ -539,7 +539,7 @@ func TestMeterCreatesInstruments(t *testing.T) {
tt.fn(t, m)
rm := metricdata.ResourceMetrics{}
err := rdr.Collect(context.Background(), &rm)
err := rdr.Collect(t.Context(), &rm)
assert.NoError(t, err)
require.Len(t, rm.ScopeMetrics, 1)
@@ -618,7 +618,7 @@ func TestMeterWithDropView(t *testing.T) {
require.NoError(t, err)
c, ok := got.(x.EnabledInstrument)
require.True(t, ok)
assert.False(t, c.Enabled(context.Background()))
assert.False(t, c.Enabled(t.Context()))
})
}
}
@@ -1039,7 +1039,7 @@ func TestCallbackObserverNonRegistered(t *testing.T) {
var got metricdata.ResourceMetrics
assert.NotPanics(t, func() {
err = rdr.Collect(context.Background(), &got)
err = rdr.Collect(t.Context(), &got)
})
assert.NoError(t, err)
@@ -1131,7 +1131,7 @@ func TestGlobalInstRegisterCallback(t *testing.T) {
assert.NoError(t, err)
got := metricdata.ResourceMetrics{}
err = rdr.Collect(context.Background(), &got)
err = rdr.Collect(t.Context(), &got)
assert.NoError(t, err)
assert.Emptyf(t, l.messages, "Warnings and errors logged:\n%s", l)
metricdatatest.AssertEqual(t, metricdata.ResourceMetrics{
@@ -1244,7 +1244,7 @@ func TestMetersProvideScope(t *testing.T) {
}
got := metricdata.ResourceMetrics{}
err = rdr.Collect(context.Background(), &got)
err = rdr.Collect(t.Context(), &got)
assert.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, metricdatatest.IgnoreTimestamp())
}
@@ -1287,7 +1287,7 @@ func TestUnregisterUnregisters(t *testing.T) {
)
require.NoError(t, err)
ctx := context.Background()
ctx := t.Context()
err = r.Collect(ctx, &metricdata.ResourceMetrics{})
require.NoError(t, err)
assert.True(t, called, "callback not called for registered callback")
@@ -1342,7 +1342,7 @@ func TestRegisterCallbackDropAggregations(t *testing.T) {
require.NoError(t, err)
data := metricdata.ResourceMetrics{}
err = r.Collect(context.Background(), &data)
err = r.Collect(t.Context(), &data)
require.NoError(t, err)
assert.False(t, called, "callback called for all drop instruments")
@@ -1521,14 +1521,14 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
},
{
name: "SyncFloat64Counter",
register: func(_ *testing.T, mtr metric.Meter) error {
register: func(t *testing.T, mtr metric.Meter) error {
ctr, err := mtr.Float64Counter("sfcounter")
if err != nil {
return err
}
ctr.Add(context.Background(), 1.0, withV1)
ctr.Add(context.Background(), 2.0, withV2)
ctr.Add(t.Context(), 1.0, withV1)
ctr.Add(t.Context(), 2.0, withV2)
return nil
},
wantMetric: metricdata.Metrics{
@@ -1544,14 +1544,14 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
},
{
name: "SyncFloat64UpDownCounter",
register: func(_ *testing.T, mtr metric.Meter) error {
register: func(t *testing.T, mtr metric.Meter) error {
ctr, err := mtr.Float64UpDownCounter("sfupdowncounter")
if err != nil {
return err
}
ctr.Add(context.Background(), 1.0, withV1)
ctr.Add(context.Background(), 2.0, withV2)
ctr.Add(t.Context(), 1.0, withV1)
ctr.Add(t.Context(), 2.0, withV2)
return nil
},
wantMetric: metricdata.Metrics{
@@ -1567,14 +1567,14 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
},
{
name: "SyncFloat64Histogram",
register: func(_ *testing.T, mtr metric.Meter) error {
register: func(t *testing.T, mtr metric.Meter) error {
ctr, err := mtr.Float64Histogram("sfhistogram")
if err != nil {
return err
}
ctr.Record(context.Background(), 1.0, withV1)
ctr.Record(context.Background(), 2.0, withV2)
ctr.Record(t.Context(), 1.0, withV1)
ctr.Record(t.Context(), 2.0, withV2)
return nil
},
wantMetric: metricdata.Metrics{
@@ -1600,14 +1600,14 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
},
{
name: "SyncInt64Counter",
register: func(_ *testing.T, mtr metric.Meter) error {
register: func(t *testing.T, mtr metric.Meter) error {
ctr, err := mtr.Int64Counter("sicounter")
if err != nil {
return err
}
ctr.Add(context.Background(), 10, withV1)
ctr.Add(context.Background(), 20, withV2)
ctr.Add(t.Context(), 10, withV1)
ctr.Add(t.Context(), 20, withV2)
return nil
},
wantMetric: metricdata.Metrics{
@@ -1623,14 +1623,14 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
},
{
name: "SyncInt64UpDownCounter",
register: func(_ *testing.T, mtr metric.Meter) error {
register: func(t *testing.T, mtr metric.Meter) error {
ctr, err := mtr.Int64UpDownCounter("siupdowncounter")
if err != nil {
return err
}
ctr.Add(context.Background(), 10, withV1)
ctr.Add(context.Background(), 20, withV2)
ctr.Add(t.Context(), 10, withV1)
ctr.Add(t.Context(), 20, withV2)
return nil
},
wantMetric: metricdata.Metrics{
@@ -1646,14 +1646,14 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
},
{
name: "SyncInt64Histogram",
register: func(_ *testing.T, mtr metric.Meter) error {
register: func(t *testing.T, mtr metric.Meter) error {
ctr, err := mtr.Int64Histogram("sihistogram")
if err != nil {
return err
}
ctr.Record(context.Background(), 1, withV1)
ctr.Record(context.Background(), 2, withV2)
ctr.Record(t.Context(), 1, withV1)
ctr.Record(t.Context(), 2, withV2)
return nil
},
wantMetric: metricdata.Metrics{
@@ -1695,7 +1695,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
require.NoError(t, tt.register(t, mtr))
m := metricdata.ResourceMetrics{}
err := rdr.Collect(context.Background(), &m)
err := rdr.Collect(t.Context(), &m)
assert.NoError(t, err)
require.Len(t, m.ScopeMetrics, 1)
@@ -1800,13 +1800,13 @@ func TestObservableExample(t *testing.T) {
collect := func(t *testing.T) {
t.Helper()
got := metricdata.ResourceMetrics{}
err := reader1.Collect(context.Background(), &got)
err := reader1.Collect(t.Context(), &got)
require.NoError(t, err)
require.Len(t, got.ScopeMetrics, 1)
metricdatatest.AssertEqual(t, *want, got.ScopeMetrics[0], metricdatatest.IgnoreTimestamp())
got = metricdata.ResourceMetrics{}
err = reader2.Collect(context.Background(), &got)
err = reader2.Collect(t.Context(), &got)
require.NoError(t, err)
require.Len(t, got.ScopeMetrics, 1)
metricdatatest.AssertEqual(t, *want, got.ScopeMetrics[0], metricdatatest.IgnoreTimestamp())
@@ -2093,7 +2093,7 @@ func TestMalformedSelectors(t *testing.T) {
global.SetErrorHandler(noErrorHandler{t})
defer func() {
_ = tt.reader.Shutdown(context.Background())
_ = tt.reader.Shutdown(t.Context())
}()
meter := NewMeterProvider(WithReader(tt.reader)).Meter("TestNilAggregationSelector")
@@ -2147,15 +2147,15 @@ func TestMalformedSelectors(t *testing.T) {
)
require.NoError(t, err)
siCounter.Add(context.Background(), 1)
siUpDownCounter.Add(context.Background(), 1)
siHistogram.Record(context.Background(), 1)
sfCounter.Add(context.Background(), 1)
sfUpDownCounter.Add(context.Background(), 1)
sfHistogram.Record(context.Background(), 1)
siCounter.Add(t.Context(), 1)
siUpDownCounter.Add(t.Context(), 1)
siHistogram.Record(t.Context(), 1)
sfCounter.Add(t.Context(), 1)
sfUpDownCounter.Add(t.Context(), 1)
sfHistogram.Record(t.Context(), 1)
var rm metricdata.ResourceMetrics
err = tt.reader.Collect(context.Background(), &rm)
err = tt.reader.Collect(t.Context(), &rm)
require.NoError(t, err)
require.Len(t, rm.ScopeMetrics, 1)
@@ -2213,9 +2213,9 @@ func TestHistogramBucketPrecedenceOrdering(t *testing.T) {
).Meter("TestHistogramBucketPrecedenceOrdering")
sfHistogram, err := meter.Float64Histogram("sync.float64.histogram", tt.histogramOpts...)
require.NoError(t, err)
sfHistogram.Record(context.Background(), 1)
sfHistogram.Record(t.Context(), 1)
var rm metricdata.ResourceMetrics
err = tt.reader.Collect(context.Background(), &rm)
err = tt.reader.Collect(t.Context(), &rm)
require.NoError(t, err)
require.Len(t, rm.ScopeMetrics, 1)
require.Len(t, rm.ScopeMetrics[0].Metrics, 1)
@@ -2414,7 +2414,7 @@ func TestObservableDropAggregation(t *testing.T) {
require.NoError(t, err)
var rm metricdata.ResourceMetrics
err = reader.Collect(context.Background(), &rm)
err = reader.Collect(t.Context(), &rm)
require.NoError(t, err)
if len(tt.wantObservables) == 0 {
@@ -2526,7 +2526,7 @@ func TestDuplicateInstrumentCreation(t *testing.T) {
t.Run(tt.desc, func(t *testing.T) {
reader := NewManualReader()
defer func() {
require.NoError(t, reader.Shutdown(context.Background()))
require.NoError(t, reader.Shutdown(t.Context()))
}()
m := NewMeterProvider(WithReader(reader)).Meter("TestDuplicateInstrumentCreation")
@@ -2597,7 +2597,7 @@ func TestExemplarFilter(t *testing.T) {
m1 := mp.Meter("scope")
ctr1, err := m1.Float64Counter("ctr")
assert.NoError(t, err)
ctr1.Add(context.Background(), 1.0)
ctr1.Add(t.Context(), 1.0)
want := metricdata.ResourceMetrics{
Resource: resource.Default(),
@@ -2630,7 +2630,7 @@ func TestExemplarFilter(t *testing.T) {
}
got := metricdata.ResourceMetrics{}
err = rdr.Collect(context.Background(), &got)
err = rdr.Collect(t.Context(), &got)
assert.NoError(t, err)
metricdatatest.AssertEqual(t, want, got, metricdatatest.IgnoreTimestamp())
}

View File

@@ -286,7 +286,7 @@ func TestPeriodicReaderRun(t *testing.T) {
assert.Equal(t, assert.AnError, <-eh.Err)
// Ensure Reader is allowed clean up attempt.
_ = r.Shutdown(context.Background())
_ = r.Shutdown(t.Context())
}
func TestPeriodicReaderFlushesPending(t *testing.T) {
@@ -310,11 +310,11 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
exp, called := expFunc(t)
r := NewPeriodicReader(exp, WithProducer(testExternalProducer{}))
r.register(testSDKProducer{})
assert.Equal(t, assert.AnError, r.ForceFlush(context.Background()), "export error not returned")
assert.Equal(t, assert.AnError, r.ForceFlush(t.Context()), "export error not returned")
assert.True(t, *called, "exporter Export method not called, pending telemetry not flushed")
// Ensure Reader is allowed clean up attempt.
_ = r.Shutdown(context.Background())
_ = r.Shutdown(t.Context())
})
t.Run("ForceFlush timeout on producer", func(t *testing.T) {
@@ -333,11 +333,11 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
return nil
},
})
assert.ErrorIs(t, r.ForceFlush(context.Background()), context.DeadlineExceeded)
assert.ErrorIs(t, r.ForceFlush(t.Context()), context.DeadlineExceeded)
assert.False(t, *called, "exporter Export method called when it should have failed before export")
// Ensure Reader is allowed clean up attempt.
_ = r.Shutdown(context.Background())
_ = r.Shutdown(t.Context())
})
t.Run("ForceFlush timeout on external producer", func(t *testing.T) {
@@ -355,18 +355,18 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
},
}))
r.register(testSDKProducer{})
assert.ErrorIs(t, r.ForceFlush(context.Background()), context.DeadlineExceeded)
assert.ErrorIs(t, r.ForceFlush(t.Context()), context.DeadlineExceeded)
assert.False(t, *called, "exporter Export method called when it should have failed before export")
// Ensure Reader is allowed clean up attempt.
_ = r.Shutdown(context.Background())
_ = r.Shutdown(t.Context())
})
t.Run("Shutdown", func(t *testing.T) {
exp, called := expFunc(t)
r := NewPeriodicReader(exp, WithProducer(testExternalProducer{}))
r.register(testSDKProducer{})
assert.Equal(t, assert.AnError, r.Shutdown(context.Background()), "export error not returned")
assert.Equal(t, assert.AnError, r.Shutdown(t.Context()), "export error not returned")
assert.True(t, *called, "exporter Export method not called, pending telemetry not flushed")
})
@@ -386,7 +386,7 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
return nil
},
})
assert.ErrorIs(t, r.Shutdown(context.Background()), context.DeadlineExceeded)
assert.ErrorIs(t, r.Shutdown(t.Context()), context.DeadlineExceeded)
assert.False(t, *called, "exporter Export method called when it should have failed before export")
})
@@ -405,13 +405,13 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
},
}))
r.register(testSDKProducer{})
assert.ErrorIs(t, r.Shutdown(context.Background()), context.DeadlineExceeded)
assert.ErrorIs(t, r.Shutdown(t.Context()), context.DeadlineExceeded)
assert.False(t, *called, "exporter Export method called when it should have failed before export")
})
}
func TestPeriodicReaderMultipleForceFlush(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
r := NewPeriodicReader(new(fnExporter), WithProducer(testExternalProducer{}))
r.register(testSDKProducer{})
require.NoError(t, r.ForceFlush(ctx))
@@ -422,7 +422,7 @@ func TestPeriodicReaderMultipleForceFlush(t *testing.T) {
func BenchmarkPeriodicReader(b *testing.B) {
r := NewPeriodicReader(new(fnExporter))
b.Run("Collect", benchReaderCollectFunc(r))
require.NoError(b, r.Shutdown(context.Background()))
require.NoError(b, r.Shutdown(b.Context()))
}
func TestPeriodicReaderTemporality(t *testing.T) {
@@ -460,7 +460,7 @@ func TestPeriodicReaderTemporality(t *testing.T) {
}
func TestPeriodicReaderCollect(t *testing.T) {
expiredCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1))
expiredCtx, cancel := context.WithDeadline(t.Context(), time.Now().Add(-1))
defer cancel()
tests := []struct {
@@ -470,7 +470,7 @@ func TestPeriodicReaderCollect(t *testing.T) {
}{
{
name: "with a valid context",
ctx: context.Background(),
ctx: t.Context(),
expectedErr: nil,
},
{

View File

@@ -4,7 +4,6 @@
package metric // import "go.opentelemetry.io/otel/sdk/metric"
import (
"context"
"sync/atomic"
"testing"
@@ -60,7 +59,7 @@ func assertSum[N int64 | float64](
for m := range n {
t.Logf("input/output number: %d", m)
in, out := meas[m], comps[m]
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
var got metricdata.Aggregation
assert.Equal(t, 1, out(&got), "1 data-point expected")
@@ -70,7 +69,7 @@ func assertSum[N int64 | float64](
DataPoints: []metricdata.DataPoint[N]{{Value: v[0]}},
}, got, metricdatatest.IgnoreTimestamp())
in(context.Background(), 3, *attribute.EmptySet())
in(t.Context(), 3, *attribute.EmptySet())
assert.Equal(t, 1, out(&got), "1 data-point expected")
metricdatatest.AssertAggregationsEqual(t, metricdata.Sum[N]{
@@ -90,7 +89,7 @@ func assertHist[N int64 | float64](
requireN[N](t, 1, meas, comps, err)
in, out := meas[0], comps[0]
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
var got metricdata.Aggregation
assert.Equal(t, 1, out(&got), "1 data-point expected")
@@ -108,7 +107,7 @@ func assertHist[N int64 | float64](
}},
}, got, metricdatatest.IgnoreTimestamp())
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
if temp == metricdata.CumulativeTemporality {
buckets[1] = 2
@@ -139,8 +138,8 @@ func assertLastValue[N int64 | float64](
requireN[N](t, 1, meas, comps, err)
in, out := meas[0], comps[0]
in(context.Background(), 10, *attribute.EmptySet())
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 10, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
var got metricdata.Aggregation
assert.Equal(t, 1, out(&got), "1 data-point expected")
@@ -288,7 +287,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) {
requireN[N](t, 1, meas, comps, err)
in, out := meas[0], comps[0]
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
var got metricdata.Aggregation
assert.Equal(t, 1, out(&got), "1 data-point expected")
@@ -302,7 +301,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) {
}},
}, got, metricdatatest.IgnoreTimestamp())
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
assert.Equal(t, 1, out(&got), "1 data-point expected")
metricdatatest.AssertAggregationsEqual(t, metricdata.Histogram[N]{

View File

@@ -47,7 +47,7 @@ func TestNewPipeline(t *testing.T) {
pipe := newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter, 0)
output := metricdata.ResourceMetrics{}
err := pipe.produce(context.Background(), &output)
err := pipe.produce(t.Context(), &output)
require.NoError(t, err)
assert.Equal(t, resource.Empty(), output.Resource)
assert.Empty(t, output.ScopeMetrics)
@@ -61,7 +61,7 @@ func TestNewPipeline(t *testing.T) {
pipe.addMultiCallback(func(context.Context) error { return nil })
})
err = pipe.produce(context.Background(), &output)
err = pipe.produce(t.Context(), &output)
require.NoError(t, err)
assert.Equal(t, resource.Empty(), output.Resource)
require.Len(t, output.ScopeMetrics, 1)
@@ -73,14 +73,14 @@ func TestPipelineUsesResource(t *testing.T) {
pipe := newPipeline(res, nil, nil, exemplar.AlwaysOffFilter, 0)
output := metricdata.ResourceMetrics{}
err := pipe.produce(context.Background(), &output)
err := pipe.produce(t.Context(), &output)
assert.NoError(t, err)
assert.Equal(t, res, output.Resource)
}
func TestPipelineConcurrentSafe(*testing.T) {
func TestPipelineConcurrentSafe(t *testing.T) {
pipe := newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter, 0)
ctx := context.Background()
ctx := t.Context()
var output metricdata.ResourceMetrics
var wg sync.WaitGroup
@@ -163,11 +163,11 @@ func testDefaultViewImplicit[N int64 | float64]() func(t *testing.T) {
require.NoError(t, err)
assert.Len(t, got, 1, "default view not applied")
for _, in := range got {
in(context.Background(), 1, *attribute.EmptySet())
in(t.Context(), 1, *attribute.EmptySet())
}
out := metricdata.ResourceMetrics{}
err = test.pipe.produce(context.Background(), &out)
err = test.pipe.produce(t.Context(), &out)
require.NoError(t, err)
require.Len(t, out.ScopeMetrics, 1, "Aggregator not registered with pipeline")
sm := out.ScopeMetrics[0]
@@ -441,7 +441,7 @@ func TestExemplars(t *testing.T) {
t.Helper()
rm := new(metricdata.ResourceMetrics)
require.NoError(t, r.Collect(context.Background(), rm))
require.NoError(t, r.Collect(t.Context(), rm))
require.Len(t, rm.ScopeMetrics, 1, "ScopeMetrics")
sm := rm.ScopeMetrics[0]
@@ -460,13 +460,13 @@ func TestExemplars(t *testing.T) {
assert.Len(t, expo.DataPoints[0].Exemplars, nExpo)
}
ctx := context.Background()
ctx := t.Context()
sc := trace.NewSpanContext(trace.SpanContextConfig{
SpanID: trace.SpanID{0o1},
TraceID: trace.TraceID{0o1},
TraceFlags: trace.FlagsSampled,
})
sampled := trace.ContextWithSpanContext(context.Background(), sc)
sampled := trace.ContextWithSpanContext(t.Context(), sc)
t.Run("Default", func(t *testing.T) {
m, r := setup("default")
@@ -570,13 +570,13 @@ func TestAddingAndObservingMeasureConcurrentSafe(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
_ = mp.pipes[0].produce(context.Background(), &metricdata.ResourceMetrics{})
_ = mp.pipes[0].produce(t.Context(), &metricdata.ResourceMetrics{})
}()
wg.Add(1)
go func() {
defer wg.Done()
_ = mp.pipes[1].produce(context.Background(), &metricdata.ResourceMetrics{})
_ = mp.pipes[1].produce(t.Context(), &metricdata.ResourceMetrics{})
}()
wg.Wait()
@@ -598,7 +598,7 @@ func TestPipelineWithMultipleReaders(t *testing.T) {
}, oc)
require.NoError(t, err)
t.Cleanup(func() { assert.NoError(t, reg.Unregister()) })
ctx := context.Background()
ctx := t.Context()
rm := new(metricdata.ResourceMetrics)
val.Add(1)
err = r1.Collect(ctx, rm)
@@ -650,7 +650,7 @@ func TestPipelineProduceErrors(t *testing.T) {
}
pipe.addSync(instrumentation.Scope{Name: "test"}, inst)
ctx, cancelCtx := context.WithCancel(context.Background())
ctx, cancelCtx := context.WithCancel(t.Context())
var shouldCancelContext bool // When true, the second callback cancels ctx
var shouldReturnError bool // When true, the third callback returns an error
var callbackCounts [3]int

View File

@@ -35,40 +35,40 @@ func TestMeterConcurrentSafe(*testing.T) {
<-done
}
func TestForceFlushConcurrentSafe(*testing.T) {
func TestForceFlushConcurrentSafe(t *testing.T) {
mp := NewMeterProvider()
done := make(chan struct{})
go func() {
defer close(done)
_ = mp.ForceFlush(context.Background())
_ = mp.ForceFlush(t.Context())
}()
_ = mp.ForceFlush(context.Background())
_ = mp.ForceFlush(t.Context())
<-done
}
func TestShutdownConcurrentSafe(*testing.T) {
func TestShutdownConcurrentSafe(t *testing.T) {
mp := NewMeterProvider()
done := make(chan struct{})
go func() {
defer close(done)
_ = mp.Shutdown(context.Background())
_ = mp.Shutdown(t.Context())
}()
_ = mp.Shutdown(context.Background())
_ = mp.Shutdown(t.Context())
<-done
}
func TestMeterAndShutdownConcurrentSafe(*testing.T) {
func TestMeterAndShutdownConcurrentSafe(t *testing.T) {
const name = "TestMeterAndShutdownConcurrentSafe meter"
mp := NewMeterProvider()
done := make(chan struct{})
go func() {
defer close(done)
_ = mp.Shutdown(context.Background())
_ = mp.Shutdown(t.Context())
}()
_ = mp.Meter(name)
@@ -82,12 +82,12 @@ func TestMeterDoesNotPanicForEmptyMeterProvider(t *testing.T) {
func TestForceFlushDoesNotPanicForEmptyMeterProvider(t *testing.T) {
mp := MeterProvider{}
assert.NotPanics(t, func() { _ = mp.ForceFlush(context.Background()) })
assert.NotPanics(t, func() { _ = mp.ForceFlush(t.Context()) })
}
func TestShutdownDoesNotPanicForEmptyMeterProvider(t *testing.T) {
mp := MeterProvider{}
assert.NotPanics(t, func() { _ = mp.Shutdown(context.Background()) })
assert.NotPanics(t, func() { _ = mp.Shutdown(t.Context()) })
}
func TestMeterProviderReturnsSameMeter(t *testing.T) {
@@ -120,7 +120,7 @@ func TestMeterProviderReturnsNoopMeterAfterShutdown(t *testing.T) {
_, ok := m.(noop.Meter)
assert.False(t, ok, "Meter from running MeterProvider is NoOp")
require.NoError(t, mp.Shutdown(context.Background()))
require.NoError(t, mp.Shutdown(t.Context()))
m = mp.Meter("")
_, ok = m.(noop.Meter)
@@ -163,11 +163,11 @@ func TestMeterProviderMixingOnRegisterErrors(t *testing.T) {
)
var data metricdata.ResourceMetrics
_ = rdr0.Collect(context.Background(), &data)
_ = rdr0.Collect(t.Context(), &data)
// Only the metrics from mp0 should be produced.
assert.Len(t, data.ScopeMetrics, 1)
err = rdr1.Collect(context.Background(), &data)
err = rdr1.Collect(t.Context(), &data)
assert.NoError(t, err, "Errored when collect should be a noop")
assert.Empty(
t, data.ScopeMetrics,
@@ -218,14 +218,14 @@ func TestMeterProviderCardinalityLimit(t *testing.T) {
for i := range uniqueAttributesCount {
counter.Add(
context.Background(),
t.Context(),
1,
api.WithAttributes(attribute.Int("key", i)),
)
}
var rm metricdata.ResourceMetrics
err = reader.Collect(context.Background(), &rm)
err = reader.Collect(t.Context(), &rm)
require.NoError(t, err, "failed to collect metrics")
require.Len(t, rm.ScopeMetrics, 1, "expected 1 ScopeMetrics")

View File

@@ -270,8 +270,7 @@ func (p testExternalProducer) Produce(ctx context.Context) ([]metricdata.ScopeMe
return []metricdata.ScopeMetrics{testScopeMetricsB}, nil
}
func benchReaderCollectFunc(r Reader) func(*testing.B) {
ctx := context.Background()
func benchReaderCollectFunc(r Reader) func(b *testing.B) {
r.register(testSDKProducer{})
// Store benchmark results in a closure to prevent the compiler from
@@ -286,7 +285,7 @@ func benchReaderCollectFunc(r Reader) func(*testing.B) {
b.ResetTimer()
for n := 0; n < b.N; n++ {
err = r.Collect(ctx, &collectedMetrics)
err = r.Collect(b.Context(), &collectedMetrics)
assert.Equalf(
b,
testResourceMetricsA,

View File

@@ -77,7 +77,7 @@ func TestDetect(t *testing.T) {
for _, c := range cases {
t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) {
r, err := resource.Detect(context.Background(), c.detectors...)
r, err := resource.Detect(t.Context(), c.detectors...)
if c.wantErr != nil {
assert.ErrorIs(t, err, c.wantErr)
if errors.Is(c.wantErr, resource.ErrSchemaURLConflict) {

View File

@@ -4,7 +4,6 @@
package resource_test
import (
"context"
"fmt"
"testing"
@@ -18,7 +17,7 @@ func TestBuiltinStringDetector(t *testing.T) {
E := fmt.Errorf("no K")
res, err := resource.StringDetector("", attribute.Key("K"), func() (string, error) {
return "", E
}).Detect(context.Background())
}).Detect(t.Context())
require.ErrorIs(t, err, E)
require.NotEqual(t, E, err)
require.Nil(t, res)
@@ -48,7 +47,7 @@ func TestStringDetectorErrors(t *testing.T) {
for _, test := range tests {
res, err := resource.New(
context.Background(),
t.Context(),
resource.WithAttributes(attribute.String("A", "B")),
resource.WithDetectors(test.s),
)

View File

@@ -4,7 +4,6 @@
package resource
import (
"context"
"fmt"
"testing"
@@ -19,7 +18,7 @@ func TestDetectOnePair(t *testing.T) {
t.Setenv(resourceAttrKey, "key=value")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, NewSchemaless(attribute.String("key", "value")), res)
}
@@ -28,7 +27,7 @@ func TestDetectURIEncodingOnePair(t *testing.T) {
t.Setenv(resourceAttrKey, "key=x+y+z?q=123")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, NewSchemaless(attribute.String("key", "x+y+z?q=123")), res)
}
@@ -38,7 +37,7 @@ func TestDetectMultiPairs(t *testing.T) {
t.Setenv(resourceAttrKey, "key=value, k = v , a= x, a=z, b=c%2Fd")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, NewSchemaless(
attribute.String("key", "value"),
@@ -53,7 +52,7 @@ func TestDetectURIEncodingMultiPairs(t *testing.T) {
t.Setenv("x", "1")
t.Setenv(resourceAttrKey, "key=x+y+z,namespace=localhost/test&verify")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, NewSchemaless(
attribute.String("key", "x+y+z"),
@@ -64,7 +63,7 @@ func TestDetectURIEncodingMultiPairs(t *testing.T) {
func TestEmpty(t *testing.T) {
t.Setenv(resourceAttrKey, " ")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, Empty(), res)
}
@@ -72,7 +71,7 @@ func TestEmpty(t *testing.T) {
func TestNoResourceAttributesSet(t *testing.T) {
t.Setenv(svcNameKey, "bar")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, res, NewSchemaless(
semconv.ServiceName("bar"),
@@ -82,7 +81,7 @@ func TestNoResourceAttributesSet(t *testing.T) {
func TestMissingKeyError(t *testing.T) {
t.Setenv(resourceAttrKey, "key=value,key")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
assert.Error(t, err)
assert.Equal(t, err, fmt.Errorf("%w: %v", errMissingValue, "[key]"))
assert.Equal(t, res, NewSchemaless(
@@ -93,7 +92,7 @@ func TestMissingKeyError(t *testing.T) {
func TestInvalidPercentDecoding(t *testing.T) {
t.Setenv(resourceAttrKey, "key=%invalid")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
assert.NoError(t, err)
assert.Equal(t, NewSchemaless(
attribute.String("key", "%invalid"),
@@ -104,7 +103,7 @@ func TestDetectServiceNameFromEnv(t *testing.T) {
t.Setenv(resourceAttrKey, "key=value,service.name=foo")
t.Setenv(svcNameKey, "bar")
detector := &fromEnv{}
res, err := detector.Detect(context.Background())
res, err := detector.Detect(t.Context())
require.NoError(t, err)
assert.Equal(t, res, NewSchemaless(
attribute.String("key", "value"),

View File

@@ -4,7 +4,6 @@
package resource_test
import (
"context"
"fmt"
"os"
"os/user"
@@ -124,7 +123,7 @@ func TestRuntimeArch(t *testing.T) {
}
func testWithProcessExecutablePathError(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessExecutablePath(),
@@ -135,7 +134,7 @@ func testWithProcessExecutablePathError(t *testing.T) {
}
func testWithProcessOwnerError(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessOwner(),

View File

@@ -438,7 +438,7 @@ func TestNew(t *testing.T) {
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
t.Setenv(envVar, tt.envars)
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx, tt.options...)
if tt.wantErr != nil {
@@ -461,7 +461,7 @@ func TestNew(t *testing.T) {
func TestNewWrappedError(t *testing.T) {
localErr := errors.New("local error")
_, err := resource.New(
context.Background(),
t.Context(),
resource.WithDetectors(
resource.StringDetector("", "", func() (string, error) {
return "", localErr
@@ -481,7 +481,7 @@ func TestWithHostID(t *testing.T) {
mockHostIDProvider()
t.Cleanup(restoreHostIDProvider)
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithHostID(),
@@ -497,7 +497,7 @@ func TestWithHostIDError(t *testing.T) {
mockHostIDProviderWithError()
t.Cleanup(restoreHostIDProvider)
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithHostID(),
@@ -511,7 +511,7 @@ func TestWithOSType(t *testing.T) {
mockRuntimeProviders()
t.Cleanup(restoreAttributesProviders)
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithOSType(),
@@ -527,7 +527,7 @@ func TestWithOSDescription(t *testing.T) {
mockRuntimeProviders()
t.Cleanup(restoreAttributesProviders)
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithOSDescription(),
@@ -543,7 +543,7 @@ func TestWithOS(t *testing.T) {
mockRuntimeProviders()
t.Cleanup(restoreAttributesProviders)
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithOS(),
@@ -558,7 +558,7 @@ func TestWithOS(t *testing.T) {
func TestWithProcessPID(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessPID(),
@@ -572,7 +572,7 @@ func TestWithProcessPID(t *testing.T) {
func TestWithProcessExecutableName(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessExecutableName(),
@@ -586,7 +586,7 @@ func TestWithProcessExecutableName(t *testing.T) {
func TestWithProcessExecutablePath(t *testing.T) {
mockProcessAttributesProviders()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessExecutablePath(),
@@ -600,7 +600,7 @@ func TestWithProcessExecutablePath(t *testing.T) {
func TestWithProcessCommandArgs(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessCommandArgs(),
@@ -615,7 +615,7 @@ func TestWithProcessCommandArgs(t *testing.T) {
func TestWithProcessOwner(t *testing.T) {
mockProcessAttributesProviders()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessOwner(),
@@ -629,7 +629,7 @@ func TestWithProcessOwner(t *testing.T) {
func TestWithProcessRuntimeName(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessRuntimeName(),
@@ -643,7 +643,7 @@ func TestWithProcessRuntimeName(t *testing.T) {
func TestWithProcessRuntimeVersion(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessRuntimeVersion(),
@@ -657,7 +657,7 @@ func TestWithProcessRuntimeVersion(t *testing.T) {
func TestWithProcessRuntimeDescription(t *testing.T) {
mockProcessAttributesProvidersWithErrors()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcessRuntimeDescription(),
@@ -671,7 +671,7 @@ func TestWithProcessRuntimeDescription(t *testing.T) {
func TestWithProcess(t *testing.T) {
mockProcessAttributesProviders()
ctx := context.Background()
ctx := t.Context()
res, err := resource.New(ctx,
resource.WithProcess(),
@@ -748,7 +748,7 @@ func TestWithContainerID(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
resource.SetContainerProviders(tc.containerIDProvider)
res, err := resource.New(context.Background(),
res, err := resource.New(t.Context(),
resource.WithContainerID(),
)
@@ -768,7 +768,7 @@ func TestWithContainer(t *testing.T) {
return fakeContainerID, nil
})
res, err := resource.New(context.Background(),
res, err := resource.New(t.Context(),
resource.WithContainer(),
)
@@ -787,7 +787,7 @@ func TestResourceConcurrentSafe(t *testing.T) {
go func() {
defer wg.Done()
d := &fakeDetector{}
_, err := resource.Detect(context.Background(), d)
_, err := resource.Detect(t.Context(), d)
assert.NoError(t, err)
}()
}

View File

@@ -94,16 +94,16 @@ func TestNewBatchSpanProcessorWithNilExporter(t *testing.T) {
tp.RegisterSpanProcessor(bsp)
tr := tp.Tracer("NilExporter")
_, span := tr.Start(context.Background(), "foo")
_, span := tr.Start(t.Context(), "foo")
span.End()
// These should not panic.
bsp.OnStart(context.Background(), span.(ReadWriteSpan))
bsp.OnStart(t.Context(), span.(ReadWriteSpan))
bsp.OnEnd(span.(ReadOnlySpan))
if err := bsp.ForceFlush(context.Background()); err != nil {
if err := bsp.ForceFlush(t.Context()); err != nil {
t.Errorf("failed to ForceFlush the BatchSpanProcessor: %v", err)
}
if err := bsp.Shutdown(context.Background()); err != nil {
if err := bsp.Shutdown(t.Context()); err != nil {
t.Errorf("failed to Shutdown the BatchSpanProcessor: %v", err)
}
}
@@ -336,20 +336,20 @@ func createAndRegisterBatchSP(option testOption, te *testBatchExporter) SpanProc
return NewBatchSpanProcessor(te, options...)
}
func generateSpan(_ *testing.T, tr trace.Tracer, option testOption) {
func generateSpan(t *testing.T, tr trace.Tracer, option testOption) {
sc := getSpanContext()
for i := 0; i < option.genNumSpans; i++ {
tid := sc.TraceID()
binary.BigEndian.PutUint64(tid[0:8], uint64(i+1))
newSc := sc.WithTraceID(tid)
ctx := trace.ContextWithRemoteSpanContext(context.Background(), newSc)
ctx := trace.ContextWithRemoteSpanContext(t.Context(), newSc)
_, span := tr.Start(ctx, option.name)
span.End()
}
}
func generateSpanParallel(_ *testing.T, tr trace.Tracer, option testOption) {
func generateSpanParallel(t *testing.T, tr trace.Tracer, option testOption) {
sc := getSpanContext()
wg := &sync.WaitGroup{}
@@ -359,7 +359,7 @@ func generateSpanParallel(_ *testing.T, tr trace.Tracer, option testOption) {
wg.Add(1)
go func(sc trace.SpanContext) {
ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)
ctx := trace.ContextWithRemoteSpanContext(t.Context(), sc)
_, span := tr.Start(ctx, option.name)
span.End()
wg.Done()
@@ -382,14 +382,14 @@ func TestBatchSpanProcessorShutdown(t *testing.T) {
var bp testBatchExporter
bsp := NewBatchSpanProcessor(&bp)
err := bsp.Shutdown(context.Background())
err := bsp.Shutdown(t.Context())
if err != nil {
t.Error("Error shutting the BatchSpanProcessor down\n")
}
assert.Equal(t, 1, bp.shutdownCount, "shutdown from span exporter not called")
// Multiple call to Shutdown() should not panic.
err = bsp.Shutdown(context.Background())
err = bsp.Shutdown(t.Context())
if err != nil {
t.Error("Error shutting the BatchSpanProcessor down\n")
}
@@ -411,12 +411,12 @@ func TestBatchSpanProcessorPostShutdown(t *testing.T) {
genNumSpans: 60,
})
require.NoError(t, bsp.Shutdown(context.Background()), "shutting down BatchSpanProcessor")
require.NoError(t, bsp.Shutdown(t.Context()), "shutting down BatchSpanProcessor")
lenJustAfterShutdown := be.len()
_, span := tr.Start(context.Background(), "foo")
_, span := tr.Start(t.Context(), "foo")
span.End()
assert.NoError(t, bsp.ForceFlush(context.Background()), "force flushing BatchSpanProcessor")
assert.NoError(t, bsp.ForceFlush(t.Context()), "force flushing BatchSpanProcessor")
assert.Equal(t, lenJustAfterShutdown, be.len(), "OnEnd and ForceFlush should have no effect after Shutdown")
}
@@ -447,7 +447,7 @@ func TestBatchSpanProcessorForceFlushSucceeds(t *testing.T) {
}
// Force flush any held span batches
err := ssp.ForceFlush(context.Background())
err := ssp.ForceFlush(t.Context())
assertMaxSpanDiff(t, te.len(), option.wantNumSpans, 10)
@@ -487,7 +487,7 @@ func TestBatchSpanProcessorDropBatchIfFailed(t *testing.T) {
}
// Force flush any held span batches
err := ssp.ForceFlush(context.Background())
err := ssp.ForceFlush(t.Context())
assert.Error(t, err)
assert.EqualError(t, err, "fail to export")
@@ -504,7 +504,7 @@ func TestBatchSpanProcessorDropBatchIfFailed(t *testing.T) {
}
// Force flush any held span batches
err = ssp.ForceFlush(context.Background())
err = ssp.ForceFlush(t.Context())
assert.NoError(t, err)
assertMaxSpanDiff(t, te.len(), option.wantNumSpans, 10)
@@ -549,12 +549,13 @@ func (e indefiniteExporter) ExportSpans(ctx context.Context, _ []ReadOnlySpan) e
}
func TestBatchSpanProcessorForceFlushCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
// Cancel the context
cancel()
bsp := NewBatchSpanProcessor(newIndefiniteExporter(t))
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(t, bsp.Shutdown(context.Background()))
})
@@ -569,11 +570,11 @@ func TestBatchSpanProcessorForceFlushTimeout(t *testing.T) {
bsp := NewBatchSpanProcessor(exp)
tp.RegisterSpanProcessor(bsp)
tr := tp.Tracer(t.Name())
_, span := tr.Start(context.Background(), "foo")
_, span := tr.Start(t.Context(), "foo")
span.End()
// Add timeout to context to test deadline
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
ctx, cancel := context.WithTimeout(t.Context(), time.Millisecond)
defer cancel()
if got, want := bsp.ForceFlush(ctx), context.DeadlineExceeded; !errors.Is(got, want) {
@@ -582,13 +583,14 @@ func TestBatchSpanProcessorForceFlushTimeout(t *testing.T) {
}
func TestBatchSpanProcessorForceFlushQueuedSpans(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
var bp testBatchExporter
bsp := NewBatchSpanProcessor(&bp)
tp := basicTracerProvider(t)
tp.RegisterSpanProcessor(bsp)
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(t, tp.Shutdown(context.Background()))
})
@@ -606,7 +608,7 @@ func TestBatchSpanProcessorForceFlushQueuedSpans(t *testing.T) {
}
func TestBatchSpanProcessorConcurrentSafe(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
var bp testBatchExporter
bsp := NewBatchSpanProcessor(&bp)
tp := basicTracerProvider(t)
@@ -666,7 +668,10 @@ func TestBatchSpanProcessorMetricsDisabled(t *testing.T) {
)
otel.SetMeterProvider(meterProvider)
me := newBlockingExporter()
t.Cleanup(func() { assert.NoError(t, me.Shutdown(context.Background())) })
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(t, me.Shutdown(context.Background()))
})
bsp := NewBatchSpanProcessor(
me,
// Make sure timeout doesn't trigger during the test.
@@ -679,19 +684,19 @@ func TestBatchSpanProcessorMetricsDisabled(t *testing.T) {
tr := tp.Tracer("TestBatchSpanProcessorMetricsDisabled")
// Generate 2 spans, which export and block during the export call.
generateSpan(t, tr, testOption{genNumSpans: 2})
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
ctx, cancel := context.WithTimeout(t.Context(), time.Second)
defer cancel()
assert.NoError(t, me.waitForSpans(ctx, 2))
// Validate that there are no metrics produced.
gotMetrics := new(metricdata.ResourceMetrics)
assert.NoError(t, reader.Collect(context.Background(), gotMetrics))
assert.NoError(t, reader.Collect(t.Context(), gotMetrics))
require.Empty(t, gotMetrics.ScopeMetrics)
// Generate 3 spans. 2 fill the queue, and 1 is dropped because the queue is full.
generateSpan(t, tr, testOption{genNumSpans: 3})
// Validate that there are no metrics produced.
gotMetrics = new(metricdata.ResourceMetrics)
assert.NoError(t, reader.Collect(context.Background(), gotMetrics))
assert.NoError(t, reader.Collect(t.Context(), gotMetrics))
require.Empty(t, gotMetrics.ScopeMetrics)
}
@@ -708,7 +713,10 @@ func TestBatchSpanProcessorMetrics(t *testing.T) {
)
otel.SetMeterProvider(meterProvider)
me := newBlockingExporter()
t.Cleanup(func() { assert.NoError(t, me.Shutdown(context.Background())) })
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(t, me.Shutdown(context.Background()))
})
bsp := NewBatchSpanProcessor(
me,
// Make sure timeout doesn't trigger during the test.
@@ -721,7 +729,7 @@ func TestBatchSpanProcessorMetrics(t *testing.T) {
tr := tp.Tracer("TestBatchSpanProcessorMetrics")
// Generate 2 spans, which export and block during the export call.
generateSpan(t, tr, testOption{genNumSpans: 2})
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
ctx, cancel := context.WithTimeout(t.Context(), time.Second)
defer cancel()
assert.NoError(t, me.waitForSpans(ctx, 2))
assertObsScopeMetrics(t, reader, expectMetrics{
@@ -752,7 +760,10 @@ func TestBatchSpanProcessorBlockingMetrics(t *testing.T) {
)
otel.SetMeterProvider(meterProvider)
me := newBlockingExporter()
t.Cleanup(func() { assert.NoError(t, me.Shutdown(context.Background())) })
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(t, me.Shutdown(context.Background()))
})
bsp := NewBatchSpanProcessor(
me,
// Use WithBlocking so we can trigger a queueFull using ForceFlush.
@@ -767,7 +778,7 @@ func TestBatchSpanProcessorBlockingMetrics(t *testing.T) {
tr := tp.Tracer("TestBatchSpanProcessorBlockingMetrics")
// Generate 2 spans that are exported to the exporter, which blocks.
generateSpan(t, tr, testOption{genNumSpans: 2})
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
ctx, cancel := context.WithTimeout(t.Context(), time.Second)
defer cancel()
assert.NoError(t, me.waitForSpans(ctx, 2))
assertObsScopeMetrics(t, reader, expectMetrics{
@@ -788,7 +799,7 @@ func TestBatchSpanProcessorBlockingMetrics(t *testing.T) {
})
// Use ForceFlush to force the span that is blocking on the full queue to be dropped.
ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond)
ctx, cancel = context.WithTimeout(t.Context(), 10*time.Millisecond)
defer cancel()
assert.Error(t, tp.ForceFlush(ctx))
assertObsScopeMetrics(t, reader, expectMetrics{
@@ -813,7 +824,7 @@ func assertObsScopeMetrics(
) {
t.Helper()
gotResourceMetrics := new(metricdata.ResourceMetrics)
assert.NoError(t, reader.Collect(context.Background(), gotResourceMetrics))
assert.NoError(t, reader.Collect(t.Context(), gotResourceMetrics))
componentNameAttr := observ.BSPComponentName(componentID)
baseAttrs := attribute.NewSet(

View File

@@ -22,7 +22,7 @@ import (
func benchmarkSpanLimits(b *testing.B, limits sdktrace.SpanLimits) {
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanLimits(limits))
tracer := tp.Tracer(b.Name())
ctx := context.Background()
ctx := b.Context()
const count = 8
@@ -120,7 +120,7 @@ func BenchmarkSpanSetAttributesOverCapacity(b *testing.B) {
limits.AttributeCountLimit = 1
tp := sdktrace.NewTracerProvider(sdktrace.WithSpanLimits(limits))
tracer := tp.Tracer("BenchmarkSpanSetAttributesOverCapacity")
ctx := context.Background()
ctx := b.Context()
attrs := make([]attribute.KeyValue, 128)
for i := range attrs {
key := fmt.Sprintf("key-%d", i)
@@ -139,7 +139,7 @@ func BenchmarkSpanSetAttributesOverCapacity(b *testing.B) {
func BenchmarkStartEndSpan(b *testing.B) {
traceBenchmark(b, "Benchmark StartEndSpan", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, span := t.Start(ctx, "/foo")
@@ -150,7 +150,7 @@ func BenchmarkStartEndSpan(b *testing.B) {
func BenchmarkSpanWithAttributes_4(b *testing.B) {
traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -168,7 +168,7 @@ func BenchmarkSpanWithAttributes_4(b *testing.B) {
func BenchmarkSpanWithAttributes_8(b *testing.B) {
traceBenchmark(b, "Benchmark Start With 8 Attributes", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -190,7 +190,7 @@ func BenchmarkSpanWithAttributes_8(b *testing.B) {
func BenchmarkSpanWithAttributes_all(b *testing.B) {
traceBenchmark(b, "Benchmark Start With all Attribute types", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -209,7 +209,7 @@ func BenchmarkSpanWithAttributes_all(b *testing.B) {
func BenchmarkSpanWithAttributes_all_2x(b *testing.B) {
traceBenchmark(b, "Benchmark Start With all Attributes types twice", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -233,7 +233,7 @@ func BenchmarkSpanWithAttributes_all_2x(b *testing.B) {
func BenchmarkSpanWithEvents_4(b *testing.B) {
traceBenchmark(b, "Benchmark Start With 4 Events", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -249,7 +249,7 @@ func BenchmarkSpanWithEvents_4(b *testing.B) {
func BenchmarkSpanWithEvents_8(b *testing.B) {
traceBenchmark(b, "Benchmark Start With 4 Events", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -269,7 +269,7 @@ func BenchmarkSpanWithEvents_8(b *testing.B) {
func BenchmarkSpanWithEvents_WithStackTrace(b *testing.B) {
traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -282,7 +282,7 @@ func BenchmarkSpanWithEvents_WithStackTrace(b *testing.B) {
func BenchmarkSpanWithEvents_WithTimestamp(b *testing.B) {
traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) {
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -386,6 +386,7 @@ func BenchmarkSpanProcessorOnEnd(b *testing.B) {
sdktrace.WithMaxExportBatchSize(bb.batchSize),
)
b.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
_ = bsp.Shutdown(context.Background())
})
snap := tracetest.SpanStub{}.Snapshot()
@@ -413,10 +414,11 @@ func BenchmarkSpanProcessorVerboseLogging(b *testing.B) {
sdktrace.WithMaxExportBatchSize(10),
))
b.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
_ = tp.Shutdown(context.Background())
})
tracer := tp.Tracer("bench")
ctx := context.Background()
ctx := b.Context()
b.ResetTimer()
b.ReportAllocs()

View File

@@ -4,7 +4,6 @@
package trace
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@@ -17,7 +16,7 @@ func TestNewIDs(t *testing.T) {
n := 1000
for range n {
traceID, spanID := gen.NewIDs(context.Background())
traceID, spanID := gen.NewIDs(t.Context())
assert.Truef(t, traceID.IsValid(), "trace id: %s", traceID.String())
assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String())
}
@@ -29,13 +28,13 @@ func TestNewSpanID(t *testing.T) {
n := 1000
for range n {
spanID := gen.NewSpanID(context.Background(), testTraceID)
spanID := gen.NewSpanID(t.Context(), testTraceID)
assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String())
}
}
func TestNewSpanIDWithInvalidTraceID(t *testing.T) {
gen := defaultIDGenerator()
spanID := gen.NewSpanID(context.Background(), trace.TraceID{})
spanID := gen.NewSpanID(t.Context(), trace.TraceID{})
assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String())
}

View File

@@ -4,7 +4,6 @@
package observ_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@@ -129,7 +128,7 @@ func TestBSPProcessed(t *testing.T) {
require.NotNil(t, bsp)
require.NoError(t, bsp.Shutdown()) // Unregister callback.
ctx := context.Background()
ctx := t.Context()
const p0 int64 = 10
bsp.Processed(ctx, p0)
const e0 int64 = 1
@@ -164,7 +163,7 @@ func BenchmarkBSP(b *testing.B) {
})
return bsp
}
ctx := context.Background()
ctx := b.Context()
b.Run("Processed", func(b *testing.B) {
orig := otel.GetMeterProvider()

View File

@@ -4,7 +4,6 @@
package observ_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
@@ -32,7 +31,7 @@ func setup(t *testing.T) func() metricdata.ScopeMetrics {
return func() metricdata.ScopeMetrics {
var got metricdata.ResourceMetrics
require.NoError(t, reader.Collect(context.Background(), &got))
require.NoError(t, reader.Collect(t.Context(), &got))
if len(got.ScopeMetrics) != 1 {
return metricdata.ScopeMetrics{}
}

View File

@@ -4,7 +4,6 @@
package observ_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@@ -70,7 +69,7 @@ func TestTracer(t *testing.T) {
collect := setup(t)
tracer := trace.NewTracerProvider().Tracer(t.Name())
_, span := tracer.Start(context.Background(), "span")
_, span := tracer.Start(t.Context(), "span")
check(t, collect(), sampledLive(1), sampledStarted(1))
span.End()
@@ -95,7 +94,7 @@ func TestTracerNonRecording(t *testing.T) {
trace.WithSampler(trace.NeverSample()),
).Tracer(t.Name())
_, _ = tracer.Start(context.Background(), "span")
_, _ = tracer.Start(t.Context(), "span")
check(t, collect(), dropStarted(1))
}
@@ -138,7 +137,7 @@ func TestTracerRecordOnly(t *testing.T) {
trace.WithSampler(recOnly{}),
).Tracer(t.Name())
_, _ = tracer.Start(context.Background(), "span")
_, _ = tracer.Start(t.Context(), "span")
check(t, collect(), recLive(1), recStarted(1))
}
@@ -159,7 +158,7 @@ func TestTracerRemoteParent(t *testing.T) {
tracer := trace.NewTracerProvider().Tracer(t.Name())
ctx := tapi.ContextWithRemoteSpanContext(
context.Background(),
t.Context(),
tapi.NewSpanContext(tapi.SpanContextConfig{
TraceID: tapi.TraceID{0x01},
SpanID: tapi.SpanID{0x01},
@@ -195,7 +194,7 @@ func TestTracerLocalParent(t *testing.T) {
collect := setup(t)
tracer := trace.NewTracerProvider().Tracer(t.Name())
ctx, parent := tracer.Start(context.Background(), "parent")
ctx, parent := tracer.Start(t.Context(), "parent")
_, child := tracer.Start(ctx, "child")
check(t, collect(), sampledLive(2), chainStarted(1, 1))
@@ -243,7 +242,7 @@ func BenchmarkTracer(b *testing.B) {
require.True(b, tracer.Enabled())
t := otel.GetTracerProvider().Tracer(b.Name())
ctx, span := t.Start(context.Background(), "parent")
ctx, span := t.Start(b.Context(), "parent")
psc := span.SpanContext()
span.End()

View File

@@ -65,14 +65,14 @@ func TestShutdownCallsTracerMethod(t *testing.T) {
},
}
stp.RegisterSpanProcessor(sp)
assert.NoError(t, stp.Shutdown(context.Background()))
assert.NoError(t, stp.Shutdown(t.Context()))
assert.True(t, stp.isShutdown.Load())
}
func TestForceFlushAndShutdownTraceProviderWithoutProcessor(t *testing.T) {
stp := NewTracerProvider()
assert.NoError(t, stp.ForceFlush(context.Background()))
assert.NoError(t, stp.Shutdown(context.Background()))
assert.NoError(t, stp.ForceFlush(t.Context()))
assert.NoError(t, stp.Shutdown(t.Context()))
assert.True(t, stp.isShutdown.Load())
}
@@ -132,9 +132,9 @@ func TestShutdownTraceProvider(t *testing.T) {
sp := &basicSpanProcessor{}
stp.RegisterSpanProcessor(sp)
assert.NoError(t, stp.ForceFlush(context.Background()))
assert.NoError(t, stp.ForceFlush(t.Context()))
assert.True(t, sp.flushed, "error ForceFlush basicSpanProcessor")
assert.NoError(t, stp.Shutdown(context.Background()))
assert.NoError(t, stp.Shutdown(t.Context()))
assert.True(t, stp.isShutdown.Load())
assert.True(t, sp.closed, "error Shutdown basicSpanProcessor")
}
@@ -147,7 +147,7 @@ func TestFailedProcessorShutdown(t *testing.T) {
}
stp.RegisterSpanProcessor(sp)
err := stp.Shutdown(context.Background())
err := stp.Shutdown(t.Context())
assert.Error(t, err)
assert.Equal(t, err, spErr)
assert.True(t, stp.isShutdown.Load())
@@ -166,7 +166,7 @@ func TestFailedProcessorsShutdown(t *testing.T) {
stp.RegisterSpanProcessor(sp1)
stp.RegisterSpanProcessor(sp2)
err := stp.Shutdown(context.Background())
err := stp.Shutdown(t.Context())
assert.Error(t, err)
assert.EqualError(t, err, "basic span processor shutdown failure1; basic span processor shutdown failure2")
assert.True(t, sp1.closed)
@@ -186,7 +186,7 @@ func TestFailedProcessorShutdownInUnregister(t *testing.T) {
assert.Contains(t, handler.errs, spErr)
err := stp.Shutdown(context.Background())
err := stp.Shutdown(t.Context())
assert.NoError(t, err)
assert.True(t, stp.isShutdown.Load())
}
@@ -203,7 +203,7 @@ func TestSchemaURL(t *testing.T) {
func TestRegisterAfterShutdownWithoutProcessors(t *testing.T) {
stp := NewTracerProvider()
err := stp.Shutdown(context.Background())
err := stp.Shutdown(t.Context())
assert.NoError(t, err)
assert.True(t, stp.isShutdown.Load())
@@ -217,7 +217,7 @@ func TestRegisterAfterShutdownWithProcessors(t *testing.T) {
sp1 := &basicSpanProcessor{}
stp.RegisterSpanProcessor(sp1)
err := stp.Shutdown(context.Background())
err := stp.Shutdown(t.Context())
assert.NoError(t, err)
assert.True(t, stp.isShutdown.Load())
assert.Empty(t, stp.getSpanProcessors())
@@ -340,6 +340,7 @@ func TestTracerProviderSamplerConfigFromEnv(t *testing.T) {
stp := NewTracerProvider(WithSyncer(NewTestExporter()))
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
require.NoError(t, stp.Shutdown(context.Background()))
})
assert.Equal(t, test.description, stp.sampler.Description())

View File

@@ -4,7 +4,6 @@
package trace
import (
"context"
"fmt"
"math/rand/v2"
"testing"
@@ -20,7 +19,7 @@ func TestParentBasedDefaultLocalParentSampled(t *testing.T) {
traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
parentCtx := trace.ContextWithSpanContext(
context.Background(),
t.Context(),
trace.NewSpanContext(trace.SpanContextConfig{
TraceID: traceID,
SpanID: spanID,
@@ -37,7 +36,7 @@ func TestParentBasedDefaultLocalParentNotSampled(t *testing.T) {
traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
parentCtx := trace.ContextWithSpanContext(
context.Background(),
t.Context(),
trace.NewSpanContext(trace.SpanContextConfig{
TraceID: traceID,
SpanID: spanID,
@@ -114,7 +113,7 @@ func TestParentBasedWithSamplerOptions(t *testing.T) {
params := SamplingParameters{
ParentContext: trace.ContextWithSpanContext(
context.Background(),
t.Context(),
trace.NewSpanContext(pscc),
),
}
@@ -188,7 +187,7 @@ func TestTraceIdRatioSamplesInclusively(t *testing.T) {
samplerHi := TraceIDRatioBased(ratioHi)
samplerLo := TraceIDRatioBased(ratioLo)
for range numTraces {
traceID, _ := idg.NewIDs(context.Background())
traceID, _ := idg.NewIDs(t.Context())
params := SamplingParameters{TraceID: traceID}
if samplerLo.ShouldSample(params).Decision == RecordAndSample {
@@ -235,7 +234,7 @@ func TestTracestateIsPassed(t *testing.T) {
params := SamplingParameters{
ParentContext: trace.ContextWithSpanContext(
context.Background(),
t.Context(),
trace.NewSpanContext(trace.SpanContextConfig{
TraceState: traceState,
}),

View File

@@ -76,7 +76,7 @@ func TestSimpleSpanProcessorShutdown(t *testing.T) {
t.Error("failed to verify span export")
}
if err := ssp.Shutdown(context.Background()); err != nil {
if err := ssp.Shutdown(t.Context()); err != nil {
t.Errorf("shutting the SimpleSpanProcessor down: %v", err)
}
if !exporter.shutdown {
@@ -111,7 +111,7 @@ func TestSimpleSpanProcessorShutdownOnEndConcurrentSafe(t *testing.T) {
}
}()
if err := ssp.Shutdown(context.Background()); err != nil {
if err := ssp.Shutdown(t.Context()); err != nil {
t.Errorf("shutting the SimpleSpanProcessor down: %v", err)
}
if !exporter.shutdown {
@@ -134,7 +134,7 @@ func TestSimpleSpanProcessorShutdownOnEndConcurrentSafe2(t *testing.T) {
span := func(spanName string) {
assert.NotPanics(t, func() {
defer wg.Done()
_, span := tp.Tracer("test").Start(context.Background(), spanName)
_, span := tp.Tracer("test").Start(t.Context(), spanName)
span.End()
})
}
@@ -144,12 +144,12 @@ func TestSimpleSpanProcessorShutdownOnEndConcurrentSafe2(t *testing.T) {
wg.Wait()
assert.NoError(t, ssp.Shutdown(context.Background()))
assert.NoError(t, ssp.Shutdown(t.Context()))
assert.True(t, exporter.shutdown, "exporter shutdown")
}
func TestSimpleSpanProcessorShutdownHonorsContextDeadline(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
ctx, cancel := context.WithTimeout(t.Context(), time.Nanosecond)
defer cancel()
<-ctx.Done()
@@ -160,7 +160,7 @@ func TestSimpleSpanProcessorShutdownHonorsContextDeadline(t *testing.T) {
}
func TestSimpleSpanProcessorShutdownHonorsContextCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
cancel()
ssp := NewSimpleSpanProcessor(&simpleTestExporter{})

View File

@@ -139,7 +139,7 @@ func testSpanLimits(t *testing.T, limits SpanLimits) ReadOnlySpan {
tp := NewTracerProvider(WithRawSpanLimits(limits), WithSpanProcessor(rec))
tracer := tp.Tracer("testSpanLimits")
ctx := context.Background()
ctx := t.Context()
a := []attribute.KeyValue{attribute.Bool("one", true), attribute.Bool("two", true)}
l := trace.Link{
SpanContext: trace.NewSpanContext(trace.SpanContextConfig{

View File

@@ -83,7 +83,7 @@ func TestRegisterSpanProcessor(t *testing.T) {
TraceID: tid,
SpanID: sid,
})
ctx := trace.ContextWithRemoteSpanContext(context.Background(), parent)
ctx := trace.ContextWithRemoteSpanContext(t.Context(), parent)
tr := tp.Tracer("SpanProcessor")
_, span := tr.Start(ctx, "OnStart")
@@ -152,14 +152,14 @@ func TestUnregisterSpanProcessor(t *testing.T) {
}
tr := tp.Tracer("SpanProcessor")
_, span := tr.Start(context.Background(), "OnStart")
_, span := tr.Start(t.Context(), "OnStart")
span.End()
for _, sp := range sps {
tp.UnregisterSpanProcessor(sp)
}
// start another span after unregistering span processor.
_, span = tr.Start(context.Background(), "Start span after unregister")
_, span = tr.Start(t.Context(), "Start span after unregister")
span.End()
for _, sp := range sps {
@@ -183,7 +183,7 @@ func TestUnregisterSpanProcessorWhileSpanIsActive(t *testing.T) {
tp.RegisterSpanProcessor(sp)
tr := tp.Tracer("SpanProcessor")
_, span := tr.Start(context.Background(), "OnStart")
_, span := tr.Start(t.Context(), "OnStart")
tp.UnregisterSpanProcessor(sp)
span.End()
@@ -208,7 +208,7 @@ func TestSpanProcessorShutdown(t *testing.T) {
tp.RegisterSpanProcessor(sp)
wantCount := 1
err := sp.Shutdown(context.Background())
err := sp.Shutdown(t.Context())
if err != nil {
t.Error("Error shutting the testSpanProcessor down\n")
}

View File

@@ -5,7 +5,6 @@ package trace
import (
"bytes"
"context"
"fmt"
"testing"
@@ -378,7 +377,7 @@ func BenchmarkRecordingSpanSetAttributes(b *testing.B) {
attrs = append(attrs, attr)
}
ctx := context.Background()
ctx := b.Context()
for _, limit := range []bool{false, true} {
b.Run(fmt.Sprintf("WithLimit/%t", limit), func(b *testing.B) {
b.ReportAllocs()
@@ -415,7 +414,7 @@ func BenchmarkSpanEnd(b *testing.B) {
},
}
ctx := trace.ContextWithSpanContext(context.Background(), trace.SpanContext{})
ctx := trace.ContextWithSpanContext(b.Context(), trace.SpanContext{})
for _, c := range cases {
b.Run(c.name, func(b *testing.B) {

View File

@@ -221,7 +221,7 @@ func TestSpanIsRecording(t *testing.T) {
"Never sample recording off": {sampler: NeverSample(), want: false},
} {
tp := NewTracerProvider(WithSampler(tc.sampler))
_, span := tp.Tracer(name).Start(context.Background(), "StartSpan")
_, span := tp.Tracer(name).Start(t.Context(), "StartSpan")
got := span.IsRecording()
span.End()
assert.Equal(t, tc.want, got, name)
@@ -234,7 +234,7 @@ func TestSpanIsRecording(t *testing.T) {
"Never Sample": NeverSample(),
} {
tp := NewTracerProvider(WithSampler(tc))
_, span := tp.Tracer(name).Start(context.Background(), "StartSpan")
_, span := tp.Tracer(name).Start(t.Context(), "StartSpan")
span.End()
got := span.IsRecording()
assert.False(t, got, name)
@@ -290,7 +290,7 @@ func TestSampling(t *testing.T) {
tr := p.Tracer("test")
var sampled int
for range total {
ctx := context.Background()
ctx := t.Context()
if tc.parent {
tid, sid := idg.NewIDs(ctx)
psc := trace.NewSpanContext(trace.SpanContextConfig{
@@ -327,7 +327,7 @@ func TestSampling(t *testing.T) {
func TestStartSpanWithParent(t *testing.T) {
tp := NewTracerProvider()
tr := tp.Tracer("SpanWithParent")
ctx := context.Background()
ctx := t.Context()
_, s1 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "span1-unsampled-parent1")
if err := checkChild(t, sc, s1); err != nil {
@@ -372,7 +372,7 @@ func TestStartSpanNewRootNotSampled(t *testing.T) {
sampledTr := alwaysSampleTp.Tracer("AlwaysSampled")
neverSampleTp := NewTracerProvider(WithSampler(ParentBased(NeverSample())))
neverSampledTr := neverSampleTp.Tracer("ParentBasedNeverSample")
ctx := context.Background()
ctx := t.Context()
ctx, s1 := sampledTr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "span1-sampled")
if err := checkChild(t, sc, s1); err != nil {
@@ -436,7 +436,7 @@ func TestSamplerAttributesLocalChildSpan(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSampler(sampler), WithSyncer(te), WithResource(resource.Empty()))
ctx := context.Background()
ctx := t.Context()
ctx, span := startLocalSpan(ctx, tp, "SpanOne", "span0")
_, spanTwo := startLocalSpan(ctx, tp, "SpanTwo", "span1")
@@ -619,7 +619,7 @@ func TestSpanSetAttributes(t *testing.T) {
sl := NewSpanLimits()
sl.AttributeCountLimit = capacity
tp := NewTracerProvider(WithSyncer(te), WithSpanLimits(sl))
_, span := tp.Tracer(instName).Start(context.Background(), spanName)
_, span := tp.Tracer(instName).Start(t.Context(), spanName)
for _, a := range test.input {
span.SetAttributes(a...)
}
@@ -845,7 +845,7 @@ func TestLinksOverLimit(t *testing.T) {
func TestSetSpanName(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty()))
ctx := context.Background()
ctx := t.Context()
want := "SpanName-1"
ctx = trace.ContextWithRemoteSpanContext(ctx, sc)
@@ -1058,7 +1058,7 @@ func TestEndSpanTwice(t *testing.T) {
func TestStartSpanAfterEnd(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSampler(AlwaysSample()), WithSyncer(te))
ctx := context.Background()
ctx := t.Context()
tr := tp.Tracer("SpanAfterEnd")
ctx, span0 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "parent")
@@ -1105,7 +1105,7 @@ func TestChildSpanCount(t *testing.T) {
tp := NewTracerProvider(WithSampler(AlwaysSample()), WithSyncer(te))
tr := tp.Tracer("ChidSpanCount")
ctx, span0 := tr.Start(context.Background(), "parent")
ctx, span0 := tr.Start(t.Context(), "parent")
ctx1, span1 := tr.Start(ctx, "span-1")
_, span2 := tr.Start(ctx1, "span-2")
span2.End()
@@ -1158,7 +1158,7 @@ func TestSpanWithCanceledContext(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSyncer(te))
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(t.Context())
cancel()
_, span := tp.Tracer(t.Name()).Start(ctx, "span")
span.End()
@@ -1170,7 +1170,7 @@ func TestNonRecordingSpanDoesNotTrackRuntimeTracerTask(t *testing.T) {
tp := NewTracerProvider(WithSampler(NeverSample()))
tr := tp.Tracer("TestNonRecordingSpanDoesNotTrackRuntimeTracerTask")
_, apiSpan := tr.Start(context.Background(), "foo")
_, apiSpan := tr.Start(t.Context(), "foo")
if _, ok := apiSpan.(runtimeTracer); ok {
t.Fatalf("non recording span implements runtime trace task tracking")
}
@@ -1184,7 +1184,7 @@ func TestRecordingSpanRuntimeTracerTaskEnd(t *testing.T) {
executionTracerTaskEnd := func() {
atomic.AddUint64(&n, 1)
}
_, apiSpan := tr.Start(context.Background(), "foo")
_, apiSpan := tr.Start(t.Context(), "foo")
s, ok := apiSpan.(*recordingSpan)
if !ok {
t.Fatal("recording span not returned from always sampled Tracer")
@@ -1205,7 +1205,7 @@ func TestCustomStartEndTime(t *testing.T) {
startTime := time.Date(2019, time.August, 27, 14, 42, 0, 0, time.UTC)
endTime := startTime.Add(time.Second * 20)
_, span := tp.Tracer("Custom Start and End time").Start(
context.Background(),
t.Context(),
"testspan",
trace.WithTimestamp(startTime),
)
@@ -1382,7 +1382,7 @@ func TestWithSpanKind(t *testing.T) {
tp := NewTracerProvider(WithSyncer(te), WithSampler(AlwaysSample()), WithResource(resource.Empty()))
tr := tp.Tracer("withSpanKind")
_, span := tr.Start(context.Background(), "WithoutSpanKind")
_, span := tr.Start(t.Context(), "WithoutSpanKind")
spanData, err := endSpan(te, span)
if err != nil {
t.Error(err.Error())
@@ -1407,7 +1407,7 @@ func TestWithSpanKind(t *testing.T) {
for _, sk := range sks {
te.Reset()
_, span := tr.Start(context.Background(), fmt.Sprintf("SpanKind-%v", sk), trace.WithSpanKind(sk))
_, span := tr.Start(t.Context(), fmt.Sprintf("SpanKind-%v", sk), trace.WithSpanKind(sk))
spanData, err := endSpan(te, span)
if err != nil {
t.Error(err.Error())
@@ -1514,7 +1514,7 @@ func TestWithInstrumentationVersionAndSchema(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty()))
ctx := context.Background()
ctx := t.Context()
ctx = trace.ContextWithRemoteSpanContext(ctx, sc)
_, span := tp.Tracer(
"WithInstrumentationVersion",
@@ -1549,7 +1549,7 @@ func TestSpanCapturesPanic(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty()))
_, span := tp.Tracer("CatchPanic").Start(
context.Background(),
t.Context(),
"span",
)
@@ -1572,7 +1572,7 @@ func TestSpanCapturesPanicWithStackTrace(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty()))
_, span := tp.Tracer("CatchPanic").Start(
context.Background(),
t.Context(),
"span",
)
@@ -1610,17 +1610,17 @@ func TestReadOnlySpan(t *testing.T) {
tr := tp.Tracer("ReadOnlySpan", trace.WithInstrumentationVersion("3"))
// Initialize parent context.
tID, sID := tp.idGenerator.NewIDs(context.Background())
tID, sID := tp.idGenerator.NewIDs(t.Context())
parent := trace.NewSpanContext(trace.SpanContextConfig{
TraceID: tID,
SpanID: sID,
TraceFlags: 0x1,
Remote: true,
})
ctx := trace.ContextWithRemoteSpanContext(context.Background(), parent)
ctx := trace.ContextWithRemoteSpanContext(t.Context(), parent)
// Initialize linked context.
tID, sID = tp.idGenerator.NewIDs(context.Background())
tID, sID = tp.idGenerator.NewIDs(t.Context())
linked := trace.NewSpanContext(trace.SpanContextConfig{
TraceID: tID,
SpanID: sID,
@@ -1692,13 +1692,13 @@ func TestReadWriteSpan(t *testing.T) {
tr := tp.Tracer("ReadWriteSpan")
// Initialize parent context.
tID, sID := tp.idGenerator.NewIDs(context.Background())
tID, sID := tp.idGenerator.NewIDs(t.Context())
parent := trace.NewSpanContext(trace.SpanContextConfig{
TraceID: tID,
SpanID: sID,
TraceFlags: 0x1,
})
ctx := trace.ContextWithRemoteSpanContext(context.Background(), parent)
ctx := trace.ContextWithRemoteSpanContext(t.Context(), parent)
_, span := tr.Start(ctx, "foo")
defer span.End()
@@ -1905,7 +1905,7 @@ func TestSamplerTraceState(t *testing.T) {
TraceFlags: trace.FlagsSampled,
TraceState: ts.input,
})
ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc1)
ctx := trace.ContextWithRemoteSpanContext(t.Context(), sc1)
_, span := tr.Start(ctx, ts.spanName)
// span's TraceState should be set regardless of Sampled/NonSampled state.
@@ -1968,7 +1968,7 @@ func TestWithIDGenerator(t *testing.T) {
)
for i := range numSpan {
func() {
_, span := tp.Tracer(t.Name()).Start(context.Background(), strconv.Itoa(i))
_, span := tp.Tracer(t.Name()).Start(t.Context(), strconv.Itoa(i))
defer span.End()
gotSpanID, err := strconv.ParseUint(span.SpanContext().SpanID().String(), 16, 64)
@@ -1986,7 +1986,7 @@ func TestIDsRoundTrip(t *testing.T) {
gen := defaultIDGenerator()
for range 1000 {
traceID, spanID := gen.NewIDs(context.Background())
traceID, spanID := gen.NewIDs(t.Context())
gotTraceID, err := trace.TraceIDFromHex(traceID.String())
assert.NoError(t, err)
assert.Equal(t, traceID, gotTraceID)
@@ -2256,7 +2256,7 @@ func TestObservability(t *testing.T) {
name: "SampledSpan",
test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) {
tp := NewTracerProvider()
_, span := tp.Tracer("").Start(context.Background(), "StartSpan")
_, span := tp.Tracer("").Start(t.Context(), "StartSpan")
want := metricdata.ScopeMetrics{
Scope: instrumentation.Scope{
@@ -2383,7 +2383,7 @@ func TestObservability(t *testing.T) {
test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) {
// Create a tracer provider with NeverSample sampler to get non-recording spans.
tp := NewTracerProvider(WithSampler(NeverSample()))
tp.Tracer("").Start(context.Background(), "NonRecordingSpan")
tp.Tracer("").Start(t.Context(), "NonRecordingSpan")
want := metricdata.ScopeMetrics{
Scope: instrumentation.Scope{
@@ -2432,7 +2432,7 @@ func TestObservability(t *testing.T) {
test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) {
// Create a tracer provider with NeverSample sampler to get non-recording spans.
tp := NewTracerProvider(WithSampler(RecordingOnly()))
tp.Tracer("").Start(context.Background(), "OnlyRecordingSpan")
tp.Tracer("").Start(t.Context(), "OnlyRecordingSpan")
want := metricdata.ScopeMetrics{
Scope: instrumentation.Scope{
@@ -2500,7 +2500,7 @@ func TestObservability(t *testing.T) {
// Create a remote parent context
tid, _ := trace.TraceIDFromHex("01020304050607080102040810203040")
sid, _ := trace.SpanIDFromHex("0102040810203040")
remoteCtx := trace.ContextWithRemoteSpanContext(context.Background(),
remoteCtx := trace.ContextWithRemoteSpanContext(t.Context(),
trace.NewSpanContext(trace.SpanContextConfig{
TraceID: tid,
SpanID: sid,
@@ -2574,7 +2574,7 @@ func TestObservability(t *testing.T) {
name: "LocalParentSpan",
test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) {
tp := NewTracerProvider()
ctx, parentSpan := tp.Tracer("").Start(context.Background(), "ParentSpan")
ctx, parentSpan := tp.Tracer("").Start(t.Context(), "ParentSpan")
_, childSpan := tp.Tracer("").Start(ctx, "ChildSpan")
want := metricdata.ScopeMetrics{
@@ -2734,7 +2734,7 @@ func TestObservability(t *testing.T) {
scopeMetrics := func() metricdata.ScopeMetrics {
var got metricdata.ResourceMetrics
err := r.Collect(context.Background(), &got)
err := r.Collect(t.Context(), &got)
require.NoError(t, err)
require.Len(t, got.ScopeMetrics, 1)
return got.ScopeMetrics[0]
@@ -2805,12 +2805,12 @@ func TestObservabilityContextPropagation(t *testing.T) {
fn(ctx)
}
ctx := context.WithValue(context.Background(), ctxKey, want)
ctx := context.WithValue(t.Context(), ctxKey, want)
wrap(ctx, "parent", func(ctx context.Context) {
wrap(ctx, "child", func(context.Context) {})
})
require.NoError(t, tp.Shutdown(context.Background()))
require.NoError(t, tp.Shutdown(t.Context()))
// The TracerProvider shutdown returned, no more measurements will be sent
// to the exemplar filter.
@@ -2855,7 +2855,7 @@ func TestRecordOnlySampler(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSyncer(te), WithSampler(RecordingOnly()))
_, span := tp.Tracer("RecordOnly").Start(context.Background(), "test-span")
_, span := tp.Tracer("RecordOnly").Start(t.Context(), "test-span")
assert.True(t, span.IsRecording(), "span should be recording")
assert.False(t, span.SpanContext().IsSampled(), "span should not be sampled")
@@ -2866,7 +2866,7 @@ func TestRecordOnlySampler(t *testing.T) {
}
func BenchmarkTraceStart(b *testing.B) {
ctx := trace.ContextWithSpanContext(context.Background(), trace.SpanContext{})
ctx := trace.ContextWithSpanContext(b.Context(), trace.SpanContext{})
l1 := trace.Link{SpanContext: trace.SpanContext{}, Attributes: []attribute.KeyValue{}}
l2 := trace.Link{SpanContext: trace.SpanContext{}, Attributes: []attribute.KeyValue{}}

View File

@@ -4,7 +4,6 @@
package tracetest
import (
"context"
"fmt"
"testing"
@@ -16,22 +15,22 @@ import (
func TestNoop(t *testing.T) {
nsb := NewNoopExporter()
require.NoError(t, nsb.ExportSpans(context.Background(), nil))
require.NoError(t, nsb.ExportSpans(context.Background(), make(SpanStubs, 10).Snapshots()))
require.NoError(t, nsb.ExportSpans(context.Background(), make(SpanStubs, 0, 10).Snapshots()))
require.NoError(t, nsb.ExportSpans(t.Context(), nil))
require.NoError(t, nsb.ExportSpans(t.Context(), make(SpanStubs, 10).Snapshots()))
require.NoError(t, nsb.ExportSpans(t.Context(), make(SpanStubs, 0, 10).Snapshots()))
}
func TestNewInMemoryExporter(t *testing.T) {
imsb := NewInMemoryExporter()
require.NoError(t, imsb.ExportSpans(context.Background(), nil))
require.NoError(t, imsb.ExportSpans(t.Context(), nil))
assert.Empty(t, imsb.GetSpans())
input := make(SpanStubs, 10)
for i := range 10 {
input[i] = SpanStub{Name: fmt.Sprintf("span %d", i)}
}
require.NoError(t, imsb.ExportSpans(context.Background(), input.Snapshots()))
require.NoError(t, imsb.ExportSpans(t.Context(), input.Snapshots()))
sds := imsb.GetSpans()
assert.Len(t, sds, 10)
for i, sd := range sds {
@@ -42,7 +41,7 @@ func TestNewInMemoryExporter(t *testing.T) {
assert.Len(t, sds, 10)
assert.Empty(t, imsb.GetSpans())
require.NoError(t, imsb.ExportSpans(context.Background(), input.Snapshots()[0:1]))
require.NoError(t, imsb.ExportSpans(t.Context(), input.Snapshots()[0:1]))
sds = imsb.GetSpans()
assert.Len(t, sds, 1)
assert.Equal(t, input[0], sds[0])

View File

@@ -19,7 +19,7 @@ type rwSpan struct {
func TestSpanRecorderOnStartAppends(t *testing.T) {
s0, s1 := new(rwSpan), new(rwSpan)
ctx := context.Background()
ctx := t.Context()
sr := new(SpanRecorder)
assert.Empty(t, sr.started)
@@ -55,7 +55,7 @@ func TestSpanRecorderOnEndAppends(t *testing.T) {
}
func TestSpanRecorderShutdownNoError(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
assert.NoError(t, new(SpanRecorder).Shutdown(ctx))
var c context.CancelFunc
@@ -65,7 +65,7 @@ func TestSpanRecorderShutdownNoError(t *testing.T) {
}
func TestSpanRecorderForceFlushNoError(t *testing.T) {
ctx := context.Background()
ctx := t.Context()
assert.NoError(t, new(SpanRecorder).ForceFlush(ctx))
var c context.CancelFunc
@@ -103,7 +103,7 @@ func TestEndingConcurrentSafe(t *testing.T) {
func TestStartingConcurrentSafe(t *testing.T) {
sr := NewSpanRecorder()
ctx := context.Background()
ctx := t.Context()
runConcurrently(
func() { sr.OnStart(ctx, new(rwSpan)) },
func() { sr.OnStart(ctx, new(rwSpan)) },
@@ -115,7 +115,7 @@ func TestStartingConcurrentSafe(t *testing.T) {
func TestResetConcurrentSafe(t *testing.T) {
sr := NewSpanRecorder()
ctx := context.Background()
ctx := t.Context()
runConcurrently(
func() { sr.OnStart(ctx, new(rwSpan)) },

View File

@@ -22,6 +22,7 @@ import (
func basicTracerProvider(t *testing.T) *TracerProvider {
tp := NewTracerProvider(WithSampler(AlwaysSample()))
t.Cleanup(func() {
//nolint:usetesting // required to avoid getting a canceled context at cleanup.
assert.NoError(t, tp.Shutdown(context.Background()))
})
return tp
@@ -112,7 +113,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
ctxKey := testCtxKey{}
ctxValue := "ctx value"
ctx := context.WithValue(context.Background(), ctxKey, ctxValue)
ctx := context.WithValue(t.Context(), ctxKey, ctxValue)
ctx, _ = subject.Start(ctx, "test")
@@ -124,7 +125,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
_, span := subject.Start(context.Background(), "test")
_, span := subject.Start(t.Context(), "test")
require.NotNil(t, span)
require.True(t, span.SpanContext().IsValid())
@@ -135,7 +136,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
ctx, span := subject.Start(context.Background(), "test")
ctx, span := subject.Start(t.Context(), "test")
require.NotNil(t, span)
require.NotEqual(t, trace.SpanContext{}, span.SpanContext())
@@ -147,8 +148,8 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
_, span1 := subject.Start(context.Background(), "span1")
_, span2 := subject.Start(context.Background(), "span2")
_, span1 := subject.Start(t.Context(), "span1")
_, span2 := subject.Start(t.Context(), "span2")
sc1 := span1.SpanContext()
sc2 := span2.SpanContext()
@@ -162,7 +163,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
ctx, parent := subject.Start(context.Background(), "parent")
ctx, parent := subject.Start(t.Context(), "parent")
_, child := subject.Start(ctx, "child")
psc := parent.SpanContext()
@@ -177,7 +178,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
ctx, parent := subject.Start(context.Background(), "parent")
ctx, parent := subject.Start(t.Context(), "parent")
_, child := subject.Start(ctx, "child", trace.WithNewRoot())
psc := parent.SpanContext()
@@ -192,8 +193,8 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
_, remoteParent := subject.Start(context.Background(), "remote parent")
parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
_, remoteParent := subject.Start(t.Context(), "remote parent")
parentCtx := trace.ContextWithRemoteSpanContext(t.Context(), remoteParent.SpanContext())
_, child := subject.Start(parentCtx, "child")
psc := remoteParent.SpanContext()
@@ -208,8 +209,8 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
subject := subjectFactory()
_, remoteParent := subject.Start(context.Background(), "remote parent")
parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
_, remoteParent := subject.Start(t.Context(), "remote parent")
parentCtx := trace.ContextWithRemoteSpanContext(t.Context(), remoteParent.SpanContext())
_, child := subject.Start(parentCtx, "child", trace.WithNewRoot())
psc := remoteParent.SpanContext()
@@ -224,7 +225,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
tracer := subjectFactory()
ctx, parent := tracer.Start(context.Background(), "span")
ctx, parent := tracer.Start(t.Context(), "span")
runner := func(tp trace.Tracer) <-chan struct{} {
done := make(chan struct{})
@@ -284,12 +285,12 @@ func (h *harness) testSpan(tracerFactory func() trace.Tracer) {
mechanisms := map[string]func() trace.Span{
"Span created via Tracer#Start": func() trace.Span {
tracer := tracerFactory()
_, subject := tracer.Start(context.Background(), "test")
_, subject := tracer.Start(h.t.Context(), "test")
return subject
},
"Span created via span.TracerProvider()": func() trace.Span {
ctx, spanA := tracerFactory().Start(context.Background(), "span1")
ctx, spanA := tracerFactory().Start(h.t.Context(), "span1")
_, spanB := spanA.TracerProvider().Tracer("second").Start(ctx, "span2")
return spanB