opentelemetry-go/sdk/log/provider_test.go
Robert Pająk 23f7b41e0a
log: Introduce EnabledParameters (#5791)
Fixes https://github.com/open-telemetry/opentelemetry-go/issues/5769

Related spec PR:
https://github.com/open-telemetry/opentelemetry-specification/pull/4203

Remark: A follow-up in contrib is required, and afterwards another one here,
to fix `example/dice`.
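
For illustration only (not part of this change), here is a minimal sketch of how the new surface is meant to be used: a processor opts into filtering by providing an `Enabled` method with the `EnabledParameters`-based signature (the SDK detects it via the experimental `internal/x.FilterProcessor` interface), and callers can consult `Logger.Enabled` before doing any expensive record construction. The `dropAllProcessor` name is made up for the example.

```go
// Illustrative sketch only: a processor that opts into the Enabled
// filtering hook introduced alongside EnabledParameters.
package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// dropAllProcessor is a hypothetical processor that accepts records but
// reports itself as disabled for every Enabled query.
type dropAllProcessor struct{}

func (dropAllProcessor) OnEmit(context.Context, *sdklog.Record) error { return nil }
func (dropAllProcessor) Shutdown(context.Context) error               { return nil }
func (dropAllProcessor) ForceFlush(context.Context) error             { return nil }

// Enabled has the shape expected by the experimental x.FilterProcessor
// interface: it now receives log.EnabledParameters instead of a Record.
func (dropAllProcessor) Enabled(context.Context, log.EnabledParameters) bool { return false }

func main() {
	provider := sdklog.NewLoggerProvider(sdklog.WithProcessor(dropAllProcessor{}))
	defer func() { _ = provider.Shutdown(context.Background()) }()

	logger := provider.Logger("example")

	// With every registered processor reporting false, the logger is
	// disabled and the caller can skip building the record entirely.
	if logger.Enabled(context.Background(), log.EnabledParameters{}) {
		// ... assemble and emit a record ...
	}
}
```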

Benchstat results for `sdk/log` (`log` has no benchmarks related to
Enabled):

```
goos: linux
goarch: amd64
pkg: go.opentelemetry.io/otel/sdk/log
cpu: Intel(R) Core(TM) i9-10885H CPU @ 2.40GHz
                                  │    old.txt    │               new.txt                │
                                  │    sec/op     │    sec/op     vs base                │
BatchProcessorOnEmit-16              398.8n ± 10%   395.6n ± 12%        ~ (p=0.971 n=10)
Processor/Simple-16                  882.2n ±  8%   869.8n ±  9%        ~ (p=0.811 n=10)
Processor/Batch-16                   1.478µ ±  3%   1.485µ ±  5%        ~ (p=0.646 n=10)
Processor/SetTimestampSimple-16      847.8n ±  1%   844.6n ±  3%        ~ (p=0.247 n=10)
Processor/SetTimestampBatch-16       1.480µ ±  3%   1.473µ ±  4%        ~ (p=0.700 n=10)
Processor/AddAttributesSimple-16     930.0n ±  1%   933.8n ±  1%        ~ (p=0.172 n=10)
Processor/AddAttributesBatch-16      1.624µ ±  2%   1.639µ ±  2%        ~ (p=0.839 n=10)
Processor/SetAttributesSimple-16     903.4n ±  1%   895.1n ±  1%        ~ (p=0.190 n=10)
Processor/SetAttributesBatch-16      1.554µ ±  4%   1.529µ ±  3%        ~ (p=0.159 n=10)
LoggerNewRecord/5_attributes-16      346.0n ±  2%   343.3n ±  2%        ~ (p=0.448 n=10)
LoggerNewRecord/10_attributes-16     1.608µ ±  6%   1.503µ ±  2%   -6.53% (p=0.007 n=10)
LoggerEnabled-16                    34.305n ±  8%   6.706n ±  1%  -80.45% (p=0.000 n=10)
LoggerProviderLogger-16              636.9n ± 10%   605.8n ±  3%        ~ (p=0.105 n=10)
WalkAttributes/1_attributes-16       5.363n ±  3%   4.540n ± 14%  -15.34% (p=0.002 n=10)
WalkAttributes/10_attributes-16      5.436n ±  7%   4.461n ±  2%  -17.95% (p=0.000 n=10)
WalkAttributes/100_attributes-16     5.126n ±  9%   4.465n ±  1%  -12.90% (p=0.000 n=10)
WalkAttributes/1000_attributes-16    5.316n ±  9%   4.502n ±  5%  -15.32% (p=0.002 n=10)
SetAddAttributes/SetAttributes-16    220.5n ± 18%   192.6n ± 11%  -12.67% (p=0.007 n=10)
SetAddAttributes/AddAttributes-16    165.3n ± 21%   127.3n ± 22%  -22.96% (p=0.011 n=10)
SimpleProcessorOnEmit-16             2.159n ±  9%   2.167n ±  9%        ~ (p=0.739 n=10)
geomean                              178.3n         154.5n        -13.31%

                        │    old.txt    │             new.txt             │
                        │      B/s      │      B/s       vs base          │
BatchProcessorOnEmit-16   76.52Mi ± 11%   77.14Mi ± 14%  ~ (p=0.971 n=10)

                                  │    old.txt     │                new.txt                │
                                  │      B/op      │     B/op      vs base                 │
BatchProcessorOnEmit-16               0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/Simple-16                   417.0 ± 0%       417.0 ± 0%       ~ (p=1.000 n=10) ¹
Processor/Batch-16                  1.093Ki ± 1%     1.088Ki ± 1%       ~ (p=0.254 n=10)
Processor/SetTimestampSimple-16       417.0 ± 0%       417.0 ± 0%       ~ (p=1.000 n=10) ¹
Processor/SetTimestampBatch-16      1.095Ki ± 1%     1.084Ki ± 2%       ~ (p=0.361 n=10)
Processor/AddAttributesSimple-16      417.0 ± 0%       417.0 ± 0%       ~ (p=1.000 n=10) ¹
Processor/AddAttributesBatch-16     1.085Ki ± 1%     1.086Ki ± 1%       ~ (p=1.000 n=10)
Processor/SetAttributesSimple-16      465.0 ± 0%       465.0 ± 0%       ~ (p=1.000 n=10) ¹
Processor/SetAttributesBatch-16     1.129Ki ± 1%     1.125Ki ± 1%       ~ (p=0.084 n=10)
LoggerNewRecord/5_attributes-16       0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
LoggerNewRecord/10_attributes-16      610.0 ± 0%       610.0 ± 0%       ~ (p=1.000 n=10) ¹
LoggerEnabled-16                      0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
LoggerProviderLogger-16               359.0 ± 6%       346.0 ± 3%       ~ (p=0.117 n=10)
WalkAttributes/1_attributes-16        0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/10_attributes-16       0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/100_attributes-16      0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/1000_attributes-16     0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
SetAddAttributes/SetAttributes-16     48.00 ± 0%       48.00 ± 0%       ~ (p=1.000 n=10) ¹
SetAddAttributes/AddAttributes-16     0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
SimpleProcessorOnEmit-16              0.000 ± 0%       0.000 ± 0%       ~ (p=1.000 n=10) ¹
geomean                                          ²                 -0.27%                ²
¹ all samples are equal
² summaries must be >0 to compute geomean

                                  │   old.txt    │               new.txt               │
                                  │  allocs/op   │ allocs/op   vs base                 │
BatchProcessorOnEmit-16             0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/Simple-16                 1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/Batch-16                  1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/SetTimestampSimple-16     1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/SetTimestampBatch-16      1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/AddAttributesSimple-16    1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/AddAttributesBatch-16     1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/SetAttributesSimple-16    2.000 ± 0%     2.000 ± 0%       ~ (p=1.000 n=10) ¹
Processor/SetAttributesBatch-16     2.000 ± 0%     2.000 ± 0%       ~ (p=1.000 n=10) ¹
LoggerNewRecord/5_attributes-16     0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
LoggerNewRecord/10_attributes-16    4.000 ± 0%     4.000 ± 0%       ~ (p=1.000 n=10) ¹
LoggerEnabled-16                    0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
LoggerProviderLogger-16             1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/1_attributes-16      0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/10_attributes-16     0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/100_attributes-16    0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
WalkAttributes/1000_attributes-16   0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
SetAddAttributes/SetAttributes-16   1.000 ± 0%     1.000 ± 0%       ~ (p=1.000 n=10) ¹
SetAddAttributes/AddAttributes-16   0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
SimpleProcessorOnEmit-16            0.000 ± 0%     0.000 ± 0%       ~ (p=1.000 n=10) ¹
geomean                                        ²               +0.00%                ²
¹ all samples are equal
² summaries must be >0 to compute geomean
```
2024-09-13 06:35:01 +02:00


// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package log // import "go.opentelemetry.io/otel/sdk/log"

import (
	"context"
	"fmt"
	"strconv"
	"sync"
	"testing"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/testr"
	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/internal/global"
	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/noop"
	ottest "go.opentelemetry.io/otel/sdk/internal/internaltest"
	"go.opentelemetry.io/otel/sdk/log/internal/x"
	"go.opentelemetry.io/otel/sdk/resource"
)
const envVarResourceAttributes = "OTEL_RESOURCE_ATTRIBUTES"

// processor is a test Processor that records the calls it receives and the
// records it is asked to emit.
type processor struct {
	Name string
	Err  error

	shutdownCalls   int
	forceFlushCalls int

	records []Record
}

func newProcessor(name string) *processor {
	return &processor{Name: name}
}

func (p *processor) OnEmit(ctx context.Context, r *Record) error {
	if p.Err != nil {
		return p.Err
	}
	p.records = append(p.records, *r)
	return nil
}

func (p *processor) Shutdown(context.Context) error {
	p.shutdownCalls++
	return p.Err
}

func (p *processor) ForceFlush(context.Context) error {
	p.forceFlushCalls++
	return p.Err
}

// fltrProcessor additionally implements the experimental x.FilterProcessor
// interface with a fixed Enabled result.
type fltrProcessor struct {
	*processor

	enabled bool
}

var _ x.FilterProcessor = (*fltrProcessor)(nil)

func newFltrProcessor(name string, enabled bool) *fltrProcessor {
	return &fltrProcessor{
		processor: newProcessor(name),
		enabled:   enabled,
	}
}

func (p *fltrProcessor) Enabled(context.Context, log.EnabledParameters) bool {
	return p.enabled
}
func TestNewLoggerProviderConfiguration(t *testing.T) {
	t.Cleanup(func(orig otel.ErrorHandler) func() {
		otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
			t.Log(err)
		}))
		return func() { otel.SetErrorHandler(orig) }
	}(otel.GetErrorHandler()))

	res := resource.NewSchemaless(attribute.String("key", "value"))
	p0, p1 := newProcessor("0"), newProcessor("1")
	attrCntLim := 12
	attrValLenLim := 21

	testcases := []struct {
		name    string
		envars  map[string]string
		options []LoggerProviderOption
		want    *LoggerProvider
	}{
		{
			name: "Defaults",
			want: &LoggerProvider{
				resource:                  resource.Default(),
				attributeCountLimit:       defaultAttrCntLim,
				attributeValueLengthLimit: defaultAttrValLenLim,
			},
		},
		{
			name: "Options",
			options: []LoggerProviderOption{
				WithResource(res),
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeCountLimit(attrCntLim),
				WithAttributeValueLengthLimit(attrValLenLim),
			},
			want: &LoggerProvider{
				resource:                  res,
				processors:                []Processor{p0, p1},
				attributeCountLimit:       attrCntLim,
				attributeValueLengthLimit: attrValLenLim,
			},
		},
		{
			name: "Environment",
			envars: map[string]string{
				envarAttrCntLim:    strconv.Itoa(attrCntLim),
				envarAttrValLenLim: strconv.Itoa(attrValLenLim),
			},
			want: &LoggerProvider{
				resource:                  resource.Default(),
				attributeCountLimit:       attrCntLim,
				attributeValueLengthLimit: attrValLenLim,
			},
		},
		{
			name: "InvalidEnvironment",
			envars: map[string]string{
				envarAttrCntLim:    "invalid attributeCountLimit",
				envarAttrValLenLim: "invalid attributeValueLengthLimit",
			},
			want: &LoggerProvider{
				resource:                  resource.Default(),
				attributeCountLimit:       defaultAttrCntLim,
				attributeValueLengthLimit: defaultAttrValLenLim,
			},
		},
		{
			name: "Precedence",
			envars: map[string]string{
				envarAttrCntLim:    strconv.Itoa(100),
				envarAttrValLenLim: strconv.Itoa(101),
			},
			options: []LoggerProviderOption{
				// These override the environment variables.
				WithAttributeCountLimit(attrCntLim),
				WithAttributeValueLengthLimit(attrValLenLim),
			},
			want: &LoggerProvider{
				resource:                  resource.Default(),
				attributeCountLimit:       attrCntLim,
				attributeValueLengthLimit: attrValLenLim,
			},
		},
	}

	for _, tc := range testcases {
		t.Run(tc.name, func(t *testing.T) {
			for key, value := range tc.envars {
				t.Setenv(key, value)
			}
			assert.Equal(t, tc.want, NewLoggerProvider(tc.options...))
		})
	}
}
func mergeResource(t *testing.T, r1, r2 *resource.Resource) *resource.Resource {
	r, err := resource.Merge(r1, r2)
	assert.NoError(t, err)
	return r
}

func TestWithResource(t *testing.T) {
	store, err := ottest.SetEnvVariables(map[string]string{
		envVarResourceAttributes: "key=value,rk5=7",
	})
	require.NoError(t, err)
	defer func() { require.NoError(t, store.Restore()) }()

	cases := []struct {
		name    string
		options []LoggerProviderOption
		want    *resource.Resource
		msg     string
	}{
		{
			name:    "explicitly empty resource",
			options: []LoggerProviderOption{WithResource(resource.Empty())},
			want:    resource.Environment(),
		},
		{
			name:    "uses default if no resource option",
			options: []LoggerProviderOption{},
			want:    resource.Default(),
		},
		{
			name:    "explicit resource",
			options: []LoggerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)))},
			want:    mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))),
		},
		{
			name: "last resource wins",
			options: []LoggerProviderOption{
				WithResource(resource.NewSchemaless(attribute.String("rk1", "vk1"), attribute.Int64("rk2", 5))),
				WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))),
			},
			want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))),
		},
		{
			name:    "overlapping attributes with environment resource",
			options: []LoggerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)))},
			want:    mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))),
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := newProviderConfig(tc.options).resource
			if diff := cmp.Diff(got, tc.want); diff != "" {
				t.Errorf("WithResource:\n -got +want %s", diff)
			}
		})
	}
}
func TestLoggerProviderConcurrentSafe(t *testing.T) {
	const goRoutineN = 10

	var wg sync.WaitGroup
	wg.Add(goRoutineN)

	p := NewLoggerProvider(WithProcessor(newProcessor("0")))
	const name = "testLogger"
	ctx := context.Background()
	for i := 0; i < goRoutineN; i++ {
		go func() {
			defer wg.Done()
			_ = p.Logger(name)
			_ = p.Shutdown(ctx)
			_ = p.ForceFlush(ctx)
		}()
	}

	wg.Wait()
}

// logSink records the last Info call it receives before delegating to the
// wrapped logr.LogSink.
type logSink struct {
	logr.LogSink

	level         int
	msg           string
	keysAndValues []interface{}
}

func (l *logSink) Enabled(int) bool { return true }

func (l *logSink) Info(level int, msg string, keysAndValues ...any) {
	l.level, l.msg, l.keysAndValues = level, msg, keysAndValues
	l.LogSink.Info(level, msg, keysAndValues...)
}
func TestLoggerProviderLogger(t *testing.T) {
	t.Run("InvalidName", func(t *testing.T) {
		l := &logSink{LogSink: testr.New(t).GetSink()}
		t.Cleanup(func(orig logr.Logger) func() {
			global.SetLogger(logr.New(l))
			return func() { global.SetLogger(orig) }
		}(global.GetLogger()))

		_ = NewLoggerProvider().Logger("")
		assert.Equal(t, 1, l.level, "logged level")
		assert.Equal(t, "Invalid Logger name.", l.msg, "logged message")
		require.Len(t, l.keysAndValues, 2, "logged key values")
		assert.Equal(t, "", l.keysAndValues[1], "logged name")
	})

	t.Run("Stopped", func(t *testing.T) {
		ctx := context.Background()
		p := NewLoggerProvider()
		_ = p.Shutdown(ctx)
		l := p.Logger("testing")

		assert.NotNil(t, l)
		assert.IsType(t, noop.Logger{}, l)
	})

	t.Run("SameLoggers", func(t *testing.T) {
		p := NewLoggerProvider()

		l0, l1 := p.Logger("l0"), p.Logger("l1")
		l2, l3 := p.Logger("l0"), p.Logger("l1")

		assert.Same(t, l0, l2)
		assert.Same(t, l1, l3)
	})
}
func TestLoggerProviderShutdown(t *testing.T) {
	t.Run("Once", func(t *testing.T) {
		proc := newProcessor("")
		p := NewLoggerProvider(WithProcessor(proc))

		ctx := context.Background()
		require.NoError(t, p.Shutdown(ctx))
		require.Equal(t, 1, proc.shutdownCalls, "processor Shutdown not called")

		require.NoError(t, p.Shutdown(ctx))
		assert.Equal(t, 1, proc.shutdownCalls, "processor Shutdown called multiple times")
	})

	t.Run("Error", func(t *testing.T) {
		proc := newProcessor("")
		proc.Err = assert.AnError
		p := NewLoggerProvider(WithProcessor(proc))

		ctx := context.Background()
		assert.ErrorIs(t, p.Shutdown(ctx), assert.AnError, "processor error not returned")
	})
}

func TestLoggerProviderForceFlush(t *testing.T) {
	t.Run("Stopped", func(t *testing.T) {
		proc := newProcessor("")
		p := NewLoggerProvider(WithProcessor(proc))

		ctx := context.Background()
		require.NoError(t, p.ForceFlush(ctx))
		require.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush not called")

		require.NoError(t, p.Shutdown(ctx))

		require.NoError(t, p.ForceFlush(ctx))
		assert.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush called after Shutdown")
	})

	t.Run("Multi", func(t *testing.T) {
		proc := newProcessor("")
		p := NewLoggerProvider(WithProcessor(proc))

		ctx := context.Background()
		require.NoError(t, p.ForceFlush(ctx))
		require.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush not called")

		require.NoError(t, p.ForceFlush(ctx))
		assert.Equal(t, 2, proc.forceFlushCalls, "processor ForceFlush not called multiple times")
	})

	t.Run("Error", func(t *testing.T) {
		proc := newProcessor("")
		proc.Err = assert.AnError
		p := NewLoggerProvider(WithProcessor(proc))

		ctx := context.Background()
		assert.ErrorIs(t, p.ForceFlush(ctx), assert.AnError, "processor error not returned")
	})
}
func BenchmarkLoggerProviderLogger(b *testing.B) {
	p := NewLoggerProvider()
	names := make([]string, b.N)
	for i := 0; i < b.N; i++ {
		names[i] = fmt.Sprintf("%d logger", i)
	}

	b.ResetTimer()
	b.ReportAllocs()

	loggers := make([]log.Logger, b.N)
	for i := 0; i < b.N; i++ {
		loggers[i] = p.Logger(names[i])
	}

	b.StopTimer()
	loggers[0].Enabled(context.Background(), log.EnabledParameters{})
}