mirror of
https://github.com/open-telemetry/opentelemetry-go.git
synced 2024-12-28 21:09:17 +02:00
8e8ad092cc
## What Change `Processor.OnEmit` methods to accept a record pointer. ## Why Fixes https://github.com/open-telemetry/opentelemetry-go/issues/5219 This would be specification compliant according to discussions around https://github.com/open-telemetry/opentelemetry-specification/pull/4067 This is inline of how processors Go span processors works and how log processors work in other languages. If the performance (an additional heap allocation during log processing) would occur to be a significant problem for some users, we have at few possible solutions: 1. Utilize PGO which may also lead to decreasing heap allocations (sources: https://landontclipp.github.io/blog/2023/08/25/profile-guided-optimizations-in-go/#devirtualization, https://andrewwphillips.github.io/blog/pgo.html). Currently it does not but I expect it may change in future. 2. Improve the Go compilers escape analysis (related to previous point) 3. introduce new "chaining processor" which can be also desirable in other languages ## Benchstat `old` is from `main`. `new` is from current branch. `new-pgo` is from current branch with PGO optimization. I first run benchmarks to generate a CPU profile using `go test -run=^$ -bench=. -count=10 -cpuprofile default.pgo` and then I rerun the tests with PGO. Currently, the number of heap allocations is the same. 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: Intel(R) Core(TM) i9-10885H CPU @ 2.40GHz │ old.txt │ new.txt │ new-pgo.txt │ │ sec/op │ sec/op vs base │ sec/op vs base │ BatchProcessorOnEmit-16 402.7n ± 18% 382.0n ± 7% ~ (p=0.247 n=10) 376.7n ± 14% ~ (p=0.210 n=10) Processor/Simple-16 350.9n ± 9% 782.5n ± 6% +123.00% (p=0.000 n=10) 755.1n ± 5% +115.19% (p=0.000 n=10) Processor/Batch-16 1.333µ ± 15% 1.497µ ± 11% +12.27% (p=0.000 n=10) 1.528µ ± 8% +14.63% (p=0.000 n=10) Processor/SetTimestampSimple-16 329.5n ± 15% 711.6n ± 4% +115.93% (p=0.000 n=10) 721.9n ± 5% +119.04% (p=0.000 n=10) Processor/SetTimestampBatch-16 1.163µ ± 2% 1.524µ ± 3% +31.03% (p=0.000 n=10) 1.461µ ± 5% +25.57% (p=0.000 n=10) Processor/AddAttributesSimple-16 408.7n ± 3% 810.1n ± 4% +98.21% (p=0.000 n=10) 830.1n ± 4% +103.11% (p=0.000 n=10) Processor/AddAttributesBatch-16 1.270µ ± 2% 1.623µ ± 4% +27.71% (p=0.000 n=10) 1.597µ ± 7% +25.66% (p=0.000 n=10) Processor/SetAttributesSimple-16 436.2n ± 10% 796.1n ± 3% +82.50% (p=0.000 n=10) 817.6n ± 4% +87.43% (p=0.000 n=10) Processor/SetAttributesBatch-16 1.202µ ± 2% 1.552µ ± 2% +29.06% (p=0.000 n=10) 1.659µ ± 11% +37.96% (p=0.000 n=10) LoggerNewRecord/5_attributes-16 366.6n ± 3% 363.7n ± 7% ~ (p=0.952 n=10) 426.2n ± 7% +16.27% (p=0.000 n=10) LoggerNewRecord/10_attributes-16 1.711µ ± 2% 1.909µ ± 18% +11.57% (p=0.000 n=10) 2.077µ ± 10% +21.39% (p=0.000 n=10) LoggerProviderLogger-16 650.1n ± 4% 690.1n ± 8% +6.15% (p=0.019 n=10) 737.6n ± 13% +13.47% (p=0.004 n=10) WalkAttributes/1_attributes-16 5.264n ± 12% 5.510n ± 8% ~ (p=0.812 n=10) 5.865n ± 5% +11.41% (p=0.011 n=10) WalkAttributes/10_attributes-16 5.440n ± 8% 5.881n ± 7% +8.12% (p=0.004 n=10) 6.104n ± 7% +12.21% (p=0.005 n=10) WalkAttributes/100_attributes-16 5.403n ± 9% 5.894n ± 9% +9.10% (p=0.029 n=10) 5.783n ± 6% ~ (p=0.052 n=10) WalkAttributes/1000_attributes-16 5.196n ± 4% 5.860n ± 8% +12.79% (p=0.000 n=10) 5.981n ± 13% +15.13% (p=0.002 n=10) 
SetAddAttributes/SetAttributes-16 181.2n ± 14% 208.1n ± 12% +14.85% (p=0.005 n=10) 209.9n ± 11% +15.87% (p=0.007 n=10) SetAddAttributes/AddAttributes-16 156.7n ± 14% 161.1n ± 16% ~ (p=0.190 n=10) 165.5n ± 15% ~ (p=0.315 n=10) SimpleProcessorOnEmit-16 11.775n ± 10% 9.027n ± 17% -23.33% (p=0.000 n=10) 9.389n ± 18% -20.26% (p=0.002 n=10) geomean 169.1n 209.6n +23.98% 215.5n +27.48% │ old.txt │ new.txt │ new-pgo.txt │ │ B/s │ B/s vs base │ B/s vs base │ BatchProcessorOnEmit-16 1004.39Mi ± 15% 79.88Mi ± 7% -92.05% (p=0.000 n=10) 81.06Mi ± 12% -91.93% (p=0.000 n=10) │ old.txt │ new.txt │ new-pgo.txt │ │ B/op │ B/op vs base │ B/op vs base │ BatchProcessorOnEmit-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ Processor/Simple-16 0.0 ± 0% 417.0 ± 0% ? (p=0.000 n=10) 417.0 ± 0% ? (p=0.000 n=10) Processor/Batch-16 621.5 ± 2% 1057.5 ± 1% +70.15% (p=0.000 n=10) 1064.5 ± 1% +71.28% (p=0.000 n=10) Processor/SetTimestampSimple-16 0.0 ± 0% 417.0 ± 0% ? (p=0.000 n=10) 418.0 ± 0% ? (p=0.000 n=10) Processor/SetTimestampBatch-16 626.5 ± 3% 1049.5 ± 1% +67.52% (p=0.000 n=10) 1057.5 ± 2% +68.79% (p=0.000 n=10) Processor/AddAttributesSimple-16 0.0 ± 0% 417.0 ± 0% ? (p=0.000 n=10) 417.0 ± 0% ? 
(p=0.000 n=10) Processor/AddAttributesBatch-16 616.5 ± 3% 1053.0 ± 2% +70.80% (p=0.000 n=10) 1048.5 ± 2% +70.07% (p=0.000 n=10) Processor/SetAttributesSimple-16 48.00 ± 0% 466.00 ± 0% +870.83% (p=0.000 n=10) 466.00 ± 0% +870.83% (p=0.000 n=10) Processor/SetAttributesBatch-16 648.0 ± 3% 1089.5 ± 1% +68.13% (p=0.000 n=10) 1087.5 ± 4% +67.82% (p=0.000 n=10) LoggerNewRecord/5_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ LoggerNewRecord/10_attributes-16 610.0 ± 0% 610.0 ± 0% ~ (p=1.000 n=10) ¹ 610.0 ± 0% ~ (p=1.000 n=10) ¹ LoggerProviderLogger-16 354.5 ± 6% 368.0 ± 7% ~ (p=0.288 n=10) 391.0 ± 29% ~ (p=0.239 n=10) WalkAttributes/1_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/10_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/100_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/1000_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ SetAddAttributes/SetAttributes-16 48.00 ± 0% 48.00 ± 0% ~ (p=1.000 n=10) ¹ 48.00 ± 0% ~ (p=1.000 n=10) ¹ SetAddAttributes/AddAttributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ SimpleProcessorOnEmit-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ geomean ² ? ² ? ² ¹ all samples are equal ² summaries must be >0 to compute geomean │ old.txt │ new.txt │ new-pgo.txt │ │ allocs/op │ allocs/op vs base │ allocs/op vs base │ BatchProcessorOnEmit-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ Processor/Simple-16 0.000 ± 0% 1.000 ± 0% ? (p=0.000 n=10) 1.000 ± 0% ? (p=0.000 n=10) Processor/Batch-16 0.000 ± 0% 1.000 ± 0% ? (p=0.000 n=10) 1.000 ± 0% ? (p=0.000 n=10) Processor/SetTimestampSimple-16 0.000 ± 0% 1.000 ± 0% ? (p=0.000 n=10) 1.000 ± 0% ? (p=0.000 n=10) Processor/SetTimestampBatch-16 0.000 ± 0% 1.000 ± 0% ? (p=0.000 n=10) 1.000 ± 0% ? 
(p=0.000 n=10) Processor/AddAttributesSimple-16 0.000 ± 0% 1.000 ± 0% ? (p=0.000 n=10) 1.000 ± 0% ? (p=0.000 n=10) Processor/AddAttributesBatch-16 0.000 ± 0% 1.000 ± 0% ? (p=0.000 n=10) 1.000 ± 0% ? (p=0.000 n=10) Processor/SetAttributesSimple-16 1.000 ± 0% 2.000 ± 0% +100.00% (p=0.000 n=10) 2.000 ± 0% +100.00% (p=0.000 n=10) Processor/SetAttributesBatch-16 1.000 ± 0% 2.000 ± 0% +100.00% (p=0.000 n=10) 2.000 ± 0% +100.00% (p=0.000 n=10) LoggerNewRecord/5_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ LoggerNewRecord/10_attributes-16 4.000 ± 0% 4.000 ± 0% ~ (p=1.000 n=10) ¹ 4.000 ± 0% ~ (p=1.000 n=10) ¹ LoggerProviderLogger-16 1.000 ± 0% 1.000 ± 0% ~ (p=1.000 n=10) ¹ 1.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/1_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/10_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/100_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ WalkAttributes/1000_attributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ SetAddAttributes/SetAttributes-16 1.000 ± 0% 1.000 ± 0% ~ (p=1.000 n=10) ¹ 1.000 ± 0% ~ (p=1.000 n=10) ¹ SetAddAttributes/AddAttributes-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ SimpleProcessorOnEmit-16 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ 0.000 ± 0% ~ (p=1.000 n=10) ¹ geomean ² ? ² ? ² ¹ all samples are equal ² summaries must be >0 to compute geomean ```
311 lines
7.6 KiB
Go
311 lines
7.6 KiB
Go
// Copyright The OpenTelemetry Authors
|
|
// SPDX-License-Identifier: Apache-2.0
|
|
|
|
package log // import "go.opentelemetry.io/otel/sdk/log"
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"strconv"
|
|
"sync"
|
|
"testing"
|
|
|
|
"github.com/go-logr/logr"
|
|
"github.com/go-logr/logr/testr"
|
|
"github.com/stretchr/testify/assert"
|
|
"github.com/stretchr/testify/require"
|
|
|
|
"go.opentelemetry.io/otel"
|
|
"go.opentelemetry.io/otel/attribute"
|
|
"go.opentelemetry.io/otel/internal/global"
|
|
"go.opentelemetry.io/otel/log"
|
|
"go.opentelemetry.io/otel/log/noop"
|
|
"go.opentelemetry.io/otel/sdk/resource"
|
|
)
|
|
|
|
// processor is a test double implementing the Processor interface. It
// records every Record passed to OnEmit and counts Shutdown/ForceFlush
// calls so tests can assert how a LoggerProvider drives its processors.
type processor struct {
	// Name identifies the processor instance in test expectations.
	Name string
	// Err, when non-nil, is returned by OnEmit, Shutdown, and ForceFlush.
	Err error

	// shutdownCalls counts invocations of Shutdown.
	shutdownCalls int
	// forceFlushCalls counts invocations of ForceFlush.
	forceFlushCalls int

	// records holds copies of all records received via OnEmit.
	records []Record
	// enabled is the value returned by Enabled.
	enabled bool
}
|
|
|
|
func newProcessor(name string) *processor {
|
|
return &processor{Name: name, enabled: true}
|
|
}
|
|
|
|
func (p *processor) OnEmit(ctx context.Context, r *Record) error {
|
|
if p.Err != nil {
|
|
return p.Err
|
|
}
|
|
|
|
p.records = append(p.records, *r)
|
|
return nil
|
|
}
|
|
|
|
func (p *processor) Enabled(context.Context, Record) bool {
|
|
return p.enabled
|
|
}
|
|
|
|
func (p *processor) Shutdown(context.Context) error {
|
|
p.shutdownCalls++
|
|
return p.Err
|
|
}
|
|
|
|
func (p *processor) ForceFlush(context.Context) error {
|
|
p.forceFlushCalls++
|
|
return p.Err
|
|
}
|
|
|
|
func TestNewLoggerProviderConfiguration(t *testing.T) {
|
|
t.Cleanup(func(orig otel.ErrorHandler) func() {
|
|
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
|
|
t.Log(err)
|
|
}))
|
|
return func() { otel.SetErrorHandler(orig) }
|
|
}(otel.GetErrorHandler()))
|
|
|
|
res := resource.NewSchemaless(attribute.String("key", "value"))
|
|
p0, p1 := newProcessor("0"), newProcessor("1")
|
|
attrCntLim := 12
|
|
attrValLenLim := 21
|
|
|
|
testcases := []struct {
|
|
name string
|
|
envars map[string]string
|
|
options []LoggerProviderOption
|
|
want *LoggerProvider
|
|
}{
|
|
{
|
|
name: "Defaults",
|
|
want: &LoggerProvider{
|
|
resource: resource.Default(),
|
|
attributeCountLimit: defaultAttrCntLim,
|
|
attributeValueLengthLimit: defaultAttrValLenLim,
|
|
},
|
|
},
|
|
{
|
|
name: "Options",
|
|
options: []LoggerProviderOption{
|
|
WithResource(res),
|
|
WithProcessor(p0),
|
|
WithProcessor(p1),
|
|
WithAttributeCountLimit(attrCntLim),
|
|
WithAttributeValueLengthLimit(attrValLenLim),
|
|
},
|
|
want: &LoggerProvider{
|
|
resource: res,
|
|
processors: []Processor{p0, p1},
|
|
attributeCountLimit: attrCntLim,
|
|
attributeValueLengthLimit: attrValLenLim,
|
|
},
|
|
},
|
|
{
|
|
name: "Environment",
|
|
envars: map[string]string{
|
|
envarAttrCntLim: strconv.Itoa(attrCntLim),
|
|
envarAttrValLenLim: strconv.Itoa(attrValLenLim),
|
|
},
|
|
want: &LoggerProvider{
|
|
resource: resource.Default(),
|
|
attributeCountLimit: attrCntLim,
|
|
attributeValueLengthLimit: attrValLenLim,
|
|
},
|
|
},
|
|
{
|
|
name: "InvalidEnvironment",
|
|
envars: map[string]string{
|
|
envarAttrCntLim: "invalid attributeCountLimit",
|
|
envarAttrValLenLim: "invalid attributeValueLengthLimit",
|
|
},
|
|
want: &LoggerProvider{
|
|
resource: resource.Default(),
|
|
attributeCountLimit: defaultAttrCntLim,
|
|
attributeValueLengthLimit: defaultAttrValLenLim,
|
|
},
|
|
},
|
|
{
|
|
name: "Precedence",
|
|
envars: map[string]string{
|
|
envarAttrCntLim: strconv.Itoa(100),
|
|
envarAttrValLenLim: strconv.Itoa(101),
|
|
},
|
|
options: []LoggerProviderOption{
|
|
// These override the environment variables.
|
|
WithAttributeCountLimit(attrCntLim),
|
|
WithAttributeValueLengthLimit(attrValLenLim),
|
|
},
|
|
want: &LoggerProvider{
|
|
resource: resource.Default(),
|
|
attributeCountLimit: attrCntLim,
|
|
attributeValueLengthLimit: attrValLenLim,
|
|
},
|
|
},
|
|
}
|
|
|
|
for _, tc := range testcases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
for key, value := range tc.envars {
|
|
t.Setenv(key, value)
|
|
}
|
|
assert.Equal(t, tc.want, NewLoggerProvider(tc.options...))
|
|
})
|
|
}
|
|
}
|
|
|
|
func TestLoggerProviderConcurrentSafe(t *testing.T) {
|
|
const goRoutineN = 10
|
|
|
|
var wg sync.WaitGroup
|
|
wg.Add(goRoutineN)
|
|
|
|
p := NewLoggerProvider(WithProcessor(newProcessor("0")))
|
|
const name = "testLogger"
|
|
ctx := context.Background()
|
|
for i := 0; i < goRoutineN; i++ {
|
|
go func() {
|
|
defer wg.Done()
|
|
|
|
_ = p.Logger(name)
|
|
_ = p.Shutdown(ctx)
|
|
_ = p.ForceFlush(ctx)
|
|
}()
|
|
}
|
|
|
|
wg.Wait()
|
|
}
|
|
|
|
// logSink wraps a logr.LogSink and captures the arguments of the most
// recent Info call so tests can assert on what was logged.
type logSink struct {
	logr.LogSink

	// level is the verbosity of the last Info call.
	level int
	// msg is the message of the last Info call.
	msg string
	// keysAndValues are the key/value pairs of the last Info call.
	keysAndValues []interface{}
}
|
|
|
|
// Enabled reports true for every level so all Info calls are captured.
func (l *logSink) Enabled(int) bool { return true }
|
|
|
|
func (l *logSink) Info(level int, msg string, keysAndValues ...any) {
|
|
l.level, l.msg, l.keysAndValues = level, msg, keysAndValues
|
|
l.LogSink.Info(level, msg, keysAndValues...)
|
|
}
|
|
|
|
func TestLoggerProviderLogger(t *testing.T) {
|
|
t.Run("InvalidName", func(t *testing.T) {
|
|
l := &logSink{LogSink: testr.New(t).GetSink()}
|
|
t.Cleanup(func(orig logr.Logger) func() {
|
|
global.SetLogger(logr.New(l))
|
|
return func() { global.SetLogger(orig) }
|
|
}(global.GetLogger()))
|
|
|
|
_ = NewLoggerProvider().Logger("")
|
|
assert.Equal(t, 1, l.level, "logged level")
|
|
assert.Equal(t, "Invalid Logger name.", l.msg, "logged message")
|
|
require.Len(t, l.keysAndValues, 2, "logged key values")
|
|
assert.Equal(t, "", l.keysAndValues[1], "logged name")
|
|
})
|
|
|
|
t.Run("Stopped", func(t *testing.T) {
|
|
ctx := context.Background()
|
|
p := NewLoggerProvider()
|
|
_ = p.Shutdown(ctx)
|
|
l := p.Logger("testing")
|
|
|
|
assert.NotNil(t, l)
|
|
assert.IsType(t, noop.Logger{}, l)
|
|
})
|
|
|
|
t.Run("SameLoggers", func(t *testing.T) {
|
|
p := NewLoggerProvider()
|
|
|
|
l0, l1 := p.Logger("l0"), p.Logger("l1")
|
|
l2, l3 := p.Logger("l0"), p.Logger("l1")
|
|
|
|
assert.Same(t, l0, l2)
|
|
assert.Same(t, l1, l3)
|
|
})
|
|
}
|
|
|
|
func TestLoggerProviderShutdown(t *testing.T) {
|
|
t.Run("Once", func(t *testing.T) {
|
|
proc := newProcessor("")
|
|
p := NewLoggerProvider(WithProcessor(proc))
|
|
|
|
ctx := context.Background()
|
|
require.NoError(t, p.Shutdown(ctx))
|
|
require.Equal(t, 1, proc.shutdownCalls, "processor Shutdown not called")
|
|
|
|
require.NoError(t, p.Shutdown(ctx))
|
|
assert.Equal(t, 1, proc.shutdownCalls, "processor Shutdown called multiple times")
|
|
})
|
|
|
|
t.Run("Error", func(t *testing.T) {
|
|
proc := newProcessor("")
|
|
proc.Err = assert.AnError
|
|
p := NewLoggerProvider(WithProcessor(proc))
|
|
|
|
ctx := context.Background()
|
|
assert.ErrorIs(t, p.Shutdown(ctx), assert.AnError, "processor error not returned")
|
|
})
|
|
}
|
|
|
|
func TestLoggerProviderForceFlush(t *testing.T) {
|
|
t.Run("Stopped", func(t *testing.T) {
|
|
proc := newProcessor("")
|
|
p := NewLoggerProvider(WithProcessor(proc))
|
|
|
|
ctx := context.Background()
|
|
require.NoError(t, p.ForceFlush(ctx))
|
|
require.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush not called")
|
|
|
|
require.NoError(t, p.Shutdown(ctx))
|
|
|
|
require.NoError(t, p.ForceFlush(ctx))
|
|
assert.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush called after Shutdown")
|
|
})
|
|
|
|
t.Run("Multi", func(t *testing.T) {
|
|
proc := newProcessor("")
|
|
p := NewLoggerProvider(WithProcessor(proc))
|
|
|
|
ctx := context.Background()
|
|
require.NoError(t, p.ForceFlush(ctx))
|
|
require.Equal(t, 1, proc.forceFlushCalls, "processor ForceFlush not called")
|
|
|
|
require.NoError(t, p.ForceFlush(ctx))
|
|
assert.Equal(t, 2, proc.forceFlushCalls, "processor ForceFlush not called multiple times")
|
|
})
|
|
|
|
t.Run("Error", func(t *testing.T) {
|
|
proc := newProcessor("")
|
|
proc.Err = assert.AnError
|
|
p := NewLoggerProvider(WithProcessor(proc))
|
|
|
|
ctx := context.Background()
|
|
assert.ErrorIs(t, p.ForceFlush(ctx), assert.AnError, "processor error not returned")
|
|
})
|
|
}
|
|
|
|
func BenchmarkLoggerProviderLogger(b *testing.B) {
|
|
p := NewLoggerProvider()
|
|
names := make([]string, b.N)
|
|
for i := 0; i < b.N; i++ {
|
|
names[i] = fmt.Sprintf("%d logger", i)
|
|
}
|
|
|
|
b.ResetTimer()
|
|
b.ReportAllocs()
|
|
|
|
loggers := make([]log.Logger, b.N)
|
|
for i := 0; i < b.N; i++ {
|
|
loggers[i] = p.Logger(names[i])
|
|
}
|
|
|
|
b.StopTimer()
|
|
loggers[0].Enabled(context.Background(), log.Record{})
|
|
}
|