
sdk/log: Assign fltrProcessors on provider creation instead of lazy (#6239)

I think it is better to calculate fltrProcessors upfront instead of
doing it in a lazy manner. Reasons:

- No locks/synchronization on the hot path. Even though the performance
overhead is small (in practice it will usually be unnoticeable; in my
benchmark run it is less than a nanosecond), I think it is good to
minimize locking on the hot path.
- Simpler code (subjective, but at least 9 fewer lines of code).

```
goos: linux
goarch: amd64
pkg: go.opentelemetry.io/otel/sdk/log
cpu: 13th Gen Intel(R) Core(TM) i7-13800H
                 │   old.txt   │               new.txt               │
                 │   sec/op    │   sec/op     vs base                │
LoggerEnabled-20   6.010n ± 1%   5.364n ± 1%  -10.73% (p=0.000 n=10)

                 │  old.txt   │            new.txt             │
                 │    B/op    │    B/op     vs base            │
LoggerEnabled-20   0.000 ± 0%   0.000 ± 0%  ~ (p=1.000 n=10) ¹
¹ all samples are equal

                 │  old.txt   │            new.txt             │
                 │ allocs/op  │ allocs/op   vs base            │
LoggerEnabled-20   0.000 ± 0%   0.000 ± 0%  ~ (p=1.000 n=10) ¹
¹ all samples are equal
```
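To make the trade-off concrete, below is a minimal, self-contained sketch of the two approaches the commit message contrasts. The type, field, and function names are invented for this illustration and are not the SDK's actual API; the point is only that the lazy variant pays for a `sync.Once` on every lookup, while the eager variant does the type assertions once while the provider is constructed:

```go
// Package fltrdemo sketches the lazy vs. eager resolution of filter
// processors discussed above. All names are illustrative, not the SDK's API.
package fltrdemo

import "sync"

// processor stands in for the SDK's Processor interface.
type processor interface{ OnEmit(msg string) }

// filterProcessor is the optional interface a processor can also implement
// to report whether it would process a record at all.
type filterProcessor interface {
	processor
	Enabled() bool
}

// lazyProvider mirrors the old behavior: filter processors are resolved on
// first use, so every call to filterProcessors goes through sync.Once on the
// hot path.
type lazyProvider struct {
	processors []processor

	once  sync.Once
	fltrs []filterProcessor
}

func (p *lazyProvider) filterProcessors() []filterProcessor {
	p.once.Do(func() {
		for _, proc := range p.processors {
			if f, ok := proc.(filterProcessor); ok {
				p.fltrs = append(p.fltrs, f)
			}
		}
	})
	return p.fltrs
}

// eagerProvider mirrors the new behavior: the type assertions happen once at
// construction time and the hot path is a plain field read.
type eagerProvider struct {
	processors []processor
	fltrs      []filterProcessor
}

func newEagerProvider(procs ...processor) *eagerProvider {
	p := &eagerProvider{processors: procs}
	for _, proc := range procs {
		if f, ok := proc.(filterProcessor); ok {
			p.fltrs = append(p.fltrs, f)
		}
	}
	return p
}
```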
Author: Robert Pająk
Date: 2025-02-04 14:38:10 +01:00
Committed by: GitHub
Parent: 2260929549
Commit: 078a4a844f
4 changed files with 31 additions and 40 deletions


```diff
@@ -50,12 +50,11 @@ func (l *logger) Emit(ctx context.Context, r log.Record) {
 // processed, true will be returned by default. A value of false will only be
 // returned if it can be positively verified that no Processor will process.
 func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool {
-	fltrs := l.provider.filterProcessors()
 	// If there are more Processors than FilterProcessors we cannot be sure
 	// that all Processors will drop the record. Therefore, return true.
 	//
 	// If all Processors are FilterProcessors, check if any is enabled.
-	return len(l.provider.processors) > len(fltrs) || anyEnabled(ctx, param, fltrs)
+	return len(l.provider.processors) > len(l.provider.fltrProcessors) || anyEnabled(ctx, param, l.provider.fltrProcessors)
 }
 
 func anyEnabled(ctx context.Context, param log.EnabledParameters, fltrs []x.FilterProcessor) bool {
```
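For context, the `anyEnabled` helper shown above is not modified by this change; it just reports whether at least one of the filter processors would process the record. A sketch of that helper, written as it would sit inside the `sdk/log` package (so `log` and `x` refer to the packages that file already imports):

```go
// anyEnabled reports whether at least one FilterProcessor would process a
// record with the given parameters. This is a sketch of the helper the diff
// references, not necessarily its exact source.
func anyEnabled(ctx context.Context, param log.EnabledParameters, fltrs []x.FilterProcessor) bool {
	for _, f := range fltrs {
		if f.Enabled(ctx, param) {
			// At least one processor will handle the record.
			return true
		}
	}
	// No FilterProcessor will process the record.
	return false
}
```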


```diff
@@ -62,3 +62,23 @@ func BenchmarkLoggerNewRecord(b *testing.B) {
 		})
 	})
 }
+
+func BenchmarkLoggerEnabled(b *testing.B) {
+	provider := NewLoggerProvider(
+		WithProcessor(newFltrProcessor("0", false)),
+		WithProcessor(newFltrProcessor("1", true)),
+	)
+	logger := provider.Logger(b.Name())
+	ctx := context.Background()
+	param := log.EnabledParameters{Severity: log.SeverityDebug}
+	var enabled bool
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for n := 0; n < b.N; n++ {
+		enabled = logger.Enabled(ctx, param)
+	}
+
+	_ = enabled
+}
```


```diff
@@ -279,23 +279,3 @@ func TestLoggerEnabled(t *testing.T) {
 		})
 	}
 }
-
-func BenchmarkLoggerEnabled(b *testing.B) {
-	provider := NewLoggerProvider(
-		WithProcessor(newFltrProcessor("0", false)),
-		WithProcessor(newFltrProcessor("1", true)),
-	)
-	logger := provider.Logger(b.Name())
-	ctx := context.Background()
-	param := log.EnabledParameters{Severity: log.SeverityDebug}
-	var enabled bool
-
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	for n := 0; n < b.N; n++ {
-		enabled = logger.Enabled(ctx, param)
-	}
-
-	_ = enabled
-}
```


```diff
@@ -28,10 +28,11 @@ const (
 )
 
 type providerConfig struct {
-	resource      *resource.Resource
-	processors    []Processor
-	attrCntLim    setting[int]
-	attrValLenLim setting[int]
+	resource       *resource.Resource
+	processors     []Processor
+	fltrProcessors []x.FilterProcessor
+	attrCntLim     setting[int]
+	attrValLenLim  setting[int]
 }
 
 func newProviderConfig(opts []LoggerProviderOption) providerConfig {
@@ -64,12 +65,10 @@ type LoggerProvider struct {
 
 	resource                  *resource.Resource
 	processors                []Processor
+	fltrProcessors            []x.FilterProcessor
 	attributeCountLimit       int
 	attributeValueLengthLimit int
 
-	fltrProcessorsOnce sync.Once
-	fltrProcessors     []x.FilterProcessor
-
 	loggersMu sync.Mutex
 	loggers   map[instrumentation.Scope]*logger
 
@@ -92,22 +91,12 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
 	return &LoggerProvider{
 		resource:                  cfg.resource,
 		processors:                cfg.processors,
+		fltrProcessors:            cfg.fltrProcessors,
 		attributeCountLimit:       cfg.attrCntLim.Value,
 		attributeValueLengthLimit: cfg.attrValLenLim.Value,
 	}
 }
 
-func (p *LoggerProvider) filterProcessors() []x.FilterProcessor {
-	p.fltrProcessorsOnce.Do(func() {
-		for _, proc := range p.processors {
-			if f, ok := proc.(x.FilterProcessor); ok {
-				p.fltrProcessors = append(p.fltrProcessors, f)
-			}
-		}
-	})
-	return p.fltrProcessors
-}
-
 // Logger returns a new [log.Logger] with the provided name and configuration.
 //
 // If p is shut down, a [noop.Logger] instance is returned.
@@ -220,6 +209,9 @@ func WithResource(res *resource.Resource) LoggerProviderOption {
 func WithProcessor(processor Processor) LoggerProviderOption {
 	return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
 		cfg.processors = append(cfg.processors, processor)
+		if f, ok := processor.(x.FilterProcessor); ok {
+			cfg.fltrProcessors = append(cfg.fltrProcessors, f)
+		}
 		return cfg
 	})
 }
```
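To see why the classification in WithProcessor matters at the call site: a processor that also implements the experimental FilterProcessor method set is now recorded in cfg.fltrProcessors while the provider is built, so Logger.Enabled can consult it without any locking. Below is a hedged usage sketch; the minSeverityProcessor type and its fields are made up for this example, and the Enabled signature should be checked against the x.FilterProcessor interface of the SDK version in use:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// minSeverityProcessor is a hypothetical processor that drops everything
// below a configured severity. Besides the regular Processor methods it
// implements Enabled, which is what lets WithProcessor classify it as a
// FilterProcessor when the provider is created.
type minSeverityProcessor struct {
	min log.Severity
}

func (p *minSeverityProcessor) OnEmit(context.Context, *sdklog.Record) error { return nil }
func (p *minSeverityProcessor) Shutdown(context.Context) error               { return nil }
func (p *minSeverityProcessor) ForceFlush(context.Context) error             { return nil }

// Enabled reports whether this processor would process a record with the
// given parameters (signature assumed to match x.FilterProcessor).
func (p *minSeverityProcessor) Enabled(_ context.Context, param log.EnabledParameters) bool {
	return param.Severity >= p.min
}

func main() {
	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(&minSeverityProcessor{min: log.SeverityInfo}),
	)
	logger := provider.Logger("example")

	// With the eager classification this call is lock-free: it only reads
	// the provider's precomputed slice of filter processors.
	enabled := logger.Enabled(context.Background(), log.EnabledParameters{Severity: log.SeverityDebug})
	fmt.Println(enabled) // false: Debug is below the configured Info threshold
}
```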