1
0
mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2025-11-29 23:07:45 +02:00
Files
opentelemetry-go/sdk/log/logger_test.go

516 lines
15 KiB
Go
Raw Normal View History

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package log // import "go.opentelemetry.io/otel/sdk/log"
import (
"context"
"errors"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/log"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.36.0"
"go.opentelemetry.io/otel/semconv/v1.36.0/otelconv"
"go.opentelemetry.io/otel/trace"
)
// TestLoggerEmit verifies that logger.Emit forwards a fully populated SDK
// Record to every configured processor, applying the provider's limits,
// resource, scope, trace context, observed-timestamp backfill, and
// attribute/body key-deduplication behavior.
func TestLoggerEmit(t *testing.T) {
	nowDate := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC)

	// Stub the package-level now function so records missing an observed
	// timestamp receive a deterministic value; restore it on cleanup.
	nowSwap := now
	t.Cleanup(func() {
		now = nowSwap
	})
	now = func() time.Time {
		return nowDate
	}

	p0, p1, p2WithError := newProcessor("0"), newProcessor("1"), newProcessor("2")
	p2WithError.Err = errors.New("error")

	// Base API record copied by the variants below.
	r := log.Record{}
	r.SetEventName("testing.name")
	r.SetTimestamp(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC))
	r.SetBody(log.StringValue("testing body value"))
	r.SetSeverity(log.SeverityInfo)
	r.SetSeverityText("testing text")
	r.AddAttributes(
		log.String("k1", "str"),
		log.Float64("k2", 1.0),
	)
	r.SetObservedTimestamp(time.Date(2001, time.January, 1, 0, 0, 0, 0, time.UTC))

	// Zero observed timestamp: Emit is expected to backfill it with now().
	rWithNoObservedTimestamp := r
	rWithNoObservedTimestamp.SetObservedTimestamp(time.Time{})

	// Duplicate attribute key and duplicate body-map keys; used with
	// WithAllowKeyDuplication, which must preserve the duplicates.
	rWithAllowKeyDuplication := r
	rWithAllowKeyDuplication.AddAttributes(
		log.String("k1", "str1"),
	)
	rWithAllowKeyDuplication.SetBody(log.MapValue(
		log.Int64("1", 2),
		log.Int64("1", 3),
	))

	// Duplicate body-map keys without WithAllowKeyDuplication; dedup is
	// expected to keep only the last value for a repeated key.
	rWithDuplicatesInBody := r
	rWithDuplicatesInBody.SetBody(log.MapValue(
		log.Int64("1", 2),
		log.Int64("1", 3),
	))

	contextWithSpanContext := trace.ContextWithSpanContext(
		context.Background(),
		trace.NewSpanContext(trace.SpanContextConfig{
			TraceID:    trace.TraceID{0o1},
			SpanID:     trace.SpanID{0o2},
			TraceFlags: 0x1,
		}),
	)

	testCases := []struct {
		name            string
		logger          *logger
		ctx             context.Context
		record          log.Record
		expectedRecords []Record
	}{
		{
			name:   "NoProcessors",
			logger: newLogger(NewLoggerProvider(), instrumentation.Scope{}),
			ctx:    context.Background(),
			record: r,
			// No processors configured, so nothing is recorded.
		},
		{
			name: "WithProcessors",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeValueLengthLimit(3),
				WithAttributeCountLimit(2),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
			), instrumentation.Scope{Name: "scope"}),
			ctx:    context.Background(),
			record: r,
			expectedRecords: []Record{
				{
					eventName:                 r.EventName(),
					timestamp:                 r.Timestamp(),
					body:                      r.Body(),
					severity:                  r.Severity(),
					severityText:              r.SeverityText(),
					observedTimestamp:         r.ObservedTimestamp(),
					resource:                  resource.NewSchemaless(attribute.String("key", "value")),
					attributeValueLengthLimit: 3,
					attributeCountLimit:       2,
					scope:                     &instrumentation.Scope{Name: "scope"},
					front: [attributesInlineCount]log.KeyValue{
						log.String("k1", "str"),
						log.Float64("k2", 1.0),
					},
					nFront: 2,
				},
			},
		},
		{
			// The erroring processor is not p0/p1, so nothing is expected
			// in their records; Emit must not panic on a processor error.
			name: "WithProcessorsWithError",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p2WithError),
				WithAttributeValueLengthLimit(3),
				WithAttributeCountLimit(2),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
			), instrumentation.Scope{Name: "scope"}),
			ctx: context.Background(),
		},
		{
			// A span in the context must be propagated onto the record.
			name: "WithTraceSpanInContext",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeValueLengthLimit(3),
				WithAttributeCountLimit(2),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
			), instrumentation.Scope{Name: "scope"}),
			ctx:    contextWithSpanContext,
			record: r,
			expectedRecords: []Record{
				{
					eventName:                 r.EventName(),
					timestamp:                 r.Timestamp(),
					body:                      r.Body(),
					severity:                  r.Severity(),
					severityText:              r.SeverityText(),
					observedTimestamp:         r.ObservedTimestamp(),
					resource:                  resource.NewSchemaless(attribute.String("key", "value")),
					attributeValueLengthLimit: 3,
					attributeCountLimit:       2,
					scope:                     &instrumentation.Scope{Name: "scope"},
					front: [attributesInlineCount]log.KeyValue{
						log.String("k1", "str"),
						log.Float64("k2", 1.0),
					},
					nFront:     2,
					traceID:    trace.TraceID{0o1},
					spanID:     trace.SpanID{0o2},
					traceFlags: 0x1,
				},
			},
		},
		{
			name: "WithNilContext",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeValueLengthLimit(3),
				WithAttributeCountLimit(2),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
			), instrumentation.Scope{Name: "scope"}),
			// Fixed: this case previously passed context.Background(),
			// which duplicated "WithProcessors" and never exercised nil
			// handling. Emit must tolerate a nil context (cf. the
			// matching case in TestLoggerEnabled).
			ctx:    nil,
			record: r,
			expectedRecords: []Record{
				{
					eventName:                 r.EventName(),
					timestamp:                 r.Timestamp(),
					body:                      r.Body(),
					severity:                  r.Severity(),
					severityText:              r.SeverityText(),
					observedTimestamp:         r.ObservedTimestamp(),
					resource:                  resource.NewSchemaless(attribute.String("key", "value")),
					attributeValueLengthLimit: 3,
					attributeCountLimit:       2,
					scope:                     &instrumentation.Scope{Name: "scope"},
					front: [attributesInlineCount]log.KeyValue{
						log.String("k1", "str"),
						log.Float64("k2", 1.0),
					},
					nFront: 2,
				},
			},
		},
		{
			// A zero observed timestamp is replaced with the stubbed now().
			name: "NoObservedTimestamp",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeValueLengthLimit(3),
				WithAttributeCountLimit(2),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
			), instrumentation.Scope{Name: "scope"}),
			ctx:    context.Background(),
			record: rWithNoObservedTimestamp,
			expectedRecords: []Record{
				{
					eventName:                 rWithNoObservedTimestamp.EventName(),
					timestamp:                 rWithNoObservedTimestamp.Timestamp(),
					body:                      rWithNoObservedTimestamp.Body(),
					severity:                  rWithNoObservedTimestamp.Severity(),
					severityText:              rWithNoObservedTimestamp.SeverityText(),
					observedTimestamp:         nowDate,
					resource:                  resource.NewSchemaless(attribute.String("key", "value")),
					attributeValueLengthLimit: 3,
					attributeCountLimit:       2,
					scope:                     &instrumentation.Scope{Name: "scope"},
					front: [attributesInlineCount]log.KeyValue{
						log.String("k1", "str"),
						log.Float64("k2", 1.0),
					},
					nFront: 2,
				},
			},
		},
		{
			// With WithAllowKeyDuplication both the duplicate attribute
			// key and the duplicate body-map keys are preserved.
			name: "WithAllowKeyDuplication",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeValueLengthLimit(5),
				WithAttributeCountLimit(5),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
				WithAllowKeyDuplication(),
			), instrumentation.Scope{Name: "scope"}),
			ctx:    context.Background(),
			record: rWithAllowKeyDuplication,
			expectedRecords: []Record{
				{
					eventName:                 rWithAllowKeyDuplication.EventName(),
					timestamp:                 rWithAllowKeyDuplication.Timestamp(),
					body:                      rWithAllowKeyDuplication.Body(),
					severity:                  rWithAllowKeyDuplication.Severity(),
					severityText:              rWithAllowKeyDuplication.SeverityText(),
					observedTimestamp:         rWithAllowKeyDuplication.ObservedTimestamp(),
					resource:                  resource.NewSchemaless(attribute.String("key", "value")),
					attributeValueLengthLimit: 5,
					attributeCountLimit:       5,
					scope:                     &instrumentation.Scope{Name: "scope"},
					front: [attributesInlineCount]log.KeyValue{
						log.String("k1", "str"),
						log.Float64("k2", 1.0),
						log.String("k1", "str1"),
					},
					nFront:       3,
					allowDupKeys: true,
				},
			},
		},
		{
			// Without WithAllowKeyDuplication, dedup keeps the last value
			// for the repeated body-map key ("1": 3).
			name: "WithDuplicatesInBody",
			logger: newLogger(NewLoggerProvider(
				WithProcessor(p0),
				WithProcessor(p1),
				WithAttributeValueLengthLimit(5),
				WithAttributeCountLimit(5),
				WithResource(resource.NewSchemaless(attribute.String("key", "value"))),
			), instrumentation.Scope{Name: "scope"}),
			ctx:    context.Background(),
			record: rWithDuplicatesInBody,
			expectedRecords: []Record{
				{
					eventName: rWithDuplicatesInBody.EventName(),
					timestamp: rWithDuplicatesInBody.Timestamp(),
					body: log.MapValue(
						log.Int64("1", 3),
					),
					severity:                  rWithDuplicatesInBody.Severity(),
					severityText:              rWithDuplicatesInBody.SeverityText(),
					observedTimestamp:         rWithDuplicatesInBody.ObservedTimestamp(),
					resource:                  resource.NewSchemaless(attribute.String("key", "value")),
					attributeValueLengthLimit: 5,
					attributeCountLimit:       5,
					scope:                     &instrumentation.Scope{Name: "scope"},
					front: [attributesInlineCount]log.KeyValue{
						log.String("k1", "str"),
						log.Float64("k2", 1.0),
					},
					nFront: 2,
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Clean up the records before the test; p0 and p1 are shared
			// across cases.
			p0.records = nil
			p1.records = nil
			tc.logger.Emit(tc.ctx, tc.record)
			// Both processors attached to the provider must observe the
			// same records.
			assert.Equal(t, tc.expectedRecords, p0.records)
			assert.Equal(t, tc.expectedRecords, p1.records)
		})
	}
}
func TestLoggerEnabled(t *testing.T) {
Move `log.Processor.Enabled` to independent `FilterProcessor` interfaced type (#5692) Closes #5425 Our current log `Processor` interface contains more functionality than the [OTel spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/sdk.md#logrecordprocessor-operations). The additional functionality allows processors to report back to the API if a Record should be constructed and emitted or not, which is quite helpful[^1][^2][^3][^4][^5]. This removes the `Enabled` method from the `Processor` type. It adds this functionality a new optional and experimental `FilterProcessor` interface type. The logger and provider are updated to check for this optional interface to be implemented with the configured processors and uses them to back the `Logger.Enabled` method, preserving existing functionality. By making this change: - The `Processor` interface is now compliant with the OTel spec and does not contain any additional unspecified behavior. - All `Processor` implementations are no longer required to implement an `Enabled` method. The default, when they do not implement this method, is to assume they are enabled. ### Benchmark ```terminal goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz │ old.txt │ new7.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-8 133.30n ± 3% 32.36n ± 3% -75.72% (p=0.000 n=10) │ old.txt │ new7.txt │ │ B/op │ B/op vs base │ LoggerEnabled-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal │ old.txt │ new7.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` This is a significant performance improvement due to the `Record` no longer being converted from the API version to the SDK version. 
[^1]: https://pkg.go.dev/go.opentelemetry.io/contrib/processors/minsev [^2]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log#BatchProcessor.Enabled [^3]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log#SimpleProcessor.Enabled [^4]: https://github.com/open-telemetry/opentelemetry-go-contrib/blob/af75717ac4fb3ba13eaea83b88301723122060cf/bridges/otelslog/handler.go#L206-L211 [^5]: https://github.com/open-telemetry/opentelemetry-go-contrib/blob/d0309ddd8c5714af1cd9dbe8b39b7e8f10485679/bridges/otelzap/core.go#L142-L146 --------- Co-authored-by: Robert Pająk <pellared@hotmail.com> Co-authored-by: Sam Xie <sam@samxie.me>
2024-08-22 09:12:23 -07:00
p0 := newFltrProcessor("0", true)
p1 := newFltrProcessor("1", true)
p2WithDisabled := newFltrProcessor("2", false)
testCases := []struct {
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
name string
logger *logger
ctx context.Context
param log.EnabledParameters
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
expected bool
expectedP0Params []EnabledParameters
expectedP1Params []EnabledParameters
expectedP2Params []EnabledParameters
}{
{
name: "NoProcessors",
logger: newLogger(NewLoggerProvider(), instrumentation.Scope{}),
ctx: context.Background(),
expected: false,
},
{
name: "WithProcessors",
logger: newLogger(NewLoggerProvider(
WithProcessor(p0),
WithProcessor(p1),
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
), instrumentation.Scope{Name: "scope"}),
ctx: context.Background(),
param: log.EnabledParameters{
Severity: log.SeverityInfo,
EventName: "test_event",
},
expected: true,
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
expectedP0Params: []EnabledParameters{{
InstrumentationScope: instrumentation.Scope{Name: "scope"},
Severity: log.SeverityInfo,
EventName: "test_event",
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
}},
expectedP1Params: nil,
},
{
name: "WithDisabledProcessors",
logger: newLogger(NewLoggerProvider(
WithProcessor(p2WithDisabled),
), instrumentation.Scope{}),
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
ctx: context.Background(),
expected: false,
expectedP2Params: []EnabledParameters{{}},
},
{
name: "ContainsDisabledProcessor",
logger: newLogger(NewLoggerProvider(
WithProcessor(p2WithDisabled),
WithProcessor(p0),
), instrumentation.Scope{}),
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
ctx: context.Background(),
expected: true,
expectedP2Params: []EnabledParameters{{}},
expectedP0Params: []EnabledParameters{{}},
},
{
name: "WithNilContext",
logger: newLogger(NewLoggerProvider(
WithProcessor(p0),
WithProcessor(p1),
), instrumentation.Scope{}),
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
ctx: nil,
expected: true,
expectedP0Params: []EnabledParameters{{}},
expectedP1Params: nil,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
// Clean up the records before the test.
p0.params = nil
p1.params = nil
p2WithDisabled.params = nil
assert.Equal(t, tc.expected, tc.logger.Enabled(tc.ctx, tc.param))
sdk/log: Add FilterProcessor and EnabledParameters (#6317) Per https://github.com/open-telemetry/opentelemetry-go/pull/6271#issuecomment-2657554647 > We agreed that we can move `FilterProcessor` directly to `sdk/log` as Logs SDK does not look to be stabilized soon. - Add the possibility to filter based on the resource and scope which is available for the SDK. The scope information is the most important as it gives the possibility to e.g. filter out logs emitted for a given logger. Thus e.g. https://github.com/open-telemetry/opentelemetry-specification/issues/4364 is not necessary. See https://github.com/open-telemetry/opentelemetry-specification/pull/4290#discussion_r1927546170 for more context. - It is going be an example for https://github.com/open-telemetry/opentelemetry-specification/issues/4363 There is a little overhead (IMO totally acceptable) because of data transformation. Most importantly, there is no new heap allocation. ``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H │ old.txt │ new.txt │ │ sec/op │ sec/op vs base │ LoggerEnabled-20 4.589n ± 1% 319.750n ± 16% +6867.75% (p=0.000 n=10) │ old.txt │ new.txt │ │ B/op │ B/op vs base │ LoggerEnabled-20 0.000Ki ± 0% 1.093Ki ± 13% ? (p=0.000 n=10) │ old.txt │ new.txt │ │ allocs/op │ allocs/op vs base │ LoggerEnabled-20 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ ¹ all samples are equal ``` `Logger.Enabled` is still more efficient than `Logger.Emit` (benchmarks from https://github.com/open-telemetry/opentelemetry-go/pull/6315). 
``` goos: linux goarch: amd64 pkg: go.opentelemetry.io/otel/sdk/log cpu: 13th Gen Intel(R) Core(TM) i7-13800H BenchmarkLoggerEmit/5_attributes-20 559934 2391 ns/op 39088 B/op 1 allocs/op BenchmarkLoggerEmit/10_attributes-20 1000000 5910 ns/op 49483 B/op 5 allocs/op BenchmarkLoggerEnabled-20 1605697 968.7 ns/op 1272 B/op 0 allocs/op PASS ok go.opentelemetry.io/otel/sdk/log 10.789s ``` Prior art: - https://github.com/open-telemetry/opentelemetry-go/pull/6271 - https://github.com/open-telemetry/opentelemetry-go/pull/6286 I also created for tracking purposes: - https://github.com/open-telemetry/opentelemetry-go/issues/6328
2025-02-18 22:35:14 +01:00
assert.Equal(t, tc.expectedP0Params, p0.params)
assert.Equal(t, tc.expectedP1Params, p1.params)
assert.Equal(t, tc.expectedP2Params, p2WithDisabled.params)
})
}
}
// TestLoggerSelfObservability verifies that the SDK's self-observability
// log-record counter is emitted only when the experimental
// OTEL_GO_X_SELF_OBSERVABILITY flag is set, and that its value matches the
// number of records passed to Emit.
func TestLoggerSelfObservability(t *testing.T) {
	tests := []struct {
		name                     string
		selfObservabilityEnabled bool
		records                  []log.Record
		wantLogRecordCount       int64
	}{
		{
			name:                     "Disabled",
			selfObservabilityEnabled: false,
			records:                  []log.Record{{}, {}},
			wantLogRecordCount:       0,
		},
		{
			name:                     "Enabled",
			selfObservabilityEnabled: true,
			records:                  []log.Record{{}, {}, {}, {}, {}},
			wantLogRecordCount:       5,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", strconv.FormatBool(tt.selfObservabilityEnabled))

			// Install a manual-reader meter provider globally; restore the
			// previous global provider when the subtest finishes.
			prevProvider := otel.GetMeterProvider()
			t.Cleanup(func() {
				otel.SetMeterProvider(prevProvider)
			})
			reader := sdkmetric.NewManualReader()
			otel.SetMeterProvider(sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)))

			logger := newLogger(NewLoggerProvider(), instrumentation.Scope{})
			for _, rec := range tt.records {
				logger.Emit(context.Background(), rec)
			}

			var collected metricdata.ResourceMetrics
			assert.NoError(t, reader.Collect(context.Background(), &collected))
			if tt.wantLogRecordCount == 0 {
				// Self-observability disabled: nothing must be recorded.
				assert.Empty(t, collected.ScopeMetrics)
				return
			}

			require.Len(t, collected.ScopeMetrics, 1)
			scopeMetrics := collected.ScopeMetrics[0]
			assert.Equal(t, instrumentation.Scope{
				Name:      "go.opentelemetry.io/otel/sdk/log",
				Version:   sdk.Version(),
				SchemaURL: semconv.SchemaURL,
			}, scopeMetrics.Scope)

			// The counter must be a monotonic cumulative sum whose single
			// data point equals the number of emitted records.
			instr := otelconv.SDKLogCreated{}
			want := metricdata.Metrics{
				Name:        instr.Name(),
				Description: instr.Description(),
				Unit:        instr.Unit(),
				Data: metricdata.Sum[int64]{
					DataPoints:  []metricdata.DataPoint[int64]{{Value: tt.wantLogRecordCount}},
					Temporality: metricdata.CumulativeTemporality,
					IsMonotonic: true,
				},
			}
			metricdatatest.AssertEqual(t, want, scopeMetrics.Metrics[0], metricdatatest.IgnoreTimestamp())
		})
	}
}
// TestNewLoggerSelfObservabilityErrorHandled verifies that when the
// self-observability instrument cannot be created, newLogger reports the
// failure through the global error handler instead of panicking.
func TestNewLoggerSelfObservabilityErrorHandled(t *testing.T) {
	// Capture everything sent to the global error handler; restore the
	// previous handler when the test ends.
	prevHandler := otel.GetErrorHandler()
	t.Cleanup(func() {
		otel.SetErrorHandler(prevHandler)
	})
	var got []error
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(e error) { got = append(got, e) }))

	// Install a meter provider whose instrument creation always fails.
	prevProvider := otel.GetMeterProvider()
	t.Cleanup(func() { otel.SetMeterProvider(prevProvider) })
	otel.SetMeterProvider(&errMeterProvider{err: assert.AnError})

	t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")
	_ = newLogger(NewLoggerProvider(), instrumentation.Scope{})

	require.Len(t, got, 1)
	assert.ErrorIs(t, got[0], assert.AnError)
}
// errMeterProvider is a metric.MeterProvider test double whose Meters
// always fail instrument creation with err. Methods other than Meter are
// inherited from the embedded interface and will panic if called.
type errMeterProvider struct {
	metric.MeterProvider
	err error
}
// Meter returns a Meter whose instrument constructors always fail with
// the provider's configured error.
func (mp *errMeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
	m := &errMeter{err: mp.err}
	return m
}
// errMeter is a metric.Meter test double whose instrument constructors
// always return err. Methods not overridden below are inherited from the
// embedded interface and will panic if called.
type errMeter struct {
	metric.Meter
	err error
}
// Int64Counter always fails with the meter's configured error.
func (m *errMeter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	var counter metric.Int64Counter // zero value: nil interface
	return counter, m.err
}
// Int64UpDownCounter always fails with the meter's configured error.
func (m *errMeter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
	var counter metric.Int64UpDownCounter // zero value: nil interface
	return counter, m.err
}