mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2025-10-31 00:07:40 +02:00

sdk/log: Fix AddAttributes, SetAttributes, SetBody on Record to not mutate input (#7403)

Fixes #7364

Allocations now happen only when necessary, that is, when attributes are
deduplicated or when data is truncated because of the configured limits
(a sketch of the approach follows the benchmark results below).

```
goos: linux
goarch: amd64
pkg: go.opentelemetry.io/otel/sdk/log
cpu: 13th Gen Intel(R) Core(TM) i7-13800H
                                              │    old.txt    │                new.txt                 │
                                              │    sec/op     │    sec/op      vs base                 │
AddAttributes/Single/NoLimits-20                 57.91n ± 19%    29.12n ±  5%   -49.71% (p=0.000 n=10)
AddAttributes/Single/AllowDuplicates-20          24.21n ±  2%    24.46n ± 17%    +1.05% (p=0.011 n=10)
AddAttributes/Unique/NoLimits-20                 174.6n ±  8%    184.2n ±  7%    +5.47% (p=0.027 n=10)
AddAttributes/Unique/AllowDuplicates-20          69.79n ± 22%    67.83n ±  9%    -2.81% (p=0.019 n=10)
AddAttributes/Deduplication/Enabled-20           144.5n ±  4%    165.8n ±  4%   +14.71% (p=0.000 n=10)
AddAttributes/NestedDeduplication/Enabled-20     439.2n ±  2%    792.4n ± 11%   +80.41% (p=0.000 n=10)
AddAttributes/NestedDeduplication/Disabled-20   162.60n ±  1%    86.84n ±  5%   -46.59% (p=0.000 n=10)
AddAttributes/Deduplication/Disabled-20          68.15n ±  8%    67.59n ±  8%         ~ (p=0.190 n=10)
AddAttributes/CountLimit/Hit-20                  629.1n ±  4%    621.0n ±  7%         ~ (p=0.325 n=10)
AddAttributes/CountLimit/NotHit-20               914.6n ±  7%    944.2n ± 13%         ~ (p=0.393 n=10)
AddAttributes/ValueLimit/Hit-20                  133.5n ±  2%    162.1n ±  2%   +21.43% (p=0.000 n=10)
AddAttributes/ValueLimit/NotHit-20               115.8n ±  9%    131.2n ±  8%   +13.26% (p=0.000 n=10)
SetAttributes/Single/NoLimits-20                 59.38n ±  2%    28.89n ± 22%   -51.36% (p=0.000 n=10)
SetAttributes/Single/AllowDuplicates-20          24.19n ±  1%    24.30n ±  2%         ~ (p=0.867 n=10)
SetAttributes/Unique/NoLimits-20                 179.8n ±  2%    200.7n ±  6%   +11.66% (p=0.000 n=10)
SetAttributes/Unique/AllowDuplicates-20          72.65n ±  6%    71.91n ±  6%         ~ (p=0.218 n=10)
SetAttributes/Deduplication/Enabled-20           143.9n ±  6%    188.5n ± 12%   +30.96% (p=0.000 n=10)
SetAttributes/Deduplication/Disabled-20          77.68n ± 12%    69.20n ±  8%   -10.92% (p=0.008 n=10)
SetAttributes/NestedDeduplication/Enabled-20     443.7n ± 11%    770.8n ±  7%   +73.75% (p=0.000 n=10)
SetAttributes/NestedDeduplication/Disabled-20   172.60n ±  4%    91.73n ± 11%   -46.85% (p=0.000 n=10)
SetAttributes/CountLimit/Hit-20                  629.1n ±  2%    631.3n ±  7%         ~ (p=0.971 n=10)
SetAttributes/CountLimit/NotHit-20               909.2n ±  8%    866.4n ±  7%         ~ (p=0.143 n=10)
SetAttributes/ValueLimit/Hit-20                  135.5n ±  2%    150.2n ±  5%   +10.85% (p=0.000 n=10)
SetAttributes/ValueLimit/NotHit-20               121.7n ±  7%    137.2n ±  6%   +12.65% (p=0.001 n=10)
SetAttributes/Overwrite/Existing-20              176.0n ±  5%    184.1n ±  2%    +4.60% (p=0.005 n=10)
SetBody/Simple/NoLimits-20                       13.54n ±  1%    13.34n ±  2%    -1.48% (p=0.027 n=10)
SetBody/Simple/WithLimits-20                     13.46n ± 11%    13.91n ±  9%         ~ (p=0.541 n=10)
SetBody/UniqueMap/NoLimits-20                    216.2n ±  8%    183.8n ±  3%   -14.94% (p=0.000 n=10)
SetBody/UniqueMap/AllowDuplicates-20             12.89n ±  1%    12.75n ±  1%    -1.05% (p=0.001 n=10)
SetBody/Deduplication/Enabled-20                 113.5n ±  2%    164.4n ±  4%   +44.85% (p=0.000 n=10)
SetBody/Deduplication/Disabled-20                12.93n ± 41%    12.74n ±  1%    -1.51% (p=0.000 n=10)
SetBody/NestedDeduplication/Enabled-20           206.4n ±  2%    493.4n ±  2%  +139.05% (p=0.000 n=10)
SetBody/NestedDeduplication/Disabled-20          12.93n ±  1%    12.98n ± 11%         ~ (p=0.515 n=10)
SetBody/ValueLimit/Hit-20                        86.22n ±  1%   110.25n ±  3%   +27.87% (p=0.000 n=10)
SetBody/ValueLimit/NoHit-20                      85.52n ±  8%   104.25n ±  1%   +21.91% (p=0.000 n=10)
geomean                                          104.2n          107.1n          +2.84%

                                              │   old.txt    │            new.txt             │
                                              │     B/op     │    B/op     vs base            │
AddAttributes/Single/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Single/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Unique/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Unique/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Deduplication/Enabled-20            0.0 ± 0%     128.0 ± 0%  ? (p=0.000 n=10)
AddAttributes/NestedDeduplication/Enabled-20      0.0 ± 0%     312.0 ± 0%  ? (p=0.000 n=10)
AddAttributes/NestedDeduplication/Disabled-20   0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Deduplication/Disabled-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/CountLimit/Hit-20                 208.0 ± 0%     208.0 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/CountLimit/NotHit-20              640.0 ± 0%     640.0 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/ValueLimit/Hit-20                 0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/ValueLimit/NotHit-20              0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Single/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Single/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Unique/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Unique/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Deduplication/Enabled-20            0.0 ± 0%     128.0 ± 0%  ? (p=0.000 n=10)
SetAttributes/Deduplication/Disabled-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/NestedDeduplication/Enabled-20      0.0 ± 0%     312.0 ± 0%  ? (p=0.000 n=10)
SetAttributes/NestedDeduplication/Disabled-20   0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/CountLimit/Hit-20                 208.0 ± 0%     208.0 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/CountLimit/NotHit-20              640.0 ± 0%     640.0 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/ValueLimit/Hit-20                 0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/ValueLimit/NotHit-20              0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Overwrite/Existing-20             0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/Simple/NoLimits-20                      0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/Simple/WithLimits-20                    0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/UniqueMap/NoLimits-20                   0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/UniqueMap/AllowDuplicates-20            0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/Deduplication/Enabled-20                  0.0 ± 0%     128.0 ± 0%  ? (p=0.000 n=10)
SetBody/Deduplication/Disabled-20               0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/NestedDeduplication/Enabled-20            0.0 ± 0%     280.0 ± 0%  ? (p=0.000 n=10)
SetBody/NestedDeduplication/Disabled-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/ValueLimit/Hit-20                       0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/ValueLimit/NoHit-20                     0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
geomean                                                    ²               ?                ²
¹ all samples are equal
² summaries must be >0 to compute geomean

                                              │   old.txt    │            new.txt             │
                                              │  allocs/op   │ allocs/op   vs base            │
AddAttributes/Single/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Single/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Unique/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Unique/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Deduplication/Enabled-20          0.000 ± 0%     1.000 ± 0%  ? (p=0.000 n=10)
AddAttributes/NestedDeduplication/Enabled-20    0.000 ± 0%     5.000 ± 0%  ? (p=0.000 n=10)
AddAttributes/NestedDeduplication/Disabled-20   0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/Deduplication/Disabled-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/CountLimit/Hit-20                 1.000 ± 0%     1.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/CountLimit/NotHit-20              1.000 ± 0%     1.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/ValueLimit/Hit-20                 0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
AddAttributes/ValueLimit/NotHit-20              0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Single/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Single/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Unique/NoLimits-20                0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Unique/AllowDuplicates-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Deduplication/Enabled-20          0.000 ± 0%     1.000 ± 0%  ? (p=0.000 n=10)
SetAttributes/Deduplication/Disabled-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/NestedDeduplication/Enabled-20    0.000 ± 0%     5.000 ± 0%  ? (p=0.000 n=10)
SetAttributes/NestedDeduplication/Disabled-20   0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/CountLimit/Hit-20                 1.000 ± 0%     1.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/CountLimit/NotHit-20              1.000 ± 0%     1.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/ValueLimit/Hit-20                 0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/ValueLimit/NotHit-20              0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetAttributes/Overwrite/Existing-20             0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/Simple/NoLimits-20                      0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/Simple/WithLimits-20                    0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/UniqueMap/NoLimits-20                   0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/UniqueMap/AllowDuplicates-20            0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/Deduplication/Enabled-20                0.000 ± 0%     1.000 ± 0%  ? (p=0.000 n=10)
SetBody/Deduplication/Disabled-20               0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/NestedDeduplication/Enabled-20          0.000 ± 0%     4.000 ± 0%  ? (p=0.000 n=10)
SetBody/NestedDeduplication/Disabled-20         0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/ValueLimit/Hit-20                       0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
SetBody/ValueLimit/NoHit-20                     0.000 ± 0%     0.000 ± 0%  ~ (p=1.000 n=10) ¹
geomean                                                    ²               ?                ²
¹ all samples are equal
² summaries must be >0 to compute geomean
```
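
The change follows a copy-on-write approach: incoming attributes are scanned first, and a new backing slice is built only when deduplication or truncation would actually change them; otherwise the caller's slice is left untouched. Below is a minimal, illustrative sketch of the deduplication half of that idea, built only on the public `go.opentelemetry.io/otel/log` types. It mirrors the behavior of the unexported `dedup` helper in this commit but is not the exact implementation (the real helper reuses scratch buffers from a `sync.Pool`).

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/log"
)

// dedup returns kvs with duplicate keys collapsed (last value wins).
// The input slice is never written to: if no duplicates are found the
// original slice is returned as-is, and a copy is kept only when needed.
func dedup(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) {
	index := make(map[string]int, len(kvs))  // key -> position in out
	out := make([]log.KeyValue, 0, len(kvs)) // scratch; pooled in the real code
	for _, kv := range kvs {
		if i, ok := index[kv.Key]; ok {
			out[i] = kv // last-value-wins for duplicate keys
			dropped++
			continue
		}
		index[kv.Key] = len(out)
		out = append(out, kv)
	}
	if dropped == 0 {
		return kvs, 0 // nothing changed: hand back the caller's slice
	}
	return out, dropped
}

func main() {
	attrs := []log.KeyValue{
		log.String("k", "v1"),
		log.String("other", "x"),
		log.String("k", "v2"),
	}
	unique, dropped := dedup(attrs)
	fmt.Println(dropped)                    // 1
	fmt.Println(unique[0].Value.AsString()) // "v2"
	fmt.Println(attrs[0].Value.AsString())  // still "v1": the input was not mutated
}
```

This is consistent with the allocation tables above: new B/op and allocs/op show up only in the `Deduplication/Enabled` and `NestedDeduplication/Enabled` cases, i.e. the paths where a copy of the attribute slice must be built.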
Author: Robert Pająk
Date: 2025-10-16 21:07:08 +02:00
Committer: GitHub
Parent: f346decaf6
Commit: b5b6989c7a
5 changed files with 722 additions and 74 deletions

View File

@@ -30,6 +30,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
### Removed

View File

@@ -1,11 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:build !race
// +build !race
package log
func race() bool {
return false
}

View File

@@ -1,11 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
//go:build race
// +build race
package log
func race() bool {
return true
}

View File

@@ -27,7 +27,25 @@ var logAttrDropped = sync.OnceFunc(func() {
global.Warn("limit reached: dropping log Record attributes")
})
// indexPool is a pool of index maps used for de-duplication.
// uniquePool is a pool of unique attributes used for attributes de-duplication.
var uniquePool = sync.Pool{
New: func() any { return new([]log.KeyValue) },
}
func getUnique() *[]log.KeyValue {
return uniquePool.Get().(*[]log.KeyValue)
}
func putUnique(v *[]log.KeyValue) {
// To reduce peak allocation.
const maxUniqueSize = 1028
if cap(*v) <= maxUniqueSize {
*v = (*v)[:0]
uniquePool.Put(v)
}
}
// indexPool is a pool of index maps used for attributes de-duplication.
var indexPool = sync.Pool{
New: func() any { return make(map[string]int) },
}
@@ -41,6 +59,20 @@ func putIndex(index map[string]int) {
indexPool.Put(index)
}
// seenPool is a pool of seen keys used for maps de-duplication.
var seenPool = sync.Pool{
New: func() any { return make(map[string]struct{}) },
}
func getSeen() map[string]struct{} {
return seenPool.Get().(map[string]struct{})
}
func putSeen(seen map[string]struct{}) {
clear(seen)
seenPool.Put(seen)
}
// Record is a log record emitted by the Logger.
// A log record with non-empty event name is interpreted as an event record.
//
@@ -212,34 +244,36 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) {
}
if !r.allowDupKeys {
// Use a slice from the pool to avoid modifying the original.
// Note, do not iterate attrs twice by just calling dedup(attrs) here.
unique := getUnique()
defer putUnique(unique)
// Used to find duplicates between attrs and existing attributes in r.
rIndex := r.attrIndex()
defer putIndex(rIndex)
// Unique attrs that need to be added to r. This uses the same underlying
// array as attrs.
//
// Note, do not iterate attrs twice by just calling dedup(attrs) here.
unique := attrs[:0]
// Used to find duplicates within attrs itself. The index value is the
// index of the element in unique.
// Used to find duplicates within attrs itself.
// The index value is the index of the element in unique.
uIndex := getIndex()
defer putIndex(uIndex)
dropped := 0
// Deduplicate attrs within the scope of all existing attributes.
for _, a := range attrs {
// Last-value-wins for any duplicates in attrs.
idx, found := uIndex[a.Key]
if found {
r.addDropped(1)
unique[idx] = a
dropped++
(*unique)[idx] = a
continue
}
idx, found = rIndex[a.Key]
if found {
// New attrs overwrite any existing with the same key.
r.addDropped(1)
dropped++
if idx < 0 {
r.front[-(idx + 1)] = a
} else {
@@ -247,11 +281,16 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) {
}
} else {
// Unique attribute.
unique = append(unique, a)
uIndex[a.Key] = len(unique) - 1
(*unique) = append(*unique, a)
uIndex[a.Key] = len(*unique) - 1
}
}
attrs = unique
if dropped > 0 {
attrs = make([]log.KeyValue, len(*unique))
copy(attrs, *unique)
r.addDropped(dropped)
}
}
if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit {
@@ -294,15 +333,17 @@ func (r *Record) addAttrs(attrs []log.KeyValue) {
var i int
for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
a := attrs[i]
r.front[r.nFront] = r.applyAttrLimits(a)
r.front[r.nFront] = r.applyAttrLimitsAndDedup(a)
r.nFront++
}
for j, a := range attrs[i:] {
attrs[i+j] = r.applyAttrLimits(a)
}
// Make a copy to avoid modifying the original.
j := len(r.back)
r.back = slices.Grow(r.back, len(attrs[i:]))
r.back = append(r.back, attrs[i:]...)
for i, a := range r.back[j:] {
r.back[i+j] = r.applyAttrLimitsAndDedup(a)
}
}
// SetAttributes sets (and overrides) attributes to the log record.
@@ -321,13 +362,13 @@ func (r *Record) SetAttributes(attrs ...log.KeyValue) {
var i int
for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ {
a := attrs[i]
r.front[r.nFront] = r.applyAttrLimits(a)
r.front[r.nFront] = r.applyAttrLimitsAndDedup(a)
r.nFront++
}
r.back = slices.Clone(attrs[i:])
for i, a := range r.back {
r.back[i] = r.applyAttrLimits(a)
r.back[i] = r.applyAttrLimitsAndDedup(a)
}
}
@@ -342,20 +383,31 @@ func head(kvs []log.KeyValue, n int) (out []log.KeyValue, dropped int) {
// dedup deduplicates kvs front-to-back with the last value saved.
func dedup(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) {
if len(kvs) <= 1 {
return kvs, 0 // No deduplication needed.
}
index := getIndex()
defer putIndex(index)
unique = kvs[:0] // Use the same underlying array as kvs.
u := getUnique()
defer putUnique(u)
for _, a := range kvs {
idx, found := index[a.Key]
if found {
dropped++
unique[idx] = a
(*u)[idx] = a
} else {
unique = append(unique, a)
index[a.Key] = len(unique) - 1
*u = append(*u, a)
index[a.Key] = len(*u) - 1
}
}
if dropped == 0 {
return kvs, 0
}
unique = make([]log.KeyValue, len(*u))
copy(unique, *u)
return unique, dropped
}
@@ -421,59 +473,212 @@ func (r *Record) Clone() Record {
return res
}
func (r *Record) applyAttrLimits(attr log.KeyValue) log.KeyValue {
attr.Value = r.applyValueLimits(attr.Value)
func (r *Record) applyAttrLimitsAndDedup(attr log.KeyValue) log.KeyValue {
attr.Value = r.applyValueLimitsAndDedup(attr.Value)
return attr
}
func (r *Record) applyValueLimits(val log.Value) log.Value {
func (r *Record) applyValueLimitsAndDedup(val log.Value) log.Value {
switch val.Kind() {
case log.KindString:
s := val.AsString()
if len(s) > r.attributeValueLengthLimit {
if r.attributeValueLengthLimit >= 0 && len(s) > r.attributeValueLengthLimit {
val = log.StringValue(truncate(r.attributeValueLengthLimit, s))
}
case log.KindSlice:
sl := val.AsSlice()
for i := range sl {
sl[i] = r.applyValueLimits(sl[i])
// First check if any limits need to be applied.
needsChange := false
for _, v := range sl {
if r.needsValueLimitsOrDedup(v) {
needsChange = true
break
}
}
val = log.SliceValue(sl...)
if needsChange {
// Create a new slice to avoid modifying the original.
newSl := make([]log.Value, len(sl))
for i, item := range sl {
newSl[i] = r.applyValueLimitsAndDedup(item)
}
val = log.SliceValue(newSl...)
}
case log.KindMap:
kvs := val.AsMap()
var newKvs []log.KeyValue
var dropped int
if !r.allowDupKeys {
// Deduplicate then truncate. Do not do at the same time to avoid
// wasted truncation operations.
var dropped int
kvs, dropped = dedup(kvs)
// Deduplicate then truncate.
// Do not do at the same time to avoid wasted truncation operations.
newKvs, dropped = dedup(kvs)
r.addDropped(dropped)
} else {
newKvs = kvs
}
for i := range kvs {
kvs[i] = r.applyAttrLimits(kvs[i])
// Check if any attribute limits need to be applied.
needsChange := false
if dropped > 0 {
needsChange = true // Already changed by dedup.
} else {
for _, kv := range newKvs {
if r.needsValueLimitsOrDedup(kv.Value) {
needsChange = true
break
}
}
}
if needsChange {
// Only create new slice if changes are needed.
if dropped == 0 {
// Make a copy to avoid modifying the original.
newKvs = make([]log.KeyValue, len(kvs))
copy(newKvs, kvs)
}
for i := range newKvs {
newKvs[i] = r.applyAttrLimitsAndDedup(newKvs[i])
}
val = log.MapValue(newKvs...)
}
val = log.MapValue(kvs...)
}
return val
}
// needsValueLimitsOrDedup checks if a value would be modified by applyValueLimitsAndDedup.
func (r *Record) needsValueLimitsOrDedup(val log.Value) bool {
switch val.Kind() {
case log.KindString:
return r.attributeValueLengthLimit >= 0 && len(val.AsString()) > r.attributeValueLengthLimit
case log.KindSlice:
for _, v := range val.AsSlice() {
if r.needsValueLimitsOrDedup(v) {
return true
}
}
case log.KindMap:
kvs := val.AsMap()
if !r.allowDupKeys && len(kvs) > 1 {
// Check for duplicates.
hasDuplicates := func() bool {
seen := getSeen()
defer putSeen(seen)
for _, kv := range kvs {
if _, ok := seen[kv.Key]; ok {
return true
}
seen[kv.Key] = struct{}{}
}
return false
}()
if hasDuplicates {
return true
}
}
for _, kv := range kvs {
if r.needsValueLimitsOrDedup(kv.Value) {
return true
}
}
}
return false
}
func (r *Record) dedupeBodyCollections(val log.Value) log.Value {
switch val.Kind() {
case log.KindSlice:
sl := val.AsSlice()
for i := range sl {
sl[i] = r.dedupeBodyCollections(sl[i])
// Check if any nested values need deduplication.
needsChange := false
for _, item := range sl {
if r.needsBodyDedup(item) {
needsChange = true
break
}
}
val = log.SliceValue(sl...)
if needsChange {
// Create a new slice to avoid modifying the original.
newSl := make([]log.Value, len(sl))
for i, item := range sl {
newSl[i] = r.dedupeBodyCollections(item)
}
val = log.SliceValue(newSl...)
}
case log.KindMap:
kvs, _ := dedup(val.AsMap())
for i := range kvs {
kvs[i].Value = r.dedupeBodyCollections(kvs[i].Value)
kvs := val.AsMap()
newKvs, dropped := dedup(kvs)
// Check if any nested values need deduplication.
needsValueChange := false
for _, kv := range newKvs {
if r.needsBodyDedup(kv.Value) {
needsValueChange = true
break
}
}
if dropped > 0 || needsValueChange {
// Only create new value if changes are needed.
if dropped == 0 {
// Make a copy to avoid modifying the original.
newKvs = make([]log.KeyValue, len(kvs))
copy(newKvs, kvs)
}
for i := range newKvs {
newKvs[i].Value = r.dedupeBodyCollections(newKvs[i].Value)
}
val = log.MapValue(newKvs...)
}
val = log.MapValue(kvs...)
}
return val
}
// needsBodyDedup checks if a value would be modified by dedupeBodyCollections.
func (r *Record) needsBodyDedup(val log.Value) bool {
switch val.Kind() {
case log.KindSlice:
for _, item := range val.AsSlice() {
if r.needsBodyDedup(item) {
return true
}
}
case log.KindMap:
kvs := val.AsMap()
if len(kvs) > 1 {
// Check for duplicates.
hasDuplicates := func() bool {
seen := getSeen()
defer putSeen(seen)
for _, kv := range kvs {
if _, ok := seen[kv.Key]; ok {
return true
}
seen[kv.Key] = struct{}{}
}
return false
}()
if hasDuplicates {
return true
}
}
for _, kv := range kvs {
if r.needsBodyDedup(kv.Value) {
return true
}
}
}
return false
}
// truncate returns a truncated version of s such that it contains less than
// the limit number of characters. Truncation is applied by returning the limit
// number of valid characters contained in s.
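
The `putUnique` helper above also caps the capacity of slices it returns to the pool (the `maxUniqueSize` check, "to reduce peak allocation"), so one oversized record does not pin a large buffer in the pool for the lifetime of the process. A standalone sketch of that pooled scratch-buffer idiom follows; the names and the threshold here are illustrative, not the SDK's internals.

```go
package main

import (
	"fmt"
	"sync"

	"go.opentelemetry.io/otel/log"
)

// scratchPool hands out reusable []log.KeyValue scratch buffers.
// Pointers to slices are pooled so Put does not allocate a new slice header.
var scratchPool = sync.Pool{
	New: func() any { return new([]log.KeyValue) },
}

func getScratch() *[]log.KeyValue {
	return scratchPool.Get().(*[]log.KeyValue)
}

func putScratch(v *[]log.KeyValue) {
	// Drop unusually large buffers instead of pooling them, so a single
	// huge record does not keep a big allocation alive indefinitely.
	const maxPooledCap = 1024 // illustrative threshold
	if cap(*v) > maxPooledCap {
		return
	}
	*v = (*v)[:0] // reset length, keep capacity for reuse
	scratchPool.Put(v)
}

func main() {
	buf := getScratch()
	*buf = append(*buf, log.String("k", "v"))
	fmt.Println(len(*buf)) // 1
	putScratch(buf)
}
```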

View File

@@ -66,7 +66,7 @@ func TestRecordBody(t *testing.T) {
want log.Value
}{
{
name: "Bool",
name: "boolean value",
body: log.BoolValue(true),
want: log.BoolValue(true),
},
@@ -98,7 +98,7 @@ func TestRecordBody(t *testing.T) {
),
},
{
name: "nestedMap",
name: "nested map",
body: log.MapValue(
log.Map("key",
log.Int64("key", 1),
@@ -123,6 +123,268 @@ func TestRecordBody(t *testing.T) {
log.Int64("1", 3),
),
},
{
name: "slice with nested deduplication",
body: log.SliceValue(
log.MapValue(log.String("key", "value1"), log.String("key", "value2")),
log.StringValue("normal"),
log.SliceValue(
log.MapValue(log.String("nested", "val1"), log.String("nested", "val2")),
),
),
want: log.SliceValue(
log.MapValue(log.String("key", "value2")),
log.StringValue("normal"),
log.SliceValue(
log.MapValue(log.String("nested", "val2")),
),
),
},
{
name: "empty slice",
body: log.SliceValue(),
want: log.SliceValue(),
},
{
name: "empty map",
body: log.MapValue(),
want: log.MapValue(),
},
{
name: "single key map",
body: log.MapValue(log.String("single", "value")),
want: log.MapValue(log.String("single", "value")),
},
{
name: "slice with no deduplication needed",
body: log.SliceValue(
log.StringValue("value1"),
log.StringValue("value2"),
log.MapValue(log.String("unique1", "val1")),
log.MapValue(log.String("unique2", "val2")),
),
want: log.SliceValue(
log.StringValue("value1"),
log.StringValue("value2"),
log.MapValue(log.String("unique1", "val1")),
log.MapValue(log.String("unique2", "val2")),
),
},
{
name: "deeply nested slice and map structure",
body: log.SliceValue(
log.MapValue(
log.String("outer", "value"),
log.Slice("inner_slice",
log.MapValue(log.String("deep", "value1"), log.String("deep", "value2")),
),
),
),
want: log.SliceValue(
log.MapValue(
log.String("outer", "value"),
log.Slice("inner_slice",
log.MapValue(log.String("deep", "value2")),
),
),
),
},
{
name: "slice with duplicates allowed",
allowDuplicates: true,
body: log.SliceValue(
log.MapValue(log.String("key", "value1"), log.String("key", "value2")),
),
want: log.SliceValue(
log.MapValue(log.String("key", "value1"), log.String("key", "value2")),
),
},
{
name: "string value",
body: log.StringValue("test"),
want: log.StringValue("test"),
},
{
name: "boolean value without deduplication",
body: log.BoolValue(true),
want: log.BoolValue(true),
},
{
name: "integer value",
body: log.Int64Value(42),
want: log.Int64Value(42),
},
{
name: "float value",
body: log.Float64Value(3.14),
want: log.Float64Value(3.14),
},
{
name: "bytes value",
body: log.BytesValue([]byte("test")),
want: log.BytesValue([]byte("test")),
},
{
name: "empty slice",
body: log.SliceValue(),
want: log.SliceValue(),
},
{
name: "slice without nested deduplication",
body: log.SliceValue(log.StringValue("test"), log.BoolValue(true)),
want: log.SliceValue(log.StringValue("test"), log.BoolValue(true)),
},
{
name: "slice with nested deduplication needed",
body: log.SliceValue(log.MapValue(log.String("key", "value1"), log.String("key", "value2"))),
want: log.SliceValue(log.MapValue(log.String("key", "value2"))),
},
{
name: "empty map",
body: log.MapValue(),
want: log.MapValue(),
},
{
name: "single key map",
body: log.MapValue(log.String("key", "value")),
want: log.MapValue(log.String("key", "value")),
},
{
name: "map with duplicate keys",
body: log.MapValue(log.String("key", "value1"), log.String("key", "value2")),
want: log.MapValue(log.String("key", "value2")),
},
{
name: "map without duplicates",
body: log.MapValue(log.String("key1", "value1"), log.String("key2", "value2")),
want: log.MapValue(log.String("key1", "value1"), log.String("key2", "value2")),
},
{
name: "map with nested slice deduplication",
body: log.MapValue(
log.Slice("slice", log.MapValue(log.String("nested", "val1"), log.String("nested", "val2"))),
),
want: log.MapValue(
log.Slice("slice", log.MapValue(log.String("nested", "val2"))),
),
},
{
name: "deeply nested structure with deduplication",
body: log.SliceValue(
log.MapValue(
log.Map("nested",
log.String("key", "value1"),
log.String("key", "value2"),
),
),
),
want: log.SliceValue(
log.MapValue(
log.Map("nested",
log.String("key", "value2"),
),
),
),
},
{
name: "deeply nested structure without deduplication",
body: log.SliceValue(
log.MapValue(
log.Map("nested",
log.String("key1", "value1"),
log.String("key2", "value2"),
),
),
),
want: log.SliceValue(
log.MapValue(
log.Map("nested",
log.String("key1", "value1"),
log.String("key2", "value2"),
),
),
),
},
{
name: "string value for collection deduplication",
body: log.StringValue("test"),
want: log.StringValue("test"),
},
{
name: "boolean value for collection deduplication",
body: log.BoolValue(true),
want: log.BoolValue(true),
},
{
name: "empty slice for collection deduplication",
body: log.SliceValue(),
want: log.SliceValue(),
},
{
name: "slice without nested deduplication for collection testing",
body: log.SliceValue(log.StringValue("test"), log.BoolValue(true)),
want: log.SliceValue(log.StringValue("test"), log.BoolValue(true)),
},
{
name: "slice with nested map requiring deduplication",
body: log.SliceValue(
log.MapValue(log.String("key", "value1"), log.String("key", "value2")),
log.StringValue("normal"),
),
want: log.SliceValue(
log.MapValue(log.String("key", "value2")),
log.StringValue("normal"),
),
},
{
name: "deeply nested slice with map deduplication",
body: log.SliceValue(
log.SliceValue(
log.MapValue(log.String("deep", "value1"), log.String("deep", "value2")),
),
),
want: log.SliceValue(
log.SliceValue(
log.MapValue(log.String("deep", "value2")),
),
),
},
{
name: "empty map for collection deduplication",
body: log.MapValue(),
want: log.MapValue(),
},
{
name: "map with nested slice containing duplicates",
body: log.MapValue(
log.String("outer", "value"),
log.Slice("nested_slice",
log.MapValue(log.String("inner", "val1"), log.String("inner", "val2")),
),
),
want: log.MapValue(
log.String("outer", "value"),
log.Slice("nested_slice",
log.MapValue(log.String("inner", "val2")),
),
),
},
{
name: "map with key duplication and nested value deduplication",
body: log.MapValue(
log.String("key1", "value1"),
log.String("key1", "value2"), // key dedup
log.Slice("slice",
log.MapValue(log.String("nested", "val1"), log.String("nested", "val2")), // nested value dedup
),
),
want: log.MapValue(
log.String("key1", "value2"),
log.Slice("slice",
log.MapValue(log.String("nested", "val2")),
),
),
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
@@ -536,6 +798,46 @@ func TestRecordAttrDeduplication(t *testing.T) {
return out
}(),
},
{
name: "AttributeWithDuplicateKeys",
attrs: []log.KeyValue{
log.String("duplicate", "first"),
log.String("unique", "value"),
log.String("duplicate", "second"),
},
want: []log.KeyValue{
log.String("duplicate", "second"),
log.String("unique", "value"),
},
},
{
name: "ManyDuplicateKeys",
attrs: []log.KeyValue{
log.String("key", "value1"),
log.String("key", "value2"),
log.String("key", "value3"),
log.String("key", "value4"),
log.String("key", "value5"),
},
want: []log.KeyValue{
log.String("key", "value5"),
},
},
{
name: "InterleavedDuplicates",
attrs: []log.KeyValue{
log.String("a", "a1"),
log.String("b", "b1"),
log.String("a", "a2"),
log.String("c", "c1"),
log.String("b", "b2"),
},
want: []log.KeyValue{
log.String("a", "a2"),
log.String("b", "b2"),
log.String("c", "c1"),
},
},
}
for _, tc := range testcases {
@@ -657,6 +959,84 @@ func TestApplyAttrLimitsDeduplication(t *testing.T) {
),
wantDroppedAttrs: 10,
},
{
name: "EmptyMap",
input: log.MapValue(),
want: log.MapValue(),
wantDroppedAttrs: 0,
},
{
name: "SingleKeyMap",
input: log.MapValue(log.String("key1", "value1")),
want: log.MapValue(log.String("key1", "value1")),
wantDroppedAttrs: 0,
},
{
name: "EmptySlice",
input: log.SliceValue(),
want: log.SliceValue(),
wantDroppedAttrs: 0,
},
{
name: "SliceWithNestedDedup",
input: log.SliceValue(
log.MapValue(log.String("key", "value1"), log.String("key", "value2")),
log.StringValue("normal"),
),
want: log.SliceValue(
log.MapValue(log.String("key", "value2")),
log.StringValue("normal"),
),
wantDroppedAttrs: 1,
},
{
name: "NestedSliceInMap",
input: log.MapValue(
log.Slice("slice_key",
log.MapValue(log.String("nested", "value1"), log.String("nested", "value2")),
),
),
want: log.MapValue(
log.Slice("slice_key",
log.MapValue(log.String("nested", "value2")),
),
),
wantDroppedAttrs: 1,
},
{
name: "DeeplyNestedStructure",
input: log.MapValue(
log.Map("level1",
log.Map("level2",
log.Slice("level3",
log.MapValue(log.String("deep", "value1"), log.String("deep", "value2")),
),
),
),
),
want: log.MapValue(
log.Map("level1",
log.Map("level2",
log.Slice("level3",
log.MapValue(log.String("deep", "value2")),
),
),
),
),
wantDroppedAttrs: 1,
},
{
name: "NestedMapWithoutDuplicateKeys",
input: log.SliceValue((log.MapValue(
log.String("key1", "value1"),
log.String("key2", "value2"),
))),
want: log.SliceValue(log.MapValue(
log.String("key1", "value1"),
log.String("key2", "value2"),
)),
wantDroppedAttrs: 0,
},
}
for _, tc := range testcases {
@@ -770,6 +1150,42 @@ func TestApplyAttrLimitsTruncation(t *testing.T) {
log.Map("7", log.String("a", "")),
),
},
{
name: "LongStringTruncated",
limit: 5,
input: log.StringValue("This is a very long string that should be truncated"),
want: log.StringValue("This "),
},
{
name: "LongBytesNotTruncated",
limit: 5,
input: log.BytesValue([]byte("This is a very long byte array")),
want: log.BytesValue([]byte("This is a very long byte array")),
},
{
name: "TruncationInNestedMap",
limit: 3,
input: log.MapValue(
log.String("short", "ok"),
log.String("long", "toolong"),
),
want: log.MapValue(
log.String("short", "ok"),
log.String("long", "too"),
),
},
{
name: "TruncationInNestedSlice",
limit: 4,
input: log.SliceValue(
log.StringValue("good"),
log.StringValue("toolong"),
),
want: log.SliceValue(
log.StringValue("good"),
log.StringValue("tool"),
),
},
}
for _, tc := range testcases {
@@ -926,11 +1342,59 @@ func TestTruncate(t *testing.T) {
}
}
func TestRecordMethodsInputConcurrentSafe(t *testing.T) {
if race() {
t.Skip("TODO: Fix bug https://github.com/open-telemetry/opentelemetry-go/issues/7364.")
func TestRecordAddAttributesDoesNotMutateInput(t *testing.T) {
attrs := []log.KeyValue{
log.String("attr1", "very long value that will be truncated"),
log.String("attr2", "another very long value that will be truncated"),
log.String("attr3", "yet another very long value that will be truncated"),
log.String("attr4", "more very long value that will be truncated"),
log.String("attr5", "extra very long value that will be truncated"),
log.String("attr6", "additional very long value that will be truncated"),
log.String("attr7", "more additional very long value that will be truncated"),
}
originalValues := make([]string, len(attrs))
for i, kv := range attrs {
originalValues[i] = kv.Value.AsString()
}
r := &Record{
attributeValueLengthLimit: 20, // Short limit to trigger truncation.
attributeCountLimit: -1, // No count limit.
allowDupKeys: false,
}
r.AddAttributes(attrs...)
// Verify that the original shared slice was not mutated
for i, kv := range attrs {
if kv.Value.AsString() != originalValues[i] {
t.Errorf("Input slice was mutated! Attribute %d: original=%q, current=%q",
i, originalValues[i], kv.Value.AsString())
}
}
// Verify that the record has the truncated values
var gotAttrs []log.KeyValue
r.WalkAttributes(func(kv log.KeyValue) bool {
gotAttrs = append(gotAttrs, kv)
return true
})
wantAttr := []log.KeyValue{
log.String("attr1", "very long value that"),
log.String("attr2", "another very long va"),
log.String("attr3", "yet another very lon"),
log.String("attr4", "more very long value"),
log.String("attr5", "extra very long valu"),
log.String("attr6", "additional very long"),
log.String("attr7", "more additional very"),
}
if !slices.EqualFunc(gotAttrs, wantAttr, func(a, b log.KeyValue) bool { return a.Equal(b) }) {
t.Errorf("Attributes do not match.\ngot:\n%v\nwant:\n%v", printKVs(gotAttrs), printKVs(wantAttr))
}
}
func TestRecordMethodsInputConcurrentSafe(t *testing.T) {
nestedSlice := log.Slice("nested_slice",
log.SliceValue(log.StringValue("nested_inner1"), log.StringValue("nested_inner2")),
log.StringValue("nested_outer"),
@@ -990,7 +1454,7 @@ func TestRecordMethodsInputConcurrentSafe(t *testing.T) {
gotBody := r.Body()
wantBody := log.MapValue(
log.String("nested_key1", "duplicate"),
log.Map("nested_map", log.String("nested_inner_key", "nested_inn")),
log.Map("nested_map", log.String("nested_inner_key", "nested_inner_value")),
)
if !gotBody.Equal(wantBody) {
t.Errorf("Body does not match.\ngot:\n%v\nwant:\n%v", gotBody, wantBody)