Use https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize to update code to new style.

---------

Co-authored-by: Flc゛ <four_leaf_clover@foxmail.com>
Co-authored-by: Damien Mathieu <42@dmathieu.com>
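
The changes themselves are the analyzer's standard modernizations, applied mechanically across the repository: interface{} becomes the any alias, counted loops become Go 1.22 range-over-int loops, hand-written map and slice loops become maps.Copy and slices.Contains, if/else clamps become the min and max built-ins, []byte(fmt.Sprintf(...)) becomes fmt.Appendf, and per-iteration loop-variable copies (x := x) that Go 1.22 made redundant are deleted. The analyzer is typically applied with a command along the lines of go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./... (the exact invocation is described on the linked page).
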
Author: Mikhail Mazurskiy
Date: 2025-07-29 18:19:11 +10:00
Committed by: GitHub
Parent: 7bcbb6a49a
Commit: 5e1c62a2d5
97 changed files with 234 additions and 268 deletions

View File

@@ -78,7 +78,7 @@ func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
New: func() interface{} {
New: func() any {
return &bytes.Buffer{}
},
},
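
The interface{} to any rewrite seen here is purely cosmetic: since Go 1.18, any is a predeclared alias for interface{}, so the two spellings are interchangeable and compile to identical code. A minimal standalone sketch of the pooled-buffer pattern with the new spelling (independent of this repository):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool hands out reusable *bytes.Buffer values. The New field has type
// func() any; before Go 1.18 it had to be spelled func() interface{}.
var bufPool = sync.Pool{
	New: func() any {
		return &bytes.Buffer{}
	},
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer) // assert the any back to *bytes.Buffer
	buf.Reset()
	buf.WriteString("hello")
	fmt.Println(buf.String())
	bufPool.Put(buf)
}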

View File

@@ -12,7 +12,7 @@ import (
)
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) interface{} {
func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} {
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) interface{} {
func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} {
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) interface{} {
func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} {
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) interface{} {
func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} {
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
func AsBoolSlice(v interface{}) []bool {
func AsBoolSlice(v any) []bool {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool {
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
func AsInt64Slice(v interface{}) []int64 {
func AsInt64Slice(v any) []int64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 {
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
func AsFloat64Slice(v interface{}) []float64 {
func AsFloat64Slice(v any) []float64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 {
}
// AsStringSlice converts a string array into a slice into with same elements as array.
func AsStringSlice(v interface{}) []string {
func AsStringSlice(v any) []string {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil

View File

@@ -8,28 +8,28 @@ import (
"testing"
)
var wrapFloat64SliceValue = func(v interface{}) interface{} {
var wrapFloat64SliceValue = func(v any) any {
if vi, ok := v.([]float64); ok {
return Float64SliceValue(vi)
}
return nil
}
var wrapInt64SliceValue = func(v interface{}) interface{} {
var wrapInt64SliceValue = func(v any) any {
if vi, ok := v.([]int64); ok {
return Int64SliceValue(vi)
}
return nil
}
var wrapBoolSliceValue = func(v interface{}) interface{} {
var wrapBoolSliceValue = func(v any) any {
if vi, ok := v.([]bool); ok {
return BoolSliceValue(vi)
}
return nil
}
var wrapStringSliceValue = func(v interface{}) interface{} {
var wrapStringSliceValue = func(v any) any {
if vi, ok := v.([]string); ok {
return StringSliceValue(vi)
}
@@ -37,21 +37,21 @@ var wrapStringSliceValue = func(v interface{}) interface{} {
}
var (
wrapAsBoolSlice = func(v interface{}) interface{} { return AsBoolSlice(v) }
wrapAsInt64Slice = func(v interface{}) interface{} { return AsInt64Slice(v) }
wrapAsFloat64Slice = func(v interface{}) interface{} { return AsFloat64Slice(v) }
wrapAsStringSlice = func(v interface{}) interface{} { return AsStringSlice(v) }
wrapAsBoolSlice = func(v any) any { return AsBoolSlice(v) }
wrapAsInt64Slice = func(v any) any { return AsInt64Slice(v) }
wrapAsFloat64Slice = func(v any) any { return AsFloat64Slice(v) }
wrapAsStringSlice = func(v any) any { return AsStringSlice(v) }
)
func TestSliceValue(t *testing.T) {
type args struct {
v interface{}
v any
}
tests := []struct {
name string
args args
want interface{}
fn func(interface{}) interface{}
want any
fn func(any) any
}{
{
name: "Float64SliceValue() two items",
@@ -136,7 +136,7 @@ func BenchmarkStringSliceValue(b *testing.B) {
func BenchmarkAsFloat64Slice(b *testing.B) {
b.ReportAllocs()
var in interface{} = [2]float64{1, 2.3}
var in any = [2]float64{1, 2.3}
b.ResetTimer()
for i := 0; i < b.N; i++ {

View File

@@ -40,7 +40,7 @@ func TestDefined(t *testing.T) {
}
func TestJSONValue(t *testing.T) {
var kvs interface{} = [2]attribute.KeyValue{
var kvs any = [2]attribute.KeyValue{
attribute.String("A", "B"),
attribute.Int64("C", 1),
}

View File

@@ -35,7 +35,7 @@ type (
// will return the save value across versions. For this reason, Distinct
// should always be used as a map key instead of a Set.
Distinct struct {
iface interface{}
iface any
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct {
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} {
func computeDistinctFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
func computeDistinctReflect(kvs []KeyValue) interface{} {
func computeDistinctReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} {
func (l Set) MarshalLog() any {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()

View File

@@ -22,7 +22,7 @@ type Value struct {
vtype Type
numeric uint64
stringly string
slice interface{}
slice any
}
const (
@@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string {
type unknownValueType struct{}
// AsInterface returns Value's data as interface{}.
func (v Value) AsInterface() interface{} {
// AsInterface returns Value's data as any.
func (v Value) AsInterface() any {
switch v.Type() {
case BOOL:
return v.AsBool()
@@ -262,7 +262,7 @@ func (v Value) Emit() string {
func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
Value interface{}
Value any
}
jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface()

View File

@@ -18,7 +18,7 @@ func TestValue(t *testing.T) {
name string
value attribute.Value
wantType attribute.Type
wantValue interface{}
wantValue any
}{
{
name: "Key.Bool() correctly returns keys's internal bool value",

View File

@@ -21,7 +21,7 @@ func Attributes(attr []octrace.Attribute) []attribute.KeyValue {
return otelAttr
}
func AttributesFromMap(attr map[string]interface{}) []attribute.KeyValue {
func AttributesFromMap(attr map[string]any) []attribute.KeyValue {
otelAttr := make([]attribute.KeyValue, 0, len(attr))
for k, v := range attr {
otelAttr = append(otelAttr, attribute.KeyValue{
@@ -32,7 +32,7 @@ func AttributesFromMap(attr map[string]interface{}) []attribute.KeyValue {
return otelAttr
}
func AttributeValue(ocval interface{}) attribute.Value {
func AttributeValue(ocval any) attribute.Value {
switch v := ocval.(type) {
case bool:
return attribute.BoolValue(v)

View File

@@ -38,7 +38,7 @@ func TestAttributes(t *testing.T) {
}
func TestAttributesFromMap(t *testing.T) {
in := map[string]interface{}{
in := map[string]any{
"bool": true,
"int64": int64(49),
"float64": float64(1.618),

View File

@@ -75,7 +75,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 0.8,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
TraceID: octrace.TraceID([16]byte{1}),
SpanID: octrace.SpanID([8]byte{2}),
@@ -89,7 +89,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 1.5,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
TraceID: octrace.TraceID([16]byte{3}),
SpanID: octrace.SpanID([8]byte{4}),
@@ -102,7 +102,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 2.6,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
TraceID: octrace.TraceID([16]byte{5}),
SpanID: octrace.SpanID([8]byte{6}),
@@ -124,7 +124,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 0.9,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
TraceID: octrace.TraceID([16]byte{7}),
SpanID: octrace.SpanID([8]byte{8}),
@@ -137,7 +137,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 1.1,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
TraceID: octrace.TraceID([16]byte{9}),
SpanID: octrace.SpanID([8]byte{10}),
@@ -150,7 +150,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 2.7,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
TraceID: octrace.TraceID([16]byte{11}),
SpanID: octrace.SpanID([8]byte{12}),
@@ -836,7 +836,7 @@ func TestConvertMetrics(t *testing.T) {
Exemplar: &ocmetricdata.Exemplar{
Value: 0.8,
Timestamp: exemplarTime,
Attachments: map[string]interface{}{
Attachments: map[string]any{
ocmetricdata.AttachmentKeySpanContext: "notaspancontext",
},
},
@@ -1188,7 +1188,7 @@ func BenchmarkConvertExemplar(b *testing.B) {
data := make([]*ocmetricdata.Exemplar, b.N)
for i := range data {
a := make(ocmetricdata.Attachments, attchmentsN)
for j := 0; j < attchmentsN; j++ {
for j := range attchmentsN {
a[strconv.Itoa(j)] = rand.Int64()
}
data[i] = &ocmetricdata.Exemplar{
@@ -1214,7 +1214,7 @@ func BenchmarkConvertQuantiles(b *testing.B) {
data := make([]ocmetricdata.Snapshot, b.N)
for i := range data {
p := make(map[float64]float64, percentileN)
for j := 0; j < percentileN; j++ {
for range percentileN {
v := rand.Float64()
for v == 0 {
// Convert from [0, 1) interval to (0, 1).
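
Go 1.22 added ranging over an integer, which is what replaces the counted loops in these benchmarks: for j := range n iterates j over 0..n-1, and for range n simply runs the body n times when the index is unused. A small self-contained sketch:

package main

import "fmt"

func main() {
	const attachments = 3

	// Equivalent to: for j := 0; j < attachments; j++ { ... }
	m := make(map[string]int, attachments)
	for j := range attachments {
		m[fmt.Sprintf("key-%d", j)] = j
	}
	fmt.Println(len(m)) // 3

	// When the index is not needed, the loop variable is dropped entirely.
	count := 0
	for range attachments {
		count++
	}
	fmt.Println(count) // 3
}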

View File

@@ -75,7 +75,7 @@ func (s *Span) Annotate(attributes []octrace.Attribute, str string) {
}
// Annotatef adds a formatted annotation with attributes to this span.
func (s *Span) Annotatef(attributes []octrace.Attribute, format string, a ...interface{}) {
func (s *Span) Annotatef(attributes []octrace.Attribute, format string, a ...any) {
s.Annotate(attributes, fmt.Sprintf(format, a...))
}

View File

@@ -272,7 +272,7 @@ func TestSpanAddLinkFails(t *testing.T) {
ocS.AddLink(octrace.Link{
TraceID: octrace.TraceID([16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
SpanID: octrace.SpanID([8]byte{2, 0, 0, 0, 0, 0, 0, 0}),
Attributes: map[string]interface{}{
Attributes: map[string]any{
"foo": "bar",
"number": int64(3),
},

View File

@@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing"
import (
"context"
"fmt"
"maps"
"strconv"
"strings"
"sync"
@@ -139,7 +140,7 @@ func (s *bridgeSpan) SetOperationName(operationName string) ot.Span {
// - uint32 -> int64
// - uint64 -> string
// - float32 -> float64
func (s *bridgeSpan) SetTag(key string, value interface{}) ot.Span {
func (s *bridgeSpan) SetTag(key string, value any) ot.Span {
switch key {
case string(otext.SpanKind):
// TODO: Should we ignore it?
@@ -202,7 +203,7 @@ func (e *bridgeFieldEncoder) EmitFloat64(key string, value float64) {
e.emitCommon(key, value)
}
func (e *bridgeFieldEncoder) EmitObject(key string, value interface{}) {
func (e *bridgeFieldEncoder) EmitObject(key string, value any) {
e.emitCommon(key, value)
}
@@ -210,7 +211,7 @@ func (e *bridgeFieldEncoder) EmitLazyLogger(value otlog.LazyLogger) {
value(e)
}
func (e *bridgeFieldEncoder) emitCommon(key string, value interface{}) {
func (e *bridgeFieldEncoder) emitCommon(key string, value any) {
e.pairs = append(e.pairs, otTagToOTelAttr(key, value))
}
@@ -222,7 +223,7 @@ func otLogFieldsToOTelAttrs(fields []otlog.Field) []attribute.KeyValue {
return encoder.pairs
}
func (s *bridgeSpan) LogKV(alternatingKeyValues ...interface{}) {
func (s *bridgeSpan) LogKV(alternatingKeyValues ...any) {
fields, err := otlog.InterleavedKVToFields(alternatingKeyValues...)
if err != nil {
return
@@ -259,7 +260,7 @@ func (s *bridgeSpan) LogEvent(event string) {
s.LogEventWithPayload(event, nil)
}
func (s *bridgeSpan) LogEventWithPayload(event string, payload interface{}) {
func (s *bridgeSpan) LogEventWithPayload(event string, payload any) {
data := ot.LogData{
Event: event,
Payload: payload,
@@ -400,9 +401,7 @@ func (t *BridgeTracer) baggageGetHook(ctx context.Context, list iBaggage.List) i
// need to return a copy to ensure this.
merged := make(iBaggage.List, len(list))
for k, v := range list {
merged[k] = v
}
maps.Copy(merged, list)
for k, v := range items {
// Overwrite according to OpenTelemetry specification.
@@ -497,7 +496,7 @@ func (t *BridgeTracer) ContextWithSpanHook(ctx context.Context, span ot.Span) co
return ctx
}
func otTagsToOTelAttributesKindAndError(tags map[string]interface{}) ([]attribute.KeyValue, trace.SpanKind, bool) {
func otTagsToOTelAttributesKindAndError(tags map[string]any) ([]attribute.KeyValue, trace.SpanKind, bool) {
kind := trace.SpanKindInternal
err := false
var pairs []attribute.KeyValue
@@ -537,7 +536,7 @@ func otTagsToOTelAttributesKindAndError(tags map[string]interface{}) ([]attribut
// - uint32 -> int64
// - uint64 -> string
// - float32 -> float64
func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue {
func otTagToOTelAttr(k string, v any) attribute.KeyValue {
key := otTagToOTelAttrKey(k)
switch val := v.(type) {
case bool:
@@ -648,7 +647,7 @@ func (s fakeSpan) SpanContext() trace.SpanContext {
// interface.
//
// Currently only the HTTPHeaders and TextMap formats are supported.
func (t *BridgeTracer) Inject(sm ot.SpanContext, format interface{}, carrier interface{}) error {
func (t *BridgeTracer) Inject(sm ot.SpanContext, format any, carrier any) error {
bridgeSC, ok := sm.(*bridgeSpanContext)
if !ok {
return ot.ErrInvalidSpanContext
@@ -697,7 +696,7 @@ func (t *BridgeTracer) Inject(sm ot.SpanContext, format interface{}, carrier int
// interface.
//
// Currently only the HTTPHeaders and TextMap formats are supported.
func (t *BridgeTracer) Extract(format interface{}, carrier interface{}) (ot.SpanContext, error) {
func (t *BridgeTracer) Extract(format any, carrier any) (ot.SpanContext, error) {
builtinFormat, ok := format.(ot.BuiltinFormat)
if !ok {
return nil, ot.ErrUnsupportedFormat
@@ -791,7 +790,7 @@ func (t *textMapWrapper) loadMap() {
})
}
func newTextMapWrapperForExtract(carrier interface{}) (*textMapWrapper, error) {
func newTextMapWrapperForExtract(carrier any) (*textMapWrapper, error) {
t := &textMapWrapper{}
reader, ok := carrier.(ot.TextMapReader)
@@ -811,7 +810,7 @@ func newTextMapWrapperForExtract(carrier interface{}) (*textMapWrapper, error) {
return t, nil
}
func newTextMapWrapperForInject(carrier interface{}) (*textMapWrapper, error) {
func newTextMapWrapperForInject(carrier any) (*textMapWrapper, error) {
t := &textMapWrapper{}
writer, ok := carrier.(ot.TextMapWriter)

View File

@@ -261,8 +261,8 @@ func TestBridgeTracer_ExtractAndInject(t *testing.T) {
name string
injectCarrierType ot.BuiltinFormat
extractCarrierType ot.BuiltinFormat
extractCarrier interface{}
injectCarrier interface{}
extractCarrier any
injectCarrier any
extractErr error
injectErr error
}{
@@ -436,7 +436,7 @@ func TestBridgeTracer_StartSpan(t *testing.T) {
func Test_otTagToOTelAttr(t *testing.T) {
key := attribute.Key("test")
testCases := []struct {
value interface{}
value any
expected attribute.KeyValue
}{
{
@@ -628,17 +628,17 @@ func TestBridgeSpanContextPromotedMethods(t *testing.T) {
func TestBridgeCarrierBaggagePropagation(t *testing.T) {
carriers := []struct {
name string
factory func() interface{}
factory func() any
format ot.BuiltinFormat
}{
{
name: "TextMapCarrier",
factory: func() interface{} { return ot.TextMapCarrier{} },
factory: func() any { return ot.TextMapCarrier{} },
format: ot.TextMap,
},
{
name: "HTTPHeadersCarrier",
factory: func() interface{} { return ot.HTTPHeadersCarrier{} },
factory: func() any { return ot.HTTPHeadersCarrier{} },
format: ot.HTTPHeaders,
},
}
@@ -895,87 +895,87 @@ func TestBridgeSpan_LogFields(t *testing.T) {
func TestBridgeSpan_LogKV(t *testing.T) {
testCases := []struct {
name string
kv [2]interface{}
kv [2]any
expected attribute.KeyValue
}{
{
name: "string",
kv: [2]interface{}{"string", "value"},
kv: [2]any{"string", "value"},
expected: attribute.String("string", "value"),
},
{
name: "bool",
kv: [2]interface{}{"boolKey", true},
kv: [2]any{"boolKey", true},
expected: attribute.Bool("boolKey", true),
},
{
name: "int",
kv: [2]interface{}{"intKey", int(12)},
kv: [2]any{"intKey", int(12)},
expected: attribute.Int("intKey", 12),
},
{
name: "int8",
kv: [2]interface{}{"int8Key", int8(12)},
kv: [2]any{"int8Key", int8(12)},
expected: attribute.Int64("int8Key", 12),
},
{
name: "int16",
kv: [2]interface{}{"int16Key", int16(12)},
kv: [2]any{"int16Key", int16(12)},
expected: attribute.Int64("int16Key", 12),
},
{
name: "int32",
kv: [2]interface{}{"int32", int32(12)},
kv: [2]any{"int32", int32(12)},
expected: attribute.Int64("int32", 12),
},
{
name: "int64",
kv: [2]interface{}{"int64Key", int64(12)},
kv: [2]any{"int64Key", int64(12)},
expected: attribute.Int64("int64Key", 12),
},
{
name: "uint",
kv: [2]interface{}{"uintKey", uint(12)},
kv: [2]any{"uintKey", uint(12)},
expected: attribute.String("uintKey", strconv.FormatUint(12, 10)),
},
{
name: "uint8",
kv: [2]interface{}{"uint8Key", uint8(12)},
kv: [2]any{"uint8Key", uint8(12)},
expected: attribute.Int64("uint8Key", 12),
},
{
name: "uint16",
kv: [2]interface{}{"uint16Key", uint16(12)},
kv: [2]any{"uint16Key", uint16(12)},
expected: attribute.Int64("uint16Key", 12),
},
{
name: "uint32",
kv: [2]interface{}{"uint32Key", uint32(12)},
kv: [2]any{"uint32Key", uint32(12)},
expected: attribute.Int64("uint32Key", 12),
},
{
name: "uint64",
kv: [2]interface{}{"uint64Key", uint64(12)},
kv: [2]any{"uint64Key", uint64(12)},
expected: attribute.String("uint64Key", strconv.FormatUint(12, 10)),
},
{
name: "float32",
kv: [2]interface{}{"float32Key", float32(12)},
kv: [2]any{"float32Key", float32(12)},
expected: attribute.Float64("float32Key", float64(12)),
},
{
name: "float64",
kv: [2]interface{}{"float64Key", 1.1},
kv: [2]any{"float64Key", 1.1},
expected: attribute.Float64("float64Key", 1.1),
},
{
name: "error",
kv: [2]interface{}{"errorKey", fmt.Errorf("error")},
kv: [2]any{"errorKey", fmt.Errorf("error")},
expected: attribute.String("errorKey", "error"),
},
{
name: "objectKey",
kv: [2]interface{}{"objectKey", struct{}{}},
kv: [2]any{"objectKey", struct{}{}},
expected: attribute.String("objectKey", "{}"),
},
}

View File

@@ -6,6 +6,7 @@ package opentracing
import (
"context"
"fmt"
"maps"
"testing"
ot "github.com/opentracing/opentracing-go"
@@ -237,7 +238,7 @@ func (cast *currentActiveSpanTest) recordSpans(t *testing.T, ctx context.Context
type contextIntactTest struct {
contextKeyValues []mockContextKeyValue
recordedContextValues []interface{}
recordedContextValues []any
recordIdx int
}
@@ -289,7 +290,7 @@ func (coin *contextIntactTest) check(t *testing.T, tracer *mockTracer) {
}
minLen := min(len(coin.recordedContextValues), len(coin.contextKeyValues))
for i := 0; i < minLen; i++ {
for i := range minLen {
key := coin.contextKeyValues[i].Key
value := coin.contextKeyValues[i].Value
gotValue := coin.recordedContextValues[i]
@@ -362,7 +363,7 @@ func (bip *baggageItemsPreservationTest) check(t *testing.T, tracer *mockTracer)
}
minLen := min(len(bip.recordedBaggage), len(bip.baggageItems))
for i := 0; i < minLen; i++ {
for i := range minLen {
recordedItems := bip.recordedBaggage[i]
if len(recordedItems) != i+1 {
t.Errorf(
@@ -373,7 +374,7 @@ func (bip *baggageItemsPreservationTest) check(t *testing.T, tracer *mockTracer)
)
}
minItemLen := min(len(bip.baggageItems), i+1)
for j := 0; j < minItemLen; j++ {
for j := range minItemLen {
expectedItem := bip.baggageItems[j]
if gotValue, ok := recordedItems[expectedItem.key]; !ok {
t.Errorf("Missing baggage item %q in recording %d", expectedItem.key, i+1)
@@ -474,7 +475,7 @@ func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage,
t.Errorf("Expected %d recordings from %s, got %d", len(initialItems), apiDesc, len(recordings))
}
minRecLen := min(len(initialItems), len(recordings))
for i := 0; i < minRecLen; i++ {
for i := range minRecLen {
recordedItems := recordings[i]
expectedItemsInStep := (i + 1) * 2
if expectedItemsInStep != len(recordedItems) {
@@ -487,9 +488,7 @@ func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage,
)
}
recordedItemsCopy := make(map[string]string, len(recordedItems))
for k, v := range recordedItems {
recordedItemsCopy[k] = v
}
maps.Copy(recordedItemsCopy, recordedItems)
for j := 0; j < i+1; j++ {
otKey, otelKey := generateBaggageKeys(initialItems[j].key)
value := initialItems[j].value
@@ -721,7 +720,7 @@ func runOTOtelOT(
func TestOtTagToOTelAttrCheckTypeConversions(t *testing.T) {
tableTest := []struct {
key string
value interface{}
value any
expectedValueType attribute.Type
}{
{

View File

@@ -28,8 +28,8 @@ var (
)
type mockContextKeyValue struct {
Key interface{}
Value interface{}
Key any
Value any
}
type mockTracer struct {

View File

@@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return errors.New("nil receiver passed to UnmarshalJSON")
}
var x interface{}
var x any
if err := json.Unmarshal(b, &x); err != nil {
return err
}
@@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) {
if !ok {
return nil, fmt.Errorf("invalid code: %d", *c)
}
return []byte(fmt.Sprintf("%q", str)), nil
return fmt.Appendf(nil, "%q", str), nil
}
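
fmt.Appendf (added in Go 1.19) formats directly into a byte slice, so the []byte(fmt.Sprintf(...)) round trip above collapses into a single call and skips the intermediate string. A quick illustration:

package main

import "fmt"

func main() {
	str := "OK"

	// Old style: format to a string, then convert to []byte.
	old := []byte(fmt.Sprintf("%q", str))

	// New style: append the formatted bytes to a (possibly nil) slice.
	b := fmt.Appendf(nil, "%q", str)

	fmt.Println(string(old) == string(b)) // true
	fmt.Printf("%s\n", b)                 // "OK"
}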

View File

@@ -125,7 +125,7 @@ func TestExporterConcurrentSafe(t *testing.T) {
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
runs := new(uint64)
for i := 0; i < goroutines; i++ {
for range goroutines {
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -220,7 +220,7 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs)
}
var gzPool = sync.Pool{
New: func() interface{} {
New: func() any {
w := gzip.NewWriter(io.Discard)
return w
},
@@ -313,7 +313,7 @@ func (e retryableError) Unwrap() error {
return e.err
}
func (e retryableError) As(target interface{}) bool {
func (e retryableError) As(target any) bool {
if e.err == nil {
return false
}

View File

@@ -93,7 +93,7 @@ func TestExporterConcurrentSafe(t *testing.T) {
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
runs := new(uint64)
for i := 0; i < goroutines; i++ {
for range goroutines {
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -135,7 +135,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error {
}
// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
return struct{ Type string }{Type: "OTLP/gRPC"}
}

View File

@@ -37,7 +37,7 @@ func TestExporterClientConcurrentSafe(t *testing.T) {
done := make(chan struct{})
var wg, someWork sync.WaitGroup
for i := 0; i < goroutines; i++ {
for range goroutines {
wg.Add(1)
someWork.Add(1)
go func() {

View File

@@ -223,7 +223,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
}
var gzPool = sync.Pool{
New: func() interface{} {
New: func() any {
w := gzip.NewWriter(io.Discard)
return w
},
@@ -316,7 +316,7 @@ func (e retryableError) Unwrap() error {
return e.err
}
func (e retryableError) As(target interface{}) bool {
func (e retryableError) As(target any) bool {
if e.err == nil {
return false
}

View File

@@ -135,7 +135,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error {
}
// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
return struct{ Type string }{Type: "OTLP/HTTP"}
}

View File

@@ -37,7 +37,7 @@ func TestExporterClientConcurrentSafe(t *testing.T) {
done := make(chan struct{})
var wg, someWork sync.WaitGroup
for i := 0; i < goroutines; i++ {
for range goroutines {
wg.Add(1)
someWork.Add(1)
go func() {

View File

@@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter {
}
// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
return struct {
Type string
Client Client

View File

@@ -154,7 +154,6 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
for _, otLink := range links {
// This redefinition is necessary to prevent otLink.*ID[:] copies
// being reused -- in short we need a new otLink per iteration.
otLink := otLink
tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
@@ -189,7 +188,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
events := make([]*tracepb.Span_Event, len(es))
// Transform message events
for i := 0; i < len(es); i++ {
for i := range es {
events[i] = &tracepb.Span_Event{
Name: es[i].Name,
TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
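
Two Go 1.22 changes meet in this file: loop variables are now created fresh on every iteration, which makes the defensive otLink := otLink copy redundant, and the index-only counted loop over es becomes for i := range es. A sketch of why the per-iteration copy is no longer needed, assuming Go 1.22 semantics:

package main

import "fmt"

func main() {
	words := []string{"a", "b", "c"}

	var fns []func() string
	for _, w := range words {
		// Before Go 1.22 every closure captured the single shared loop
		// variable, so a `w := w` copy was required here. Since Go 1.22
		// each iteration gets its own w and the copy can be deleted.
		fns = append(fns, func() string { return w })
	}

	for _, f := range fns {
		fmt.Print(f(), " ") // a b c
	}
	fmt.Println()
}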

View File

@@ -289,7 +289,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Client.
func (c *client) MarshalLog() interface{} {
func (c *client) MarshalLog() any {
return struct {
Type string
Endpoint string

View File

@@ -173,7 +173,7 @@ func TestNewInvokeStartThenStopManyTimes(t *testing.T) {
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
// Invoke Start numerous times, should return errAlreadyStarted
for i := 0; i < 10; i++ {
for i := range 10 {
if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") {
t.Fatalf("#%d unexpected Start error: %v", i, err)
}
@@ -183,7 +183,7 @@ func TestNewInvokeStartThenStopManyTimes(t *testing.T) {
t.Fatalf("failed to Shutdown the exporter: %v", err)
}
// Invoke Shutdown numerous times
for i := 0; i < 10; i++ {
for i := range 10 {
if err := exp.Shutdown(ctx); err != nil {
t.Fatalf(`#%d got error (%v) expected none`, i, err)
}

View File

@@ -107,7 +107,7 @@ func testClientStopManyTimes(t *testing.T, client otlptrace.Client) {
const num int = 20
wg.Add(num)
errs := make([]error, num)
for i := 0; i < num; i++ {
for i := range num {
go func(idx int) {
defer wg.Done()
<-ch

View File

@@ -48,7 +48,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter,
tr2 := tp2.Tracer("test-tracer2")
// Now create few spans
m := 4
for i := 0; i < m; i++ {
for i := range m {
_, span := tr1.Start(ctx, "AlwaysSample")
span.SetAttributes(attribute.Int64("i", int64(i)))
span.End()
@@ -109,7 +109,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter,
if got, want := len(attrMap), m; got != want {
t.Fatalf("span attribute unique values: got %d want %d", got, want)
}
for i := 0; i < m; i++ {
for i := range m {
_, ok := attrMap[int64(i)]
if !ok {
t.Fatalf("span with attribute %d missing", i)

View File

@@ -32,7 +32,7 @@ import (
const contentTypeProto = "application/x-protobuf"
var gzPool = sync.Pool{
New: func() interface{} {
New: func() any {
w := gzip.NewWriter(io.Discard)
return w
},
@@ -274,7 +274,7 @@ func (d *client) newRequest(body []byte) (request, error) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Client.
func (d *client) MarshalLog() interface{} {
func (d *client) MarshalLog() any {
return struct {
Type string
Endpoint string
@@ -340,7 +340,7 @@ func (e retryableError) Unwrap() error {
return e.err
}
func (e retryableError) As(target interface{}) bool {
func (e retryableError) As(target any) bool {
if e.err == nil {
return false
}

View File

@@ -107,7 +107,7 @@ func testClientStopManyTimes(t *testing.T, client otlptrace.Client) {
const num int = 20
wg.Add(num)
errs := make([]error, num)
for i := 0; i < num; i++ {
for i := range num {
go func(idx int) {
defer wg.Done()
<-ch

View File

@@ -48,7 +48,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter,
tr2 := tp2.Tracer("test-tracer2")
// Now create few spans
m := 4
for i := 0; i < m; i++ {
for i := range m {
_, span := tr1.Start(ctx, "AlwaysSample")
span.SetAttributes(attribute.Int64("i", int64(i)))
span.End()
@@ -109,7 +109,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter,
if got, want := len(attrMap), m; got != want {
t.Fatalf("span attribute unique values: got %d want %d", got, want)
}
for i := 0; i < m; i++ {
for i := range m {
_, ok := attrMap[int64(i)]
if !ok {
t.Fatalf("span with attribute %d missing", i)

View File

@@ -22,7 +22,7 @@ func benchmarkCollect(b *testing.B, n int) {
provider := metric.NewMeterProvider(metric.WithReader(exporter))
meter := provider.Meter("testmeter")
for i := 0; i < n; i++ {
for i := range n {
counter, err := meter.Float64Counter(fmt.Sprintf("foo_%d", i))
require.NoError(b, err)
counter.Add(ctx, float64(i))

View File

@@ -37,7 +37,7 @@ const (
)
var metricsPool = sync.Pool{
New: func() interface{} {
New: func() any {
return &metricdata.ResourceMetrics{}
},
}
@@ -49,7 +49,7 @@ type Exporter struct {
}
// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
const t = "Prometheus exporter"
if r, ok := e.Reader.(*metric.ManualReader); ok {

View File

@@ -989,7 +989,7 @@ func TestCollectorConcurrentSafe(t *testing.T) {
var wg sync.WaitGroup
concurrencyLevel := 10
for i := 0; i < concurrencyLevel; i++ {
for range concurrencyLevel {
wg.Add(1)
go func() {
defer wg.Done()
@@ -1009,7 +1009,7 @@ func TestShutdownExporter(t *testing.T) {
ctx := context.Background()
registry := prometheus.NewRegistry()
for i := 0; i < 3; i++ {
for range 3 {
exporter, err := New(WithRegisterer(registry))
require.NoError(t, err)
provider := metric.NewMeterProvider(

View File

@@ -361,7 +361,7 @@ func TestExporterConcurrentSafe(t *testing.T) {
const goroutines = 10
var wg sync.WaitGroup
wg.Add(goroutines)
for i := 0; i < goroutines; i++ {
for range goroutines {
go func() {
defer wg.Done()
err := exporter.Export(context.Background(), []sdklog.Record{{}})

View File

@@ -27,7 +27,7 @@ type value struct {
func (v value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
Value interface{}
Value any
}
jsonVal.Type = v.Kind().String()

View File

@@ -77,7 +77,7 @@ func (e *exporter) Shutdown(context.Context) error {
return nil
}
func (e *exporter) MarshalLog() interface{} {
func (e *exporter) MarshalLog() any {
return struct{ Type string }{Type: "STDOUT"}
}

View File

@@ -92,7 +92,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error {
}
// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
return struct {
Type string
WithTimestamps bool

View File

@@ -148,7 +148,7 @@ func toZipkinAnnotations(events []tracesdk.Event) []zkmodel.Annotation {
}
func attributesToJSONMapString(attributes []attribute.KeyValue) string {
m := make(map[string]interface{}, len(attributes))
m := make(map[string]any, len(attributes))
for _, a := range attributes {
m[(string)(a.Key)] = a.Value.AsInterface()
}

View File

@@ -189,19 +189,19 @@ func (e *Exporter) Shutdown(ctx context.Context) error {
return nil
}
func (e *Exporter) logf(format string, args ...interface{}) {
func (e *Exporter) logf(format string, args ...any) {
if e.logger != emptyLogger {
e.logger.Info(fmt.Sprintf(format, args...))
}
}
func (e *Exporter) errf(format string, args ...interface{}) error {
func (e *Exporter) errf(format string, args ...any) error {
e.logf(format, args...)
return fmt.Errorf(format, args...)
}
// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} {
func (e *Exporter) MarshalLog() any {
return struct {
Type string
URL string

View File

@@ -362,7 +362,7 @@ func TestErrorOnExportShutdownExporter(t *testing.T) {
func TestLogrFormatting(t *testing.T) {
format := "string %q, int %d"
args := []interface{}{"s", 1}
args := []any{"s", 1}
var buf bytes.Buffer
l := funcr.New(func(prefix, args string) {

View File

@@ -41,22 +41,22 @@ func GetLogger() logr.Logger {
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) {
func Info(msg string, keysAndValues ...any) {
GetLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) {
func Error(err error, msg string, keysAndValues ...any) {
GetLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) {
func Debug(msg string, keysAndValues ...any) {
GetLogger().V(8).Info(msg, keysAndValues...)
}
// Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event.
func Warn(msg string, keysAndValues ...interface{}) {
func Warn(msg string, keysAndValues ...any) {
GetLogger().V(1).Info(msg, keysAndValues...)
}

View File

@@ -7,6 +7,8 @@
package internaltest // import "go.opentelemetry.io/otel/internal/internaltest"
import (
"maps"
"slices"
"sync"
"testing"
@@ -28,9 +30,7 @@ var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil)
// NewTextMapCarrier returns a new *TextMapCarrier populated with data.
func NewTextMapCarrier(data map[string]string) *TextMapCarrier {
copied := make(map[string]string, len(data))
for k, v := range data {
copied[k] = v
}
maps.Copy(copied, data)
return &TextMapCarrier{data: copied}
}
@@ -58,10 +58,8 @@ func (c *TextMapCarrier) Get(key string) string {
func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool {
c.mtx.Lock()
defer c.mtx.Unlock()
for _, k := range c.gets {
if k == key {
return true
}
if slices.Contains(c.gets, key) {
return true
}
t.Errorf("TextMapCarrier.Get(%q) has not been called", key)
return false
@@ -120,9 +118,7 @@ func (c *TextMapCarrier) SetN(t *testing.T, n int) bool {
// Reset zeros out the recording state and sets the carried values to data.
func (c *TextMapCarrier) Reset(data map[string]string) {
copied := make(map[string]string, len(data))
for k, v := range data {
copied[k] = v
}
maps.Copy(copied, data)
c.mtx.Lock()
defer c.mtx.Unlock()
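
The maps and slices packages (standard library since Go 1.21) replace the two hand-written loops in this test helper: maps.Copy copies every key/value pair from one map into another, and slices.Contains reports whether a slice holds a given element. A compact standalone sketch (the carrier keys are illustrative only):

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	data := map[string]string{"traceparent": "00-...", "baggage": "k=v"}

	// Replaces: for k, v := range data { copied[k] = v }
	copied := make(map[string]string, len(data))
	maps.Copy(copied, data)

	gets := []string{"traceparent", "baggage"}

	// Replaces: for _, k := range gets { if k == key { return true } }
	fmt.Println(slices.Contains(gets, "baggage")) // true
	fmt.Println(slices.Contains(gets, "missing")) // false
	fmt.Println(len(copied) == len(data))         // true
}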

View File

@@ -107,7 +107,7 @@ func testClientStopManyTimes(t *testing.T, client otlptrace.Client) {
const num int = 20
wg.Add(num)
errs := make([]error, num)
for i := 0; i < num; i++ {
for i := range num {
go func(idx int) {
defer wg.Done()
<-ch

View File

@@ -48,7 +48,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter,
tr2 := tp2.Tracer("test-tracer2")
// Now create few spans
m := 4
for i := 0; i < m; i++ {
for i := range m {
_, span := tr1.Start(ctx, "AlwaysSample")
span.SetAttributes(attribute.Int64("i", int64(i)))
span.End()
@@ -109,7 +109,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter,
if got, want := len(attrMap), m; got != want {
t.Fatalf("span attribute unique values: got %d want %d", got, want)
}
for i := 0; i < m; i++ {
for i := range m {
_, ok := attrMap[int64(i)]
if !ok {
t.Fatalf("span with attribute %d missing", i)

View File

@@ -439,10 +439,10 @@ type logSink struct {
err error
msg string
keysAndValues []interface{}
keysAndValues []any
}
func (l *logSink) Error(err error, msg string, keysAndValues ...interface{}) {
func (l *logSink) Error(err error, msg string, keysAndValues ...any) {
l.err, l.msg, l.keysAndValues = err, msg, keysAndValues
l.LogSink.Error(err, msg, keysAndValues...)
}

View File

@@ -157,7 +157,7 @@ func TestRecorderConcurrentSafe(t *testing.T) {
r := &Recorder{}
for i := 0; i < goRoutineN; i++ {
for range goRoutineN {
go func() {
defer wg.Done()

View File

@@ -166,7 +166,7 @@ func ExampleMeter_gauge() {
go func() {
defer close(fanSpeedSubscription)
for idx := 0; idx < 5; idx++ {
for range 5 {
// Synchronous gauges are used when the measurement cycle is
// synchronous to an external change.
// Simulate that external cycle here.

View File

@@ -13,4 +13,4 @@ type TelemetryVersion types10.TelemetryVersion
type AttributeName string
// AttributeValue is an attribute value.
type AttributeValue interface{}
type AttributeValue any

View File

@@ -329,7 +329,7 @@ func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int {
origRead := q.read
n := min(len(buf), q.len)
for i := 0; i < n; i++ {
for i := range n {
buf[i] = q.read.Value
q.read = q.read.Next()
}

View File

@@ -207,7 +207,7 @@ func TestBatchProcessor(t *testing.T) {
WithExportInterval(time.Nanosecond),
WithExportTimeout(time.Hour),
)
for i := 0; i < size; i++ {
for range size {
assert.NoError(t, b.OnEmit(ctx, new(Record)))
}
var got []Record
@@ -230,7 +230,7 @@ func TestBatchProcessor(t *testing.T) {
WithExportInterval(time.Hour),
WithExportTimeout(time.Hour),
)
for i := 0; i < 10*batch; i++ {
for range 10 * batch {
assert.NoError(t, b.OnEmit(ctx, new(Record)))
}
assert.Eventually(t, func() bool {
@@ -253,7 +253,7 @@ func TestBatchProcessor(t *testing.T) {
WithExportInterval(time.Hour),
WithExportTimeout(time.Hour),
)
for i := 0; i < 2*batch; i++ {
for range 2 * batch {
assert.NoError(t, b.OnEmit(ctx, new(Record)))
}
@@ -293,7 +293,7 @@ func TestBatchProcessor(t *testing.T) {
b := NewBatchProcessor(e)
const shutdowns = 3
for i := 0; i < shutdowns; i++ {
for range shutdowns {
assert.NoError(t, b.Shutdown(ctx))
}
assert.Equal(t, 1, e.ShutdownN(), "exporter Shutdown calls")
@@ -382,7 +382,7 @@ func TestBatchProcessor(t *testing.T) {
)
// Enqueue 10 x "batch size" amount of records.
for i := 0; i < 10*batch; i++ {
for range 10 * batch {
require.NoError(t, b.OnEmit(ctx, new(Record)))
}
assert.Eventually(t, func() bool {
@@ -490,7 +490,7 @@ func TestBatchProcessor(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
for i := 0; i < goRoutines-1; i++ {
for range goRoutines - 1 {
wg.Add(1)
go func() {
defer wg.Done()
@@ -582,7 +582,7 @@ func TestQueue(t *testing.T) {
t.Run("TryFlush", func(t *testing.T) {
const size = 3
q := newQueue(size)
for i := 0; i < size-1; i++ {
for range size - 1 {
q.write.Value = r
q.write = q.write.Next()
q.len++
@@ -627,7 +627,7 @@ func TestQueue(t *testing.T) {
wg.Add(goRoutines)
b := newQueue(goRoutines)
for i := 0; i < goRoutines; i++ {
for range goRoutines {
go func() {
defer wg.Done()
b.Enqueue(Record{})

View File

@@ -245,7 +245,7 @@ func TestExportSync(t *testing.T) {
const goRoutines = 10
var wg sync.WaitGroup
wg.Add(goRoutines)
for i := 0; i < goRoutines; i++ {
for i := range goRoutines {
go func(n int) {
defer wg.Done()
@@ -338,7 +338,7 @@ func TestBufferExporter(t *testing.T) {
stop := make(chan struct{})
var wg sync.WaitGroup
for i := 0; i < goRoutines; i++ {
for range goRoutines {
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -239,7 +239,6 @@ func TestWithResource(t *testing.T) {
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
got := newProviderConfig(tc.options).resource
if diff := cmp.Diff(got, tc.want); diff != "" {
@@ -258,7 +257,7 @@ func TestLoggerProviderConcurrentSafe(t *testing.T) {
p := NewLoggerProvider(WithProcessor(newProcessor("0")))
const name = "testLogger"
ctx := context.Background()
for i := 0; i < goRoutineN; i++ {
for range goRoutineN {
go func() {
defer wg.Done()
@@ -276,7 +275,7 @@ type logSink struct {
level int
msg string
keysAndValues []interface{}
keysAndValues []any
}
func (l *logSink) Enabled(int) bool { return true }

View File

@@ -55,13 +55,13 @@ func verifyRing(t *testing.T, r *ring, N int, sum int) {
}
func TestNewRing(t *testing.T) {
for i := 0; i < 10; i++ {
for i := range 10 {
// Empty value.
r := newRing(i)
verifyRing(t, r, i, -1)
}
for n := 0; n < 10; n++ {
for n := range 10 {
r := newRing(n)
for i := 1; i <= n; i++ {
var rec Record

View File

@@ -107,7 +107,7 @@ func TestSimpleProcessorConcurrentSafe(t *testing.T) {
ctx := context.Background()
e := &writerExporter{new(strings.Builder)}
s := log.NewSimpleProcessor(e)
for i := 0; i < goRoutineN; i++ {
for range goRoutineN {
go func() {
defer wg.Done()

View File

@@ -169,7 +169,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Int64Counter")
i, err := m.Int64Counter("int64-counter")
assert.NoError(b, err)
for n := 0; n < 10; n++ {
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
}
return r
@@ -186,7 +186,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Float64Counter")
i, err := m.Float64Counter("float64-counter")
assert.NoError(b, err)
for n := 0; n < 10; n++ {
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
}
return r
@@ -203,7 +203,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Int64UpDownCounter")
i, err := m.Int64UpDownCounter("int64-up-down-counter")
assert.NoError(b, err)
for n := 0; n < 10; n++ {
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
}
return r
@@ -220,7 +220,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Float64UpDownCounter")
i, err := m.Float64UpDownCounter("float64-up-down-counter")
assert.NoError(b, err)
for n := 0; n < 10; n++ {
for range 10 {
i.Add(ctx, 1, metric.WithAttributeSet(s))
}
return r
@@ -237,7 +237,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Int64Histogram")
i, err := m.Int64Histogram("int64-histogram")
assert.NoError(b, err)
for n := 0; n < 10; n++ {
for range 10 {
i.Record(ctx, 1, metric.WithAttributeSet(s))
}
return r
@@ -254,7 +254,7 @@ func benchCollectViews(views ...View) func(*testing.B) {
m, r := setup("benchCollectViews/Float64Histogram")
i, err := m.Float64Histogram("float64-histogram")
assert.NoError(b, err)
for n := 0; n < 10; n++ {
for range 10 {
i.Record(ctx, 1, metric.WithAttributeSet(s))
}
return r

View File

@@ -37,7 +37,7 @@ func TestCacheConcurrentSafe(t *testing.T) {
c := cache[string, int]{}
var wg sync.WaitGroup
for n := 0; n < goroutines; n++ {
for n := range goroutines {
wg.Add(1)
go func(i int) {
defer wg.Done()

View File

@@ -185,7 +185,6 @@ func TestWithResource(t *testing.T) {
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
got := newConfig(tc.options).res
if diff := cmp.Diff(got, tc.want); diff != "" {

View File

@@ -58,10 +58,7 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi
// SimpleFixedSizeExemplarReservoir with a reservoir equal to the
// smaller of the maximum number of buckets configured on the
// aggregation or twenty (e.g. min(20, max_buckets)).
n = int(a.MaxSize)
if n > 20 {
n = 20
}
n = min(int(a.MaxSize), 20)
} else {
// https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
// This Exemplar reservoir MAY take a configuration parameter for
@@ -69,11 +66,9 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi
// provided, the default size MAY be the number of possible
// concurrent threads (e.g. number of CPUs) to help reduce
// contention. Otherwise, a default size of 1 SHOULD be used.
n = runtime.NumCPU()
if n < 1 {
// Should never be the case, but be defensive.
n = 1
}
n = max(runtime.NumCPU(),
// Should never be the case, but be defensive.
1)
}
return exemplar.FixedSizeReservoirProvider(n)
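
min and max became built-in functions in Go 1.21, so both if-based clamps in this selector reduce to single expressions. A minimal sketch of the two rewrites, with illustrative values rather than the SDK's configuration types:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	maxSize := int32(160)

	// Replaces: n = int(maxSize); if n > 20 { n = 20 }
	n := min(int(maxSize), 20)
	fmt.Println(n) // 20

	// Replaces: n = runtime.NumCPU(); if n < 1 { n = 1 }
	n = max(runtime.NumCPU(),
		// Should never be the case, but be defensive.
		1)
	fmt.Println(n >= 1) // true
}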

View File

@@ -37,7 +37,7 @@ func TestFixedSizeExemplarConcurrentSafe(t *testing.T) {
goRoutines := max(10, runtime.NumCPU())
var wg sync.WaitGroup
for n := 0; n < goRoutines; n++ {
for range goRoutines {
wg.Add(1)
go func() {
defer wg.Done()
@@ -54,7 +54,7 @@ func TestFixedSizeExemplarConcurrentSafe(t *testing.T) {
const collections = 100
var rm metricdata.ResourceMetrics
for c := 0; c < collections; c++ {
for range collections {
require.NotPanics(t, func() { _ = r.Collect(ctx, &rm) })
}

View File

@@ -654,7 +654,7 @@ func BenchmarkPrepend(b *testing.B) {
for i := 0; i < b.N; i++ {
agg := newExpoHistogramDataPoint[float64](alice, 1024, 20, false, false)
n := math.MaxFloat64
for j := 0; j < 1024; j++ {
for range 1024 {
agg.record(n)
n = n / 2
}
@@ -665,7 +665,7 @@ func BenchmarkAppend(b *testing.B) {
for i := 0; i < b.N; i++ {
agg := newExpoHistogramDataPoint[float64](alice, 1024, 20, false, false)
n := smallestNonZeroNormalFloat64
for j := 0; j < 1024; j++ {
for range 1024 {
agg.record(n)
n = n * 2
}

View File

@@ -129,7 +129,7 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr
}
// MarshalLog returns logging data about the ManualReader.
func (r *ManualReader) MarshalLog() interface{} {
func (r *ManualReader) MarshalLog() any {
r.mu.Lock()
down := r.isShutdown
r.mu.Unlock()

View File

@@ -1080,12 +1080,12 @@ func newLogSink(t *testing.T) *logSink {
return &logSink{LogSink: testr.New(t).GetSink()}
}
func (l *logSink) Info(level int, msg string, keysAndValues ...interface{}) {
func (l *logSink) Info(level int, msg string, keysAndValues ...any) {
l.messages = append(l.messages, msg)
l.LogSink.Info(level, msg, keysAndValues...)
}
func (l *logSink) Error(err error, msg string, keysAndValues ...interface{}) {
func (l *logSink) Error(err error, msg string, keysAndValues ...any) {
l.messages = append(l.messages, fmt.Sprintf("%s: %s", err, msg))
l.LogSink.Error(err, msg, keysAndValues...)
}
@@ -2352,7 +2352,7 @@ func TestObservableDropAggregation(t *testing.T) {
otel.SetLogger(
funcr.NewJSON(
func(obj string) {
var entry map[string]interface{}
var entry map[string]any
_ = json.Unmarshal([]byte(obj), &entry)
// All unregistered observables should log `errUnregObserver` error.
@@ -2530,7 +2530,7 @@ func TestDuplicateInstrumentCreation(t *testing.T) {
}()
m := NewMeterProvider(WithReader(reader)).Meter("TestDuplicateInstrumentCreation")
for i := 0; i < 3; i++ {
for range 3 {
require.NoError(t, tt.createInstrument(m))
}
internalMeter, ok := m.(*meter)
@@ -2553,7 +2553,7 @@ func TestDuplicateInstrumentCreation(t *testing.T) {
func TestMeterProviderDelegation(t *testing.T) {
meter := otel.Meter("go.opentelemetry.io/otel/metric/internal/global/meter_test")
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { require.NoError(t, err) }))
for i := 0; i < 5; i++ {
for range 5 {
int64Counter, err := meter.Int64ObservableCounter("observable.int64.counter")
require.NoError(t, err)
int64UpDownCounter, err := meter.Int64ObservableUpDownCounter("observable.int64.up.down.counter")

View File

@@ -121,10 +121,10 @@ func AssertEqual[T Datatypes](t TestingT, expected, actual T, opts ...Option) bo
cfg := newConfig(opts)
// Generic types cannot be type asserted. Use an interface instead.
aIface := interface{}(actual)
aIface := any(actual)
var r []string
switch e := interface{}(expected).(type) {
switch e := any(expected).(type) {
case metricdata.Exemplar[int64]:
r = equalExemplars(e, aIface.(metricdata.Exemplar[int64]), cfg)
case metricdata.Exemplar[float64]:
@@ -206,7 +206,7 @@ func AssertHasAttributes[T Datatypes](t TestingT, actual T, attrs ...attribute.K
var reasons []string
switch e := interface{}(actual).(type) {
switch e := any(actual).(type) {
case metricdata.Exemplar[int64]:
reasons = hasAttributesExemplars(e, attrs...)
case metricdata.Exemplar[float64]:

View File

@@ -497,7 +497,7 @@ func equalQuantileValue(a, b metricdata.QuantileValue, _ config) (reasons []stri
return reasons
}
func notEqualStr(prefix string, expected, actual interface{}) string {
func notEqualStr(prefix string, expected, actual any) string {
return fmt.Sprintf("%s not equal:\nexpected: %v\nactual: %v", prefix, expected, actual)
}
@@ -591,9 +591,9 @@ func equalExemplars[N int64 | float64](a, b metricdata.Exemplar[N], cfg config)
func diffSlices[T any](a, b []T, equal func(T, T) bool) (extraA, extraB []T) {
visited := make([]bool, len(b))
for i := 0; i < len(a); i++ {
for i := range a {
found := false
for j := 0; j < len(b); j++ {
for j := range b {
if visited[j] {
continue
}
@@ -608,7 +608,7 @@ func diffSlices[T any](a, b []T, equal func(T, T) bool) (extraA, extraB []T) {
}
}
for j := 0; j < len(b); j++ {
for j := range b {
if visited[j] {
continue
}

View File

@@ -114,7 +114,7 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri
cancel: cancel,
done: make(chan struct{}),
rmPool: sync.Pool{
New: func() interface{} {
New: func() any {
return &metricdata.ResourceMetrics{}
},
},
@@ -234,7 +234,7 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet
}
// collect unwraps p as a produceHolder and returns its produce results.
func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error {
if p == nil {
return ErrReaderNotRegistered
}
@@ -349,7 +349,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error {
}
// MarshalLog returns logging data about the PeriodicReader.
func (r *PeriodicReader) MarshalLog() interface{} {
func (r *PeriodicReader) MarshalLog() any {
r.mu.Lock()
down := r.isShutdown
r.mu.Unlock()

View File

@@ -426,7 +426,7 @@ func (i *inserter[N]) logConflict(id instID) {
}
const msg = "duplicate metric stream definitions"
args := []interface{}{
args := []any{
"names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
"descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
"kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),

View File

@@ -57,7 +57,7 @@ func assertSum[N int64 | float64](
t.Helper()
requireN[N](t, n, meas, comps, err)
for m := 0; m < n; m++ {
for m := range n {
t.Logf("input/output number: %d", m)
in, out := meas[m], comps[m]
in(context.Background(), 1, *attribute.EmptySet())
@@ -601,7 +601,7 @@ type logCounter struct {
infoN uint32
}
func (l *logCounter) Info(level int, msg string, keysAndValues ...interface{}) {
func (l *logCounter) Info(level int, msg string, keysAndValues ...any) {
atomic.AddUint32(&l.infoN, 1)
l.LogSink.Info(level, msg, keysAndValues...)
}
@@ -610,7 +610,7 @@ func (l *logCounter) InfoN() int {
return int(atomic.SwapUint32(&l.infoN, 0))
}
func (l *logCounter) Error(err error, msg string, keysAndValues ...interface{}) {
func (l *logCounter) Error(err error, msg string, keysAndValues ...any) {
atomic.AddUint32(&l.errN, 1)
l.LogSink.Error(err, msg, keysAndValues...)
}

View File

@@ -83,7 +83,7 @@ func TestPipelineConcurrentSafe(t *testing.T) {
var wg sync.WaitGroup
const threads = 2
for i := 0; i < threads; i++ {
for i := range threads {
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -146,7 +146,7 @@ func (ts *readerTestSuite) TestMethodConcurrentSafe() {
var wg sync.WaitGroup
const threads = 2
for i := 0; i < threads; i++ {
for range threads {
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -18,7 +18,7 @@ func makeAttrs(n int) (_, _ *resource.Resource) {
used := map[string]bool{}
l1 := make([]attribute.KeyValue, n)
l2 := make([]attribute.KeyValue, n)
for i := 0; i < n; i++ {
for i := range n {
var k string
for {
k = fmt.Sprint("k", rand.IntN(1000000000))

View File

@@ -70,8 +70,6 @@ func TestHostIDReaderBSD(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.name, func(t *testing.T) {
reader := hostIDReaderBSD{
readFile: tc.fileReader,
@@ -119,8 +117,6 @@ func TestHostIDReaderLinux(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.name, func(t *testing.T) {
reader := hostIDReaderLinux{
readFile: tc.fileReader,
@@ -198,7 +194,6 @@ func TestHostIDReaderDarwin(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.name, func(t *testing.T) {
reader := hostIDReaderDarwin{
execCommand: tc.commandExecutor,

View File

@@ -84,8 +84,6 @@ func TestParsePlistFile(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
result, err := resource.ParsePlistFile(tc.Plist)
@@ -155,8 +153,6 @@ func TestBuildOSRelease(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
result := resource.BuildOSRelease(tc.Properties)
require.Equal(t, tc.OSRelease, result)

View File

@@ -45,8 +45,6 @@ func TestMapRuntimeOSToSemconvOSType(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
osTypeAttribute := resource.MapRuntimeOSToSemconvOSType(tc.Goos)
require.Equal(t, osTypeAttribute, tc.OSType)

View File

@@ -92,8 +92,6 @@ func TestGetFirstAvailableFile(t *testing.T) {
}
for _, tc := range tt {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
file, err := resource.GetFirstAvailableFile(tc.Candidates)

View File

@@ -112,7 +112,7 @@ func (r *Resource) String() string {
}
// MarshalLog is the marshaling function used by the logging system to represent this Resource.
func (r *Resource) MarshalLog() interface{} {
func (r *Resource) MarshalLog() any {
return struct {
Attributes attribute.Set
SchemaURL string

View File

@@ -782,7 +782,7 @@ func TestResourceConcurrentSafe(t *testing.T) {
// Creating Resources should also be free of any data races,
// because Resources are immutable.
var wg sync.WaitGroup
for i := 0; i < 2; i++ {
for range 2 {
wg.Add(1)
go func() {
defer wg.Done()

View File

@@ -89,11 +89,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
if maxExportBatchSize > maxQueueSize {
if DefaultMaxExportBatchSize > maxQueueSize {
maxExportBatchSize = maxQueueSize
} else {
maxExportBatchSize = DefaultMaxExportBatchSize
}
maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize)
}
o := BatchSpanProcessorOptions{
@@ -403,7 +399,7 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b
}
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
func (bsp *batchSpanProcessor) MarshalLog() interface{} {
func (bsp *batchSpanProcessor) MarshalLog() any {
return struct {
Type string
SpanExporter SpanExporter

View File

@@ -580,7 +580,7 @@ func TestBatchSpanProcessorForceFlushQueuedSpans(t *testing.T) {
tracer := tp.Tracer("tracer")
for i := 0; i < 10; i++ {
for i := range 10 {
_, span := tracer.Start(ctx, fmt.Sprintf("span%d", i))
span.End()

View File

@@ -383,7 +383,7 @@ func BenchmarkSpanProcessorVerboseLogging(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
for j := 0; j < 10; j++ {
for range 10 {
_, span := tracer.Start(ctx, "bench")
span.End()
}

View File

@@ -16,7 +16,7 @@ func TestNewIDs(t *testing.T) {
gen := defaultIDGenerator()
n := 1000
for i := 0; i < n; i++ {
for range n {
traceID, spanID := gen.NewIDs(context.Background())
assert.Truef(t, traceID.IsValid(), "trace id: %s", traceID.String())
assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String())
@@ -28,7 +28,7 @@ func TestNewSpanID(t *testing.T) {
testTraceID := [16]byte{123, 123}
n := 1000
for i := 0; i < n; i++ {
for range n {
spanID := gen.NewSpanID(context.Background(), testTraceID)
assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String())
}


@@ -45,7 +45,7 @@ type tracerProviderConfig struct {
}
// MarshalLog is the marshaling function used by the logging system to represent this Provider.
func (cfg tracerProviderConfig) MarshalLog() interface{} {
func (cfg tracerProviderConfig) MarshalLog() any {
return struct {
SpanProcessors []SpanProcessor
SamplerType string
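
MarshalLog methods like the one above return `any` because the logging layer accepts arbitrary values; the method hands back an anonymous struct snapshot of the fields worth logging. A hypothetical sketch of the same pattern for an unrelated type (the `config` type and its fields are invented):

package example

// config is a stand-in type with unexported fields.
type config struct {
	endpoint string
	retries  int
}

// MarshalLog exposes a loggable snapshot without exporting the fields.
func (c config) MarshalLog() any {
	return struct {
		Endpoint string
		Retries  int
	}{
		Endpoint: c.endpoint,
		Retries:  c.retries,
	}
}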


@@ -232,7 +232,7 @@ func TestTracerProviderSamplerConfigFromEnv(t *testing.T) {
argOptional bool
description string
errorType error
invalidArgErrorType interface{}
invalidArgErrorType any
}
randFloat := rand.Float64()
@@ -353,7 +353,7 @@ func TestTracerProviderSamplerConfigFromEnv(t *testing.T) {
}
}
func testStoredError(t *testing.T, target interface{}) {
func testStoredError(t *testing.T, target any) {
t.Helper()
if assert.Len(t, handler.errs, 1) && assert.Error(t, handler.errs[0]) {
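
Helpers such as `testStoredError` take their target as `any` so one function can assert different concrete error types, which is the same shape `errors.As` expects: a pointer to the error type, passed through an `any` parameter. A hedged sketch using only the standard library (the helper name is made up; how the SDK's test actually unwraps the error is not shown in this hunk):

package main

import (
	"errors"
	"fmt"
	"os"
)

// storedErrorIs reports whether err can be unwrapped into target,
// which must be a pointer to an error type (e.g. **os.PathError).
func storedErrorIs(err error, target any) bool {
	return errors.As(err, target)
}

func main() {
	_, err := os.Open("definitely-missing.txt")
	var pathErr *os.PathError
	fmt.Println(storedErrorIs(err, &pathErr)) // true
	fmt.Println(pathErr.Path)                 // definitely-missing.txt
}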


@@ -180,14 +180,14 @@ func TestTraceIdRatioSamplesInclusively(t *testing.T) {
)
idg := defaultIDGenerator()
for i := 0; i < numSamplers; i++ {
for range numSamplers {
ratioLo, ratioHi := rand.Float64(), rand.Float64()
if ratioHi < ratioLo {
ratioLo, ratioHi = ratioHi, ratioLo
}
samplerHi := TraceIDRatioBased(ratioHi)
samplerLo := TraceIDRatioBased(ratioLo)
for j := 0; j < numTraces; j++ {
for range numTraces {
traceID, _ := idg.NewIDs(context.Background())
params := SamplingParameters{TraceID: traceID}


@@ -110,7 +110,7 @@ func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
// MarshalLog is the marshaling function used by the logging system to represent
// this Span Processor.
func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
func (ssp *simpleSpanProcessor) MarshalLog() any {
return struct {
Type string
Exporter SpanExporter


@@ -563,7 +563,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
s.addEvent(semconv.ExceptionEventName, opts...)
}
func typeStr(i interface{}) string {
func typeStr(i any) string {
t := reflect.TypeOf(i)
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
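
typeStr behaves the same after the signature change because reflect inspects the dynamic type stored in the `any` value. `Name` is non-empty for defined types (including predeclared ones like int), while `PkgPath` is empty for predeclared and unnamed types; only unnamed composites such as slices have both empty, which is what the check above treats as builtin-like. A brief sketch of that behavior (the function and type names are illustrative):

package main

import (
	"fmt"
	"reflect"
)

func describeType(i any) string {
	t := reflect.TypeOf(i)
	return fmt.Sprintf("Name=%q PkgPath=%q", t.Name(), t.PkgPath())
}

type myError struct{}

func main() {
	fmt.Println(describeType(42))        // Name="int" PkgPath=""
	fmt.Println(describeType([]int{1}))  // Name="" PkgPath="" (unnamed composite)
	fmt.Println(describeType(myError{})) // Name="myError" PkgPath="main"
}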


@@ -373,7 +373,7 @@ func TestLogDropAttrs(t *testing.T) {
func BenchmarkRecordingSpanSetAttributes(b *testing.B) {
var attrs []attribute.KeyValue
for i := 0; i < 100; i++ {
for i := range 100 {
attr := attribute.String(fmt.Sprintf("hello.attrib%d", i), fmt.Sprintf("goodbye.attrib%d", i))
attrs = append(attrs, attr)
}


@@ -283,13 +283,12 @@ func TestSampling(t *testing.T) {
"SampledParentSpanWithParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: 1, parent: true, sampledParent: true},
"UnsampledParentSpanWithParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: 0, parent: true, sampledParent: false},
} {
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()
p := NewTracerProvider(WithSampler(tc.sampler))
tr := p.Tracer("test")
var sampled int
for i := 0; i < total; i++ {
for range total {
ctx := context.Background()
if tc.parent {
tid, sid := idg.NewIDs(ctx)
@@ -920,7 +919,7 @@ func TestSetSpanStatusWithoutMessageWhenStatusIsNotError(t *testing.T) {
}
}
func cmpDiff(x, y interface{}) string {
func cmpDiff(x, y any) string {
return cmp.Diff(x, y,
cmp.AllowUnexported(snapshot{}),
cmp.AllowUnexported(attribute.Value{}),
@@ -1479,7 +1478,6 @@ func TestWithResource(t *testing.T) {
},
}
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
te := NewTestExporter()
defaultOptions := []TracerProviderOption{WithSyncer(te), WithSampler(AlwaysSample())}
@@ -1895,7 +1893,6 @@ func TestSamplerTraceState(t *testing.T) {
}
for _, ts := range tests {
ts := ts
t.Run(ts.name, func(t *testing.T) {
te := NewTestExporter()
tp := NewTracerProvider(WithSampler(ts.sampler), WithSyncer(te), WithResource(resource.Empty()))
@@ -1968,7 +1965,7 @@ func TestWithIDGenerator(t *testing.T) {
WithSyncer(te),
WithIDGenerator(gen),
)
for i := 0; i < numSpan; i++ {
for i := range numSpan {
func() {
_, span := tp.Tracer(t.Name()).Start(context.Background(), strconv.Itoa(i))
defer span.End()


@@ -28,7 +28,7 @@ func TestNewInMemoryExporter(t *testing.T) {
assert.Empty(t, imsb.GetSpans())
input := make(SpanStubs, 10)
for i := 0; i < 10; i++ {
for i := range 10 {
input[i] = SpanStub{Name: fmt.Sprintf("span %d", i)}
}
require.NoError(t, imsb.ExportSpans(context.Background(), input.Snapshots()))


@@ -37,7 +37,7 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan {
}
ro := make([]tracesdk.ReadOnlySpan, len(s))
for i := 0; i < len(s); i++ {
for i := range s {
ro[i] = s[i].Snapshot()
}
return ro
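
Here the `for i := 0; i < len(s); i++` form is replaced by ranging over the slice itself, which yields exactly the same index sequence. A minimal stand-alone sketch of the idiom:

package main

import "fmt"

func main() {
	s := []string{"a", "b", "c"}
	out := make([]string, len(s))
	for i := range s { // same indices as i := 0; i < len(s); i++
		out[i] = s[i] + "!"
	}
	fmt.Println(out) // [a! b! c!]
}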


@@ -72,7 +72,7 @@ func (h *harness) testTracerProvider(subjectFactory func() trace.TracerProvider)
done := make(chan struct{})
go func(tp trace.TracerProvider) {
var wg sync.WaitGroup
for i := 0; i < 20; i++ {
for i := range 20 {
wg.Add(1)
go func(name, version string) {
_ = tp.Tracer(name, trace.WithInstrumentationVersion(version))
@@ -231,7 +231,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) {
done := make(chan struct{})
go func(tp trace.Tracer) {
var wg sync.WaitGroup
for i := 0; i < 20; i++ {
for i := range 20 {
wg.Add(1)
go func(name string) {
defer wg.Done()


@@ -898,7 +898,7 @@ func TestHTTPAttributesFromHTTPStatusCode(t *testing.T) {
}
func TestSpanStatusFromHTTPStatusCode(t *testing.T) {
for code := 0; code < 1000; code++ {
for code := range 1000 {
expected := getExpectedCodeForHTTPCode(code, trace.SpanKindClient)
got, msg := SpanStatusFromHTTPStatusCode(code)
assert.Equalf(t, expected, got, "%s vs %s", expected, got)
@@ -913,7 +913,7 @@ func TestSpanStatusFromHTTPStatusCode(t *testing.T) {
}
func TestSpanStatusFromHTTPStatusCodeAndSpanKind(t *testing.T) {
for code := 0; code < 1000; code++ {
for code := range 1000 {
expected := getExpectedCodeForHTTPCode(code, trace.SpanKindClient)
got, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(code, trace.SpanKindClient)
assert.Equalf(t, expected, got, "%s vs %s", expected, got)
@@ -954,7 +954,7 @@ func getExpectedCodeForHTTPCode(code int, spanKind trace.SpanKind) codes.Code {
return codes.Error
}
func assertElementsMatch(t *testing.T, expected, got []attribute.KeyValue, format string, args ...interface{}) {
func assertElementsMatch(t *testing.T, expected, got []attribute.KeyValue, format string, args ...any) {
if !assert.ElementsMatchf(t, expected, got, format, args...) {
t.Log("expected:", kvStr(expected))
t.Log("got:", kvStr(got))


@@ -125,7 +125,7 @@ func TestTracerProviderConcurrentSafe(t *testing.T) {
defer close(done)
var wg sync.WaitGroup
for i := 0; i < goroutines; i++ {
for i := range goroutines {
wg.Add(1)
go func(name, version string) {
defer wg.Done()
@@ -207,7 +207,7 @@ func TestTracerConcurrentSafe(t *testing.T) {
defer close(done)
var wg sync.WaitGroup
for i := 0; i < goroutines; i++ {
for i := range goroutines {
wg.Add(1)
go func(name string) {
defer wg.Done()
@@ -1041,7 +1041,7 @@ func TestSpanConcurrentSafe(t *testing.T) {
defer close(done)
var wg sync.WaitGroup
for i := 0; i < nGoroutine; i++ {
for i := range nGoroutine {
wg.Add(1)
go func(n int) {
defer wg.Done()
@@ -1074,7 +1074,7 @@ func TestSpanConcurrentSafe(t *testing.T) {
ctx := context.Background()
var wg sync.WaitGroup
for i := 0; i < nSpans; i++ {
for i := range nSpans {
wg.Add(1)
go func(n int) {
defer wg.Done()
@@ -1094,7 +1094,7 @@ func TestSpanConcurrentSafe(t *testing.T) {
defer close(done)
var wg sync.WaitGroup
for i := 0; i < nTracers; i++ {
for i := range nTracers {
wg.Add(1)
go func(n int) {
defer wg.Done()


@@ -278,7 +278,7 @@ var testcases = []struct {
var maxMembers = func() TraceState {
members := make([]member, maxListMembers)
for i := 0; i < maxListMembers; i++ {
for i := range maxListMembers {
members[i] = member{
Key: fmt.Sprintf("key%d", i+1),
Value: fmt.Sprintf("value%d", i+1),