diff --git a/.golangci.yml b/.golangci.yml index 1492f6fd7..4d190c1d6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -235,10 +235,13 @@ formatters: enable: - gofumpt - goimports + - golines settings: goimports: local-prefixes: - go.opentelemetry.io + golines: + max-len: 120 exclusions: generated: lax paths: diff --git a/attribute/set_test.go b/attribute/set_test.go index b851d1fc7..c73f0212c 100644 --- a/attribute/set_test.go +++ b/attribute/set_test.go @@ -43,14 +43,47 @@ func TestSetDedup(t *testing.T) { cases := []testCase{ expect("A=B", attribute.String("A", "2"), attribute.String("A", "B")), expect("A=B", attribute.String("A", "2"), attribute.Int("A", 1), attribute.String("A", "B")), - expect("A=B", attribute.String("A", "B"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B")), + expect( + "A=B", + attribute.String("A", "B"), + attribute.String("A", "C"), + attribute.String("A", "D"), + attribute.String("A", "B"), + ), expect("A=B,C=D", attribute.String("A", "1"), attribute.String("C", "D"), attribute.String("A", "B")), expect("A=B,C=D", attribute.String("A", "2"), attribute.String("A", "B"), attribute.String("C", "D")), - expect("A=B,C=D", attribute.Float64("C", 1.2), attribute.String("A", "2"), attribute.String("A", "B"), attribute.String("C", "D")), - expect("A=B,C=D", attribute.String("C", "D"), attribute.String("A", "B"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B")), - expect("A=B,C=D", attribute.String("A", "B"), attribute.String("C", "D"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B")), - expect("A=B,C=D", attribute.String("A", "B"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B"), attribute.String("C", "D")), + expect( + "A=B,C=D", + attribute.Float64("C", 1.2), + attribute.String("A", "2"), + attribute.String("A", "B"), + attribute.String("C", "D"), + ), + expect( + "A=B,C=D", + attribute.String("C", "D"), + attribute.String("A", "B"), + attribute.String("A", "C"), + attribute.String("A", "D"), + attribute.String("A", "B"), + ), + expect( + "A=B,C=D", + attribute.String("A", "B"), + attribute.String("C", "D"), + attribute.String("A", "C"), + attribute.String("A", "D"), + attribute.String("A", "B"), + ), + expect( + "A=B,C=D", + attribute.String("A", "B"), + attribute.String("A", "C"), + attribute.String("A", "D"), + attribute.String("A", "B"), + attribute.String("C", "D"), + ), } enc := attribute.DefaultEncoder() diff --git a/baggage/baggage_test.go b/baggage/baggage_test.go index e25ed4b8d..fed443e6e 100644 --- a/baggage/baggage_test.go +++ b/baggage/baggage_test.go @@ -1173,7 +1173,9 @@ func BenchmarkParse(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - benchBaggage, _ = Parse("userId=alice,serverNode = DF28 , isProduction = false,hasProp=stuff;propKey;propWValue=value, invalidUtf8=pr%ffo%ffp%fcValue") + benchBaggage, _ = Parse( + "userId=alice,serverNode = DF28 , isProduction = false,hasProp=stuff;propKey;propWValue=value, invalidUtf8=pr%ffo%ffp%fcValue", + ) } } diff --git a/bridge/opencensus/internal/oc2otel/attributes_test.go b/bridge/opencensus/internal/oc2otel/attributes_test.go index 7e3efeed7..b0d8e203d 100644 --- a/bridge/opencensus/internal/oc2otel/attributes_test.go +++ b/bridge/opencensus/internal/oc2otel/attributes_test.go @@ -56,7 +56,11 @@ func TestAttributesFromMap(t *testing.T) { gotAttributeSet := attribute.NewSet(got...) wantAttributeSet := attribute.NewSet(want...) 
if !gotAttributeSet.Equals(&wantAttributeSet) { - t.Errorf("Attributes conversion want %v, got %v", wantAttributeSet.Encoded(attribute.DefaultEncoder()), gotAttributeSet.Encoded(attribute.DefaultEncoder())) + t.Errorf( + "Attributes conversion want %v, got %v", + wantAttributeSet.Encoded(attribute.DefaultEncoder()), + gotAttributeSet.Encoded(attribute.DefaultEncoder()), + ) } } diff --git a/bridge/opencensus/internal/ocmetric/metric.go b/bridge/opencensus/internal/ocmetric/metric.go index ae04013a3..70ab55bb5 100644 --- a/bridge/opencensus/internal/ocmetric/metric.go +++ b/bridge/opencensus/internal/ocmetric/metric.go @@ -26,7 +26,9 @@ var ( errNegativeCount = errors.New("distribution or summary count is negative") errNegativeBucketCount = errors.New("distribution bucket count is negative") errMismatchedAttributeKeyValues = errors.New("mismatched number of attribute keys and values") - errInvalidExemplarSpanContext = errors.New("span context exemplar attachment does not contain an OpenCensus SpanContext") + errInvalidExemplarSpanContext = errors.New( + "span context exemplar attachment does not contain an OpenCensus SpanContext", + ) ) // ConvertMetrics converts metric data from OpenCensus to OpenTelemetry. @@ -76,20 +78,29 @@ func convertAggregation(metric *ocmetricdata.Metric) (metricdata.Aggregation, er } // convertGauge converts an OpenCensus gauge to an OpenTelemetry gauge aggregation. -func convertGauge[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Gauge[N], error) { +func convertGauge[N int64 | float64]( + labelKeys []ocmetricdata.LabelKey, + ts []*ocmetricdata.TimeSeries, +) (metricdata.Gauge[N], error) { points, err := convertNumberDataPoints[N](labelKeys, ts) return metricdata.Gauge[N]{DataPoints: points}, err } // convertSum converts an OpenCensus cumulative to an OpenTelemetry sum aggregation. -func convertSum[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Sum[N], error) { +func convertSum[N int64 | float64]( + labelKeys []ocmetricdata.LabelKey, + ts []*ocmetricdata.TimeSeries, +) (metricdata.Sum[N], error) { points, err := convertNumberDataPoints[N](labelKeys, ts) // OpenCensus sums are always Cumulative return metricdata.Sum[N]{DataPoints: points, Temporality: metricdata.CumulativeTemporality, IsMonotonic: true}, err } // convertNumberDataPoints converts OpenCensus TimeSeries to OpenTelemetry DataPoints. -func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) ([]metricdata.DataPoint[N], error) { +func convertNumberDataPoints[N int64 | float64]( + labelKeys []ocmetricdata.LabelKey, + ts []*ocmetricdata.TimeSeries, +) ([]metricdata.DataPoint[N], error) { var points []metricdata.DataPoint[N] var err error for _, t := range ts { @@ -117,7 +128,10 @@ func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKe // convertHistogram converts OpenCensus Distribution timeseries to an // OpenTelemetry Histogram aggregation. 
-func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Histogram[float64], error) { +func convertHistogram( + labelKeys []ocmetricdata.LabelKey, + ts []*ocmetricdata.TimeSeries, +) (metricdata.Histogram[float64], error) { points := make([]metricdata.HistogramDataPoint[float64], 0, len(ts)) var err error for _, t := range ts { @@ -390,7 +404,12 @@ func convertQuantiles(snapshot ocmetricdata.Snapshot) []metricdata.QuantileValue // OpenTelemetry attribute Set. func convertAttrs(keys []ocmetricdata.LabelKey, values []ocmetricdata.LabelValue) (attribute.Set, error) { if len(keys) != len(values) { - return attribute.NewSet(), fmt.Errorf("%w: keys(%q) values(%q)", errMismatchedAttributeKeyValues, len(keys), len(values)) + return attribute.NewSet(), fmt.Errorf( + "%w: keys(%q) values(%q)", + errMismatchedAttributeKeyValues, + len(keys), + len(values), + ) } attrs := []attribute.KeyValue{} for i, lv := range values { diff --git a/bridge/opencensus/internal/ocmetric/metric_test.go b/bridge/opencensus/internal/ocmetric/metric_test.go index afdcfaa9e..f60ace316 100644 --- a/bridge/opencensus/internal/ocmetric/metric_test.go +++ b/bridge/opencensus/internal/ocmetric/metric_test.go @@ -989,10 +989,22 @@ func TestConvertAttributes(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { output, err := convertAttrs(tc.inputKeys, tc.inputValues) if !errors.Is(err, tc.expectedErr) { - t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr) + t.Errorf( + "convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", + tc.inputKeys, + tc.inputValues, + err, + tc.expectedErr, + ) } if !output.Equals(tc.expected) { - t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice()) + t.Errorf( + "convertAttrs(keys: %v, values: %v) = %+v, want %+v", + tc.inputKeys, + tc.inputValues, + output.ToSlice(), + tc.expected.ToSlice(), + ) } }) } diff --git a/bridge/opencensus/internal/span_test.go b/bridge/opencensus/internal/span_test.go index 3844add62..de06a86a3 100644 --- a/bridge/opencensus/internal/span_test.go +++ b/bridge/opencensus/internal/span_test.go @@ -124,10 +124,18 @@ func TestSpanSetStatus(t *testing.T) { ocS.SetStatus(status) if s.sCode != tt.wantCode { - t.Errorf("span.SetStatus failed to set OpenTelemetry status code. Expected %d, got %d", tt.wantCode, s.sCode) + t.Errorf( + "span.SetStatus failed to set OpenTelemetry status code. Expected %d, got %d", + tt.wantCode, + s.sCode, + ) } if s.sMsg != tt.message { - t.Errorf("span.SetStatus failed to set OpenTelemetry status description. Expected %s, got %s", tt.message, s.sMsg) + t.Errorf( + "span.SetStatus failed to set OpenTelemetry status description. Expected %s, got %s", + tt.message, + s.sMsg, + ) } }) } @@ -295,12 +303,22 @@ func TestSpanAddLinkFails(t *testing.T) { for i, l := range s.links { if !l.SpanContext.Equal(wantLinks[i].SpanContext) { - t.Errorf("link[%v] has the wrong span context; want %+v, got %+v", i, wantLinks[i].SpanContext, l.SpanContext) + t.Errorf( + "link[%v] has the wrong span context; want %+v, got %+v", + i, + wantLinks[i].SpanContext, + l.SpanContext, + ) } gotAttributeSet := attribute.NewSet(l.Attributes...) wantAttributeSet := attribute.NewSet(wantLinks[i].Attributes...) 
if !gotAttributeSet.Equals(&wantAttributeSet) { - t.Errorf("link[%v] has the wrong attributes; want %v, got %v", i, wantAttributeSet.Encoded(attribute.DefaultEncoder()), gotAttributeSet.Encoded(attribute.DefaultEncoder())) + t.Errorf( + "link[%v] has the wrong attributes; want %v, got %v", + i, + wantAttributeSet.Encoded(attribute.DefaultEncoder()), + gotAttributeSet.Encoded(attribute.DefaultEncoder()), + ) } } } diff --git a/bridge/opencensus/internal/tracer.go b/bridge/opencensus/internal/tracer.go index 131646845..bb1b0839c 100644 --- a/bridge/opencensus/internal/tracer.go +++ b/bridge/opencensus/internal/tracer.go @@ -25,7 +25,11 @@ func NewTracer(tracer trace.Tracer) octrace.Tracer { // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, it creates a new trace and span. -func (o *Tracer) StartSpan(ctx context.Context, name string, s ...octrace.StartOption) (context.Context, *octrace.Span) { +func (o *Tracer) StartSpan( + ctx context.Context, + name string, + s ...octrace.StartOption, +) (context.Context, *octrace.Span) { otelOpts, err := oc2otel.StartOptions(s) if err != nil { Handle(fmt.Errorf("starting span %q: %w", name, err)) @@ -36,7 +40,12 @@ func (o *Tracer) StartSpan(ctx context.Context, name string, s ...octrace.StartO // StartSpanWithRemoteParent starts a new child span of the span from the // given parent. -func (o *Tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent octrace.SpanContext, s ...octrace.StartOption) (context.Context, *octrace.Span) { +func (o *Tracer) StartSpanWithRemoteParent( + ctx context.Context, + name string, + parent octrace.SpanContext, + s ...octrace.StartOption, +) (context.Context, *octrace.Span) { // make sure span context is zeroed out so we use the remote parent ctx = trace.ContextWithSpan(ctx, nil) ctx = trace.ContextWithRemoteSpanContext(ctx, oc2otel.SpanContext(parent)) @@ -53,6 +62,8 @@ func (o *Tracer) NewContext(parent context.Context, s *octrace.Span) context.Con if otSpan, ok := s.Internal().(*Span); ok { return trace.ContextWithSpan(parent, otSpan.otelSpan) } - Handle(fmt.Errorf("unable to create context with span %q, since it was created using a different tracer", s.String())) + Handle( + fmt.Errorf("unable to create context with span %q, since it was created using a different tracer", s.String()), + ) return parent } diff --git a/bridge/opencensus/test/bridge_test.go b/bridge/opencensus/test/bridge_test.go index 1fe74fe4f..bb02da1c2 100644 --- a/bridge/opencensus/test/bridge_test.go +++ b/bridge/opencensus/test/bridge_test.go @@ -92,7 +92,11 @@ func TestStartSpanWithRemoteParent(t *testing.T) { ctx := context.Background() ctx, parent := tracer.Start(ctx, "OpenTelemetrySpan1") - _, span := octrace.StartSpanWithRemoteParent(ctx, "OpenCensusSpan", ocbridge.OTelSpanContextToOC(parent.SpanContext())) + _, span := octrace.StartSpanWithRemoteParent( + ctx, + "OpenCensusSpan", + ocbridge.OTelSpanContextToOC(parent.SpanContext()), + ) span.End() spans := sr.Ended() diff --git a/bridge/opentracing/bridge.go b/bridge/opentracing/bridge.go index 848f028c8..c26a7d7d8 100644 --- a/bridge/opentracing/bridge.go +++ b/bridge/opentracing/bridge.go @@ -283,7 +283,9 @@ type bridgeSetTracer struct { func (s *bridgeSetTracer) tracer() trace.Tracer { if !s.isSet { s.warnOnce.Do(func() { - s.warningHandler("The OpenTelemetry tracer is not set, default no-op tracer is used! 
Call SetOpenTelemetryTracer to set it up.\n") + s.warningHandler( + "The OpenTelemetry tracer is not set, default no-op tracer is used! Call SetOpenTelemetryTracer to set it up.\n", + ) }) } return s.otelTracer @@ -362,7 +364,9 @@ func (t *BridgeTracer) baggageSetHook(ctx context.Context, list iBaggage.List) c } bSpan, ok := span.(*bridgeSpan) if !ok { - t.warningHandler("Encountered a foreign OpenTracing span, will not propagate the baggage items from OpenTelemetry context\n") + t.warningHandler( + "Encountered a foreign OpenTracing span, will not propagate the baggage items from OpenTelemetry context\n", + ) return ctx } for k, v := range list { @@ -374,12 +378,16 @@ func (t *BridgeTracer) baggageSetHook(ctx context.Context, list iBaggage.List) c func (t *BridgeTracer) baggageGetHook(ctx context.Context, list iBaggage.List) iBaggage.List { span := ot.SpanFromContext(ctx) if span == nil { - t.warningHandler("No active OpenTracing span, can not propagate the baggage items from OpenTracing span context\n") + t.warningHandler( + "No active OpenTracing span, can not propagate the baggage items from OpenTracing span context\n", + ) return list } bSpan, ok := span.(*bridgeSpan) if !ok { - t.warningHandler("Encountered a foreign OpenTracing span, will not propagate the baggage items from OpenTracing span context\n") + t.warningHandler( + "Encountered a foreign OpenTracing span, will not propagate the baggage items from OpenTracing span context\n", + ) return list } items := bSpan.extraBaggageItems @@ -427,7 +435,9 @@ func (t *BridgeTracer) StartSpan(operationName string, opts ...ot.StartSpanOptio ) if ot.SpanFromContext(checkCtx2) != nil { t.warnOnce.Do(func() { - t.warningHandler("SDK should have deferred the context setup, see the documentation of go.opentelemetry.io/otel/bridge/opentracing/migration\n") + t.warningHandler( + "SDK should have deferred the context setup, see the documentation of go.opentelemetry.io/otel/bridge/opentracing/migration\n", + ) }) } if hadTrueErrorTag { @@ -473,7 +483,9 @@ func (t *BridgeTracer) ContextWithBridgeSpan(ctx context.Context, span trace.Spa func (t *BridgeTracer) ContextWithSpanHook(ctx context.Context, span ot.Span) context.Context { bSpan, ok := span.(*bridgeSpan) if !ok { - t.warningHandler("Encountered a foreign OpenTracing span, will not run a possible deferred context setup hook\n") + t.warningHandler( + "Encountered a foreign OpenTracing span, will not run a possible deferred context setup hook\n", + ) return ctx } if bSpan.skipDeferHook { diff --git a/bridge/opentracing/bridge_test.go b/bridge/opentracing/bridge_test.go index 67c97768e..bffbcfca7 100644 --- a/bridge/opentracing/bridge_test.go +++ b/bridge/opentracing/bridge_test.go @@ -368,7 +368,11 @@ type nonDeferWrapperTracer struct { *WrapperTracer } -func (t *nonDeferWrapperTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { +func (t *nonDeferWrapperTracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { // Run start on the parent wrapper with a brand new context // so `WithDeferredSetup` hasn't been called, and the OpenTracing context is injected. return t.WrapperTracer.Start(context.Background(), name, opts...) 
diff --git a/bridge/opentracing/internal/mock.go b/bridge/opentracing/internal/mock.go index c2025c0b8..bf99d1db7 100644 --- a/bridge/opentracing/internal/mock.go +++ b/bridge/opentracing/internal/mock.go @@ -63,7 +63,11 @@ func NewMockTracer() *MockTracer { } } -func (t *MockTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { +func (t *MockTracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(opts...) startTime := config.Timestamp() if startTime.IsZero() { diff --git a/bridge/opentracing/mix_test.go b/bridge/opentracing/mix_test.go index 0fd54f3da..45dce83af 100644 --- a/bridge/opentracing/mix_test.go +++ b/bridge/opentracing/mix_test.go @@ -177,20 +177,38 @@ func (cast *currentActiveSpanTest) setup(t *testing.T, tracer *internal.MockTrac func (cast *currentActiveSpanTest) check(t *testing.T, tracer *internal.MockTracer) { checkTraceAndSpans(t, tracer, cast.traceID, cast.spanIDs) if len(cast.recordedCurrentOtelSpanIDs) != len(cast.spanIDs) { - t.Errorf("Expected to have %d recorded Otel current spans, got %d", len(cast.spanIDs), len(cast.recordedCurrentOtelSpanIDs)) + t.Errorf( + "Expected to have %d recorded Otel current spans, got %d", + len(cast.spanIDs), + len(cast.recordedCurrentOtelSpanIDs), + ) } if len(cast.recordedActiveOTSpanIDs) != len(cast.spanIDs) { - t.Errorf("Expected to have %d recorded OT active spans, got %d", len(cast.spanIDs), len(cast.recordedActiveOTSpanIDs)) + t.Errorf( + "Expected to have %d recorded OT active spans, got %d", + len(cast.spanIDs), + len(cast.recordedActiveOTSpanIDs), + ) } minLen := min(len(cast.recordedCurrentOtelSpanIDs), len(cast.spanIDs)) minLen = min(minLen, len(cast.recordedActiveOTSpanIDs)) for i := 0; i < minLen; i++ { if cast.recordedCurrentOtelSpanIDs[i] != cast.spanIDs[i] { - t.Errorf("Expected span idx %d (%d) to be recorded as current span in Otel, got %d", i, cast.spanIDs[i], cast.recordedCurrentOtelSpanIDs[i]) + t.Errorf( + "Expected span idx %d (%d) to be recorded as current span in Otel, got %d", + i, + cast.spanIDs[i], + cast.recordedCurrentOtelSpanIDs[i], + ) } if cast.recordedActiveOTSpanIDs[i] != cast.spanIDs[i] { - t.Errorf("Expected span idx %d (%d) to be recorded as active span in OT, got %d", i, cast.spanIDs[i], cast.recordedActiveOTSpanIDs[i]) + t.Errorf( + "Expected span idx %d (%d) to be recorded as active span in OT, got %d", + i, + cast.spanIDs[i], + cast.recordedActiveOTSpanIDs[i], + ) } } } @@ -264,7 +282,11 @@ func (coin *contextIntactTest) setup(t *testing.T, tracer *internal.MockTracer) func (coin *contextIntactTest) check(t *testing.T, tracer *internal.MockTracer) { if len(coin.recordedContextValues) != len(coin.contextKeyValues) { - t.Errorf("Expected to have %d recorded context values, got %d", len(coin.contextKeyValues), len(coin.recordedContextValues)) + t.Errorf( + "Expected to have %d recorded context values, got %d", + len(coin.contextKeyValues), + len(coin.recordedContextValues), + ) } minLen := min(len(coin.recordedContextValues), len(coin.contextKeyValues)) @@ -344,7 +366,12 @@ func (bip *baggageItemsPreservationTest) check(t *testing.T, tracer *internal.Mo for i := 0; i < minLen; i++ { recordedItems := bip.recordedBaggage[i] if len(recordedItems) != i+1 { - t.Errorf("Expected %d recorded baggage items in recording %d, got %d", i+1, i+1, len(bip.recordedBaggage[i])) + t.Errorf( + "Expected %d recorded baggage items in recording %d, got %d", + 
i+1, + i+1, + len(bip.recordedBaggage[i]), + ) } minItemLen := min(len(bip.baggageItems), i+1) for j := 0; j < minItemLen; j++ { @@ -452,7 +479,13 @@ func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage, recordedItems := recordings[i] expectedItemsInStep := (i + 1) * 2 if expectedItemsInStep != len(recordedItems) { - t.Errorf("Expected %d recorded items in recording %d from %s, got %d", expectedItemsInStep, i, apiDesc, len(recordedItems)) + t.Errorf( + "Expected %d recorded items in recording %d from %s, got %d", + expectedItemsInStep, + i, + apiDesc, + len(recordedItems), + ) } recordedItemsCopy := make(map[string]string, len(recordedItems)) for k, v := range recordedItems { @@ -464,7 +497,14 @@ func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage, for _, k := range []string{otKey, otelKey} { if v, ok := recordedItemsCopy[k]; ok { if value != v { - t.Errorf("Expected value %s under key %s in recording %d from %s, got %s", value, k, i, apiDesc, v) + t.Errorf( + "Expected value %s under key %s in recording %d from %s, got %s", + value, + k, + i, + apiDesc, + v, + ) } delete(recordedItemsCopy, k) } else { @@ -537,7 +577,12 @@ func generateBaggageKeys(key string) (otKey, otelKey string) { // helpers -func checkTraceAndSpans(t *testing.T, tracer *internal.MockTracer, expectedTraceID trace.TraceID, expectedSpanIDs []trace.SpanID) { +func checkTraceAndSpans( + t *testing.T, + tracer *internal.MockTracer, + expectedTraceID trace.TraceID, + expectedSpanIDs []trace.SpanID, +) { expectedSpanCount := len(expectedSpanIDs) // reverse spanIDs, since first span ID belongs to root, that @@ -562,7 +607,13 @@ func checkTraceAndSpans(t *testing.T, tracer *internal.MockTracer, expectedTrace for idx, span := range tracer.FinishedSpans { sctx := span.SpanContext() if sctx.TraceID() != expectedTraceID { - t.Errorf("Expected trace ID %v in span %d (%d), got %v", expectedTraceID, idx, sctx.SpanID(), sctx.TraceID()) + t.Errorf( + "Expected trace ID %v in span %d (%d), got %v", + expectedTraceID, + idx, + sctx.SpanID(), + sctx.TraceID(), + ) } expectedSpanID := spanIDs[idx] expectedParentSpanID := parentSpanIDs[idx] @@ -570,10 +621,22 @@ func checkTraceAndSpans(t *testing.T, tracer *internal.MockTracer, expectedTrace t.Errorf("Expected finished span %d to have span ID %d, but got %d", idx, expectedSpanID, sctx.SpanID()) } if span.ParentSpanID != expectedParentSpanID { - t.Errorf("Expected finished span %d (span ID: %d) to have parent span ID %d, but got %d", idx, sctx.SpanID(), expectedParentSpanID, span.ParentSpanID) + t.Errorf( + "Expected finished span %d (span ID: %d) to have parent span ID %d, but got %d", + idx, + sctx.SpanID(), + expectedParentSpanID, + span.ParentSpanID, + ) } if span.SpanKind != sks[span.SpanContext().SpanID()] { - t.Errorf("Expected finished span %d (span ID: %d) to have span.kind to be '%v' but was '%v'", idx, sctx.SpanID(), sks[span.SpanContext().SpanID()], span.SpanKind) + t.Errorf( + "Expected finished span %d (span ID: %d) to have span.kind to be '%v' but was '%v'", + idx, + sctx.SpanID(), + sks[span.SpanContext().SpanID()], + span.SpanKind, + ) } } } @@ -600,7 +663,12 @@ func simpleSpanIDs(count int) []trace.SpanID { return base[:count] } -func runOtelOTOtel(t *testing.T, ctx context.Context, name string, callback func(*testing.T, context.Context) context.Context) { +func runOtelOTOtel( + t *testing.T, + ctx context.Context, + name string, + callback func(*testing.T, context.Context) context.Context, +) { tr := otel.Tracer("") ctx, 
span := tr.Start(ctx, fmt.Sprintf("%s_Otel_OTOtel", name), trace.WithSpanKind(trace.SpanKindClient)) defer span.End() @@ -610,16 +678,29 @@ func runOtelOTOtel(t *testing.T, ctx context.Context, name string, callback func defer span.Finish() ctx2 = callback(t, ctx2) func(ctx3 context.Context) { - ctx3, span := tr.Start(ctx3, fmt.Sprintf("%sOtelOT_Otel_", name), trace.WithSpanKind(trace.SpanKindProducer)) + ctx3, span := tr.Start( + ctx3, + fmt.Sprintf("%sOtelOT_Otel_", name), + trace.WithSpanKind(trace.SpanKindProducer), + ) defer span.End() _ = callback(t, ctx3) }(ctx2) }(ctx) } -func runOTOtelOT(t *testing.T, ctx context.Context, name string, callback func(*testing.T, context.Context) context.Context) { +func runOTOtelOT( + t *testing.T, + ctx context.Context, + name string, + callback func(*testing.T, context.Context) context.Context, +) { tr := otel.Tracer("") - span, ctx := ot.StartSpanFromContext(ctx, fmt.Sprintf("%s_OT_OtelOT", name), ot.Tag{Key: "span.kind", Value: "client"}) + span, ctx := ot.StartSpanFromContext( + ctx, + fmt.Sprintf("%s_OT_OtelOT", name), + ot.Tag{Key: "span.kind", Value: "client"}, + ) defer span.Finish() ctx = callback(t, ctx) func(ctx2 context.Context) { @@ -627,7 +708,11 @@ func runOTOtelOT(t *testing.T, ctx context.Context, name string, callback func(* defer span.End() ctx2 = callback(t, ctx2) func(ctx3 context.Context) { - span, ctx3 := ot.StartSpanFromContext(ctx3, fmt.Sprintf("%sOTOtel_OT_", name), ot.Tag{Key: "span.kind", Value: "producer"}) + span, ctx3 := ot.StartSpanFromContext( + ctx3, + fmt.Sprintf("%sOTOtel_OT_", name), + ot.Tag{Key: "span.kind", Value: "producer"}, + ) defer span.Finish() _ = callback(t, ctx3) }(ctx2) diff --git a/bridge/opentracing/provider_test.go b/bridge/opentracing/provider_test.go index 783662492..f3b616e2a 100644 --- a/bridge/opentracing/provider_test.go +++ b/bridge/opentracing/provider_test.go @@ -73,7 +73,11 @@ func TestTracerProvider(t *testing.T) { return provider.Tracer(foobar, trace.WithInstrumentationAttributes(attribute.String("foo", "bar"))) }, func() trace.Tracer { - return provider.Tracer(foobar, trace.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"), trace.WithInstrumentationAttributes(attribute.String("foo", "bar"))) + return provider.Tracer( + foobar, + trace.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"), + trace.WithInstrumentationAttributes(attribute.String("foo", "bar")), + ) }, } diff --git a/bridge/opentracing/util.go b/bridge/opentracing/util.go index 585e2dee4..a1ec486d3 100644 --- a/bridge/opentracing/util.go +++ b/bridge/opentracing/util.go @@ -25,7 +25,10 @@ func NewTracerPair(tracer trace.Tracer) (*BridgeTracer, *WrapperTracerProvider) // NewTracerPairWithContext is a convenience function. It calls NewTracerPair // and returns a hooked version of ctx with the created BridgeTracer along // with the BridgeTracer and WrapperTracerProvider. 
-func NewTracerPairWithContext(ctx context.Context, tracer trace.Tracer) (context.Context, *BridgeTracer, *WrapperTracerProvider) { +func NewTracerPairWithContext( + ctx context.Context, + tracer trace.Tracer, +) (context.Context, *BridgeTracer, *WrapperTracerProvider) { bridgeTracer, wrapperProvider := NewTracerPair(tracer) ctx = bridgeTracer.NewHookedContext(ctx) return ctx, bridgeTracer, wrapperProvider diff --git a/bridge/opentracing/wrapper.go b/bridge/opentracing/wrapper.go index 13b91c240..79be509b8 100644 --- a/bridge/opentracing/wrapper.go +++ b/bridge/opentracing/wrapper.go @@ -76,7 +76,11 @@ func (t *WrapperTracer) otelTracer() trace.Tracer { // Start forwards the call to the wrapped tracer. It also tries to // override the tracer of the returned span if the span implements the // OverrideTracerSpanExtension interface. -func (t *WrapperTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { +func (t *WrapperTracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { ctx, span := t.otelTracer().Start(ctx, name, opts...) if spanWithExtension, ok := span.(migration.OverrideTracerSpanExtension); ok { spanWithExtension.OverrideTracer(t) diff --git a/exporters/otlp/otlplog/otlploggrpc/client_test.go b/exporters/otlp/otlplog/otlploggrpc/client_test.go index b581620d8..bcc739402 100644 --- a/exporters/otlp/otlplog/otlploggrpc/client_test.go +++ b/exporters/otlp/otlplog/otlploggrpc/client_test.go @@ -427,7 +427,10 @@ func newGRPCCollector(endpoint string, resultCh <-chan exportResult) (*grpcColle } // Export handles the export req. -func (c *grpcCollector) Export(ctx context.Context, req *collogpb.ExportLogsServiceRequest) (*collogpb.ExportLogsServiceResponse, error) { +func (c *grpcCollector) Export( + ctx context.Context, + req *collogpb.ExportLogsServiceRequest, +) (*collogpb.ExportLogsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { diff --git a/exporters/otlp/otlplog/otlploghttp/client_test.go b/exporters/otlp/otlplog/otlploghttp/client_test.go index 607dea9f0..97ae311ca 100644 --- a/exporters/otlp/otlplog/otlploghttp/client_test.go +++ b/exporters/otlp/otlplog/otlploghttp/client_test.go @@ -229,7 +229,11 @@ type httpCollector struct { // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. 
-func newHTTPCollector(endpoint string, resultCh <-chan exportResult, opts ...func(*httpCollector)) (*httpCollector, error) { +func newHTTPCollector( + endpoint string, + resultCh <-chan exportResult, + opts ...func(*httpCollector), +) (*httpCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go index 7ae53f2d1..371be0120 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go @@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "METRICS_CLIENT_CERTIFICATE", + "METRICS_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), @@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption { WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), - withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), - withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + withEnvTemporalityPreference( + "METRICS_TEMPORALITY_PREFERENCE", + func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }, + ), + withEnvAggPreference( + "METRICS_DEFAULT_HISTOGRAM_AGGREGATION", + func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }, + ), ) return opts @@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) case "lowmemory": fn(lowMemory) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", + "value", + s, + ) } } } @@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e return metric.DefaultAggregationSelector(kind) }) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + global.Warn( + 
"OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", + "value", + s, + ) } } } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig_test.go index 5c621d7b4..53b07677c 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig_test.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig_test.go @@ -113,12 +113,24 @@ func TestWithEnvAggPreference(t *testing.T) { name: "explicit_bucket_histogram", envValue: "explicit_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ - metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), - metric.InstrumentKindHistogram: metric.DefaultAggregationSelector(metric.InstrumentKindHistogram), - metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), - metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), - metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), - metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), + metric.InstrumentKindCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindCounter, + ), + metric.InstrumentKindHistogram: metric.DefaultAggregationSelector( + metric.InstrumentKindHistogram, + ), + metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindUpDownCounter, + ), + metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableCounter, + ), + metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableUpDownCounter, + ), + metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableGauge, + ), }, }, { @@ -131,10 +143,18 @@ func TestWithEnvAggPreference(t *testing.T) { MaxScale: 20, NoMinMax: false, }, - metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), - metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), - metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), - metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), + metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindUpDownCounter, + ), + metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableCounter, + ), + metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableUpDownCounter, + ), + metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableGauge, + ), }, }, } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go index b3e8c531b..674dd48c5 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go @@ -142,7 +142,10 @@ func (c *GRPCCollector) Headers() map[string][]string { } // 
Export handles the export req. -func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) { +func (c *GRPCCollector) Export( + ctx context.Context, + req *collpb.ExportMetricsServiceRequest, +) (*collpb.ExportMetricsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { @@ -208,7 +211,11 @@ type HTTPCollector struct { // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. -func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) { +func NewHTTPCollector( + endpoint string, + resultCh <-chan ExportResult, + opts ...func(*HTTPCollector), +) (*HTTPCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess_test.go index 56cb8b0ff..15dfa8981 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess_test.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess_test.go @@ -28,6 +28,10 @@ func requireErrorString(t *testing.T, expect string, err error) { func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) - requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) + requireErrorString( + t, + "what happened (10 metric data points rejected)", + MetricPartialSuccessError(10, "what happened"), + ) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go index abf7f0219..8d1169a8b 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go @@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. -func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { +func ExponentialHistogram[N int64 | float64]( + h metricdata.ExponentialHistogram[N], +) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err @@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. 
-func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { +func ExponentialHistogramDataPoints[N int64 | float64]( + dPts []metricdata.ExponentialHistogramDataPoint[N], +) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) @@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. -func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { +func ExponentialHistogramDataPointBuckets( + bucket metricdata.ExponentialBucket, +) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go index 89b134a39..47cf034cc 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go @@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "METRICS_CLIENT_CERTIFICATE", + "METRICS_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), @@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption { WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), - withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), - withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + withEnvTemporalityPreference( + "METRICS_TEMPORALITY_PREFERENCE", + func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }, + ), + withEnvAggPreference( + "METRICS_DEFAULT_HISTOGRAM_AGGREGATION", + func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }, + ), ) return opts @@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn 
func(metric.TemporalitySelector)) case "lowmemory": fn(lowMemory) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", + "value", + s, + ) } } } @@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e return metric.DefaultAggregationSelector(kind) }) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", + "value", + s, + ) } } } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig_test.go index 5c621d7b4..53b07677c 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig_test.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig_test.go @@ -113,12 +113,24 @@ func TestWithEnvAggPreference(t *testing.T) { name: "explicit_bucket_histogram", envValue: "explicit_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ - metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), - metric.InstrumentKindHistogram: metric.DefaultAggregationSelector(metric.InstrumentKindHistogram), - metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), - metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), - metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), - metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), + metric.InstrumentKindCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindCounter, + ), + metric.InstrumentKindHistogram: metric.DefaultAggregationSelector( + metric.InstrumentKindHistogram, + ), + metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindUpDownCounter, + ), + metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableCounter, + ), + metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableUpDownCounter, + ), + metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableGauge, + ), }, }, { @@ -131,10 +143,18 @@ func TestWithEnvAggPreference(t *testing.T) { MaxScale: 20, NoMinMax: false, }, - metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), - metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), - metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), - metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), + metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindUpDownCounter, + ), + metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableCounter, + ), + metric.InstrumentKindObservableUpDownCounter: 
metric.DefaultAggregationSelector( + metric.InstrumentKindObservableUpDownCounter, + ), + metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableGauge, + ), }, }, } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go index 82a7f974e..dec1889f8 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go @@ -142,7 +142,10 @@ func (c *GRPCCollector) Headers() map[string][]string { } // Export handles the export req. -func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) { +func (c *GRPCCollector) Export( + ctx context.Context, + req *collpb.ExportMetricsServiceRequest, +) (*collpb.ExportMetricsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { @@ -208,7 +211,11 @@ type HTTPCollector struct { // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. -func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) { +func NewHTTPCollector( + endpoint string, + resultCh <-chan ExportResult, + opts ...func(*HTTPCollector), +) (*HTTPCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess_test.go index 56cb8b0ff..15dfa8981 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess_test.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess_test.go @@ -28,6 +28,10 @@ func requireErrorString(t *testing.T, expect string, err error) { func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) - requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) + requireErrorString( + t, + "what happened (10 metric data points rejected)", + MetricPartialSuccessError(10, "what happened"), + ) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go index 8207b15a4..b963b0369 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go @@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. 
-func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { +func ExponentialHistogram[N int64 | float64]( + h metricdata.ExponentialHistogram[N], +) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err @@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. -func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { +func ExponentialHistogramDataPoints[N int64 | float64]( + dPts []metricdata.ExponentialHistogramDataPoint[N], +) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) @@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. -func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { +func ExponentialHistogramDataPointBuckets( + bucket metricdata.ExponentialBucket, +) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, diff --git a/exporters/otlp/otlptrace/internal/tracetransform/span_test.go b/exporters/otlp/otlptrace/internal/tracetransform/span_test.go index 9fb85db78..d6e8dcf9d 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/span_test.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/span_test.go @@ -94,8 +94,21 @@ func TestSpanEvent(t *testing.T) { eventTimestamp := uint64(1589932800 * 1e9) assert.Equal(t, &tracepb.Span_Event{Name: "test 1", Attributes: nil, TimeUnixNano: eventTimestamp}, got[0]) // Do not test Attributes directly, just that the return value goes to the correct field. 
- assert.Equal(t, &tracepb.Span_Event{Name: "test 2", Attributes: KeyValues(attrs), TimeUnixNano: eventTimestamp, DroppedAttributesCount: 2}, got[1]) - assert.Equal(t, &tracepb.Span_Event{Name: "test 3", Attributes: KeyValues(attrs), TimeUnixNano: 0, DroppedAttributesCount: 2}, got[2]) + assert.Equal( + t, + &tracepb.Span_Event{ + Name: "test 2", + Attributes: KeyValues(attrs), + TimeUnixNano: eventTimestamp, + DroppedAttributesCount: 2, + }, + got[1], + ) + assert.Equal( + t, + &tracepb.Span_Event{Name: "test 3", Attributes: KeyValues(attrs), TimeUnixNano: 0, DroppedAttributesCount: 2}, + got[2], + ) } func TestNilLinks(t *testing.T) { @@ -220,12 +233,18 @@ func TestSpanData(t *testing.T) { traceState, _ := trace.ParseTraceState("key1=val1,key2=val2") spanData := tracetest.SpanStub{ SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, TraceState: traceState, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, SpanID: trace.SpanID{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8}, TraceState: traceState, Remote: true, @@ -251,7 +270,10 @@ func TestSpanData(t *testing.T) { Links: []tracesdk.Link{ { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF}, + TraceID: trace.TraceID{ + 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, + 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, + }, SpanID: trace.SpanID{0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7}, TraceFlags: 0, }), @@ -262,7 +284,10 @@ func TestSpanData(t *testing.T) { }, { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF}, + TraceID: trace.TraceID{ + 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, + 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, + }, SpanID: trace.SpanID{0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7}, TraceFlags: 0, }), @@ -299,7 +324,10 @@ func TestSpanData(t *testing.T) { // ordering impossible to guarantee on the output. The Resource // transform function has unit tests that should suffice. 
expectedSpan := &tracepb.Span{ - TraceId: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, + TraceId: []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, SpanId: []byte{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, ParentSpanId: []byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8}, TraceState: "key1=val1,key2=val2", diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go index 1dd7631fa..6ea62c346 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go @@ -39,7 +39,11 @@ func TestMain(m *testing.M) { var roSpans = tracetest.SpanStubs{{Name: "Span 0"}}.Snapshots() -func contextWithTimeout(parent context.Context, t *testing.T, timeout time.Duration) (context.Context, context.CancelFunc) { +func contextWithTimeout( + parent context.Context, + t *testing.T, + timeout time.Duration, +) (context.Context, context.CancelFunc) { d, ok := t.Deadline() if !ok { d = time.Now().Add(timeout) @@ -110,7 +114,12 @@ func TestWithEndpointURL(t *testing.T) { otlptracetest.RunEndToEndTest(ctx, t, exp, mc) } -func newGRPCExporter(t *testing.T, ctx context.Context, endpoint string, additionalOpts ...otlptracegrpc.Option) *otlptrace.Exporter { +func newGRPCExporter( + t *testing.T, + ctx context.Context, + endpoint string, + additionalOpts ...otlptracegrpc.Option, +) *otlptrace.Exporter { opts := []otlptracegrpc.Option{ otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint(endpoint), diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go index 7bb189a94..9f5075931 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess_test.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess_test.go index 56cb8b0ff..15dfa8981 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess_test.go @@ -28,6 +28,10 @@ func requireErrorString(t *testing.T, expect string, err error) { func 
TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) - requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) + requireErrorString( + t, + "what happened (10 metric data points rejected)", + MetricPartialSuccessError(10, "what happened"), + ) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } diff --git a/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go b/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go index 27243fd2a..2dda93044 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go @@ -61,7 +61,10 @@ func (mts *mockTraceService) getResourceSpans() []*tracepb.ResourceSpans { return mts.storage.GetResourceSpans() } -func (mts *mockTraceService) Export(ctx context.Context, exp *collectortracepb.ExportTraceServiceRequest) (*collectortracepb.ExportTraceServiceResponse, error) { +func (mts *mockTraceService) Export( + ctx context.Context, + exp *collectortracepb.ExportTraceServiceRequest, +) (*collectortracepb.ExportTraceServiceResponse, error) { mts.mu.Lock() defer func() { mts.requests++ diff --git a/exporters/otlp/otlptrace/otlptracehttp/client_test.go b/exporters/otlp/otlptrace/otlptracehttp/client_test.go index 84e9ab7e6..7f7ad55f6 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/client_test.go +++ b/exporters/otlp/otlptrace/otlptracehttp/client_test.go @@ -271,7 +271,11 @@ func TestNoRetry(t *testing.T) { assert.True(t, strings.HasPrefix(err.Error(), "traces export: ")) unwrapped := errors.Unwrap(err) - assert.Contains(t, unwrapped.Error(), fmt.Sprintf("failed to send to http://%s/v1/traces: 400 Bad Request", mc.endpoint)) + assert.Contains( + t, + unwrapped.Error(), + fmt.Sprintf("failed to send to http://%s/v1/traces: 400 Bad Request", mc.endpoint), + ) unwrapped2 := errors.Unwrap(unwrapped) assert.Contains(t, unwrapped2.Error(), "missing required attribute aaa") diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go index ff4141b6d..e61256403 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go +++ b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), 
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess_test.go b/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess_test.go index 56cb8b0ff..15dfa8981 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess_test.go +++ b/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess_test.go @@ -28,6 +28,10 @@ func requireErrorString(t *testing.T, expect string, err error) { func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) - requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) + requireErrorString( + t, + "what happened (10 metric data points rejected)", + MetricPartialSuccessError(10, "what happened"), + ) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } diff --git a/exporters/prometheus/config.go b/exporters/prometheus/config.go index a743b469d..28d2be2cb 100644 --- a/exporters/prometheus/config.go +++ b/exporters/prometheus/config.go @@ -28,7 +28,9 @@ type config struct { } var logDeprecatedLegacyScheme = sync.OnceFunc(func() { - global.Warn("prometheus exporter legacy scheme deprecated: support for the legacy NameValidationScheme will be removed in the next release") + global.Warn( + "prometheus exporter legacy scheme deprecated: support for the legacy NameValidationScheme will be removed in the next release", + ) }) // newConfig creates a validated config configured with options. diff --git a/exporters/prometheus/exporter.go b/exporters/prometheus/exporter.go index d4fd68d0c..6af50d9fa 100644 --- a/exporters/prometheus/exporter.go +++ b/exporters/prometheus/exporter.go @@ -254,7 +254,13 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } } -func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) { +func addHistogramMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + histogram metricdata.Histogram[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { for _, dp := range histogram.DataPoints { keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) 
@@ -278,7 +284,13 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra } } -func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) { +func addSumMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + sum metricdata.Sum[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { valueType := prometheus.CounterValue if !sum.IsMonotonic { valueType = prometheus.GaugeValue @@ -304,7 +316,13 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata } } -func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) { +func addGaugeMetric[N int64 | float64]( + ch chan<- prometheus.Metric, + gauge metricdata.Gauge[N], + m metricdata.Metrics, + name string, + kv keyVals, +) { for _, dp := range gauge.DataPoints { keys, values := getAttrs(dp.Attributes) keys = append(keys, kv.keys...) diff --git a/exporters/prometheus/exporter_test.go b/exporters/prometheus/exporter_test.go index ab625be81..907bcde42 100644 --- a/exporters/prometheus/exporter_test.go +++ b/exporters/prometheus/exporter_test.go @@ -234,15 +234,24 @@ func TestPrometheusExporter(t *testing.T) { gauge.Add(ctx, -25, opt) // Invalid, will be renamed. - gauge, err = meter.Float64UpDownCounter("invalid.gauge.name", otelmetric.WithDescription("a gauge with an invalid name")) + gauge, err = meter.Float64UpDownCounter( + "invalid.gauge.name", + otelmetric.WithDescription("a gauge with an invalid name"), + ) require.NoError(t, err) gauge.Add(ctx, 100, opt) - counter, err := meter.Float64Counter("0invalid.counter.name", otelmetric.WithDescription("a counter with an invalid name")) + counter, err := meter.Float64Counter( + "0invalid.counter.name", + otelmetric.WithDescription("a counter with an invalid name"), + ) require.ErrorIs(t, err, metric.ErrInstrumentName) counter.Add(ctx, 100, opt) - histogram, err := meter.Float64Histogram("invalid.hist.name", otelmetric.WithDescription("a histogram with an invalid name")) + histogram, err := meter.Float64Histogram( + "invalid.hist.name", + otelmetric.WithDescription("a histogram with an invalid name"), + ) require.NoError(t, err) histogram.Record(ctx, 23, opt) }, diff --git a/exporters/stdout/stdoutlog/exporter_test.go b/exporters/stdout/stdoutlog/exporter_test.go index 45ade0639..77b7fbac8 100644 --- a/exporters/stdout/stdoutlog/exporter_test.go +++ b/exporters/stdout/stdoutlog/exporter_test.go @@ -194,7 +194,11 @@ func getPrettyJSON(now *time.Time) string { var timestamps string if now != nil { serializedNow, _ := json.Marshal(now) - timestamps = "\n\t\"Timestamp\": " + string(serializedNow) + ",\n\t\"ObservedTimestamp\": " + string(serializedNow) + "," + timestamps = "\n\t\"Timestamp\": " + string( + serializedNow, + ) + ",\n\t\"ObservedTimestamp\": " + string( + serializedNow, + ) + "," } return `{` + timestamps + ` @@ -318,8 +322,12 @@ func getRecord(now time.Time) sdklog.Record { "https://example.com/custom-resource-schema", attribute.String("foo", "bar"), ), - InstrumentationScope: &instrumentation.Scope{Name: "name", Version: "version", SchemaURL: "https://example.com/custom-schema"}, - DroppedAttributes: 10, + InstrumentationScope: &instrumentation.Scope{ + Name: "name", + Version: "version", + SchemaURL: "https://example.com/custom-schema", + }, + DroppedAttributes: 10, } return rf.NewRecord() diff --git a/exporters/stdout/stdoutmetric/exporter.go 
b/exporters/stdout/stdoutmetric/exporter.go index fc155d79f..76f15b96b 100644 --- a/exporters/stdout/stdoutmetric/exporter.go +++ b/exporters/stdout/stdoutmetric/exporter.go @@ -131,7 +131,9 @@ func redactAggregationTimestamps(orig metricdata.Aggregation) metricdata.Aggrega } } -func redactHistogramTimestamps[T int64 | float64](hdp []metricdata.HistogramDataPoint[T]) []metricdata.HistogramDataPoint[T] { +func redactHistogramTimestamps[T int64 | float64]( + hdp []metricdata.HistogramDataPoint[T], +) []metricdata.HistogramDataPoint[T] { out := make([]metricdata.HistogramDataPoint[T], len(hdp)) for i, dp := range hdp { out[i] = metricdata.HistogramDataPoint[T]{ diff --git a/exporters/zipkin/model_test.go b/exporters/zipkin/model_test.go index e2cefac46..09330ee97 100644 --- a/exporters/zipkin/model_test.go +++ b/exporters/zipkin/model_test.go @@ -38,12 +38,18 @@ func TestModelConversion(t *testing.T) { // typical span data with UNSET status { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", @@ -80,12 +86,18 @@ func TestModelConversion(t *testing.T) { // typical span data with OK status { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", @@ -119,12 +131,18 @@ func TestModelConversion(t *testing.T) { // typical span data with ERROR status { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: 
trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", @@ -159,8 +177,11 @@ func TestModelConversion(t *testing.T) { // invalid parent) { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), SpanKind: trace.SpanKindServer, Name: "foo", @@ -193,12 +214,18 @@ func TestModelConversion(t *testing.T) { // span data of unspecified kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindUnspecified, Name: "foo", @@ -231,12 +258,18 @@ func TestModelConversion(t *testing.T) { // span data of internal kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindInternal, Name: "foo", @@ -269,12 +302,18 @@ func TestModelConversion(t *testing.T) { // span data of client kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + 
TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindClient, Name: "foo", @@ -310,12 +349,18 @@ func TestModelConversion(t *testing.T) { // span data of producer kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindProducer, Name: "foo", @@ -348,12 +393,18 @@ func TestModelConversion(t *testing.T) { // span data of consumer kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindConsumer, Name: "foo", @@ -386,12 +437,18 @@ func TestModelConversion(t *testing.T) { // span data with no events { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - 
SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", @@ -411,12 +468,18 @@ func TestModelConversion(t *testing.T) { // span data with an "error" attribute set to "false" { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", diff --git a/exporters/zipkin/zipkin_test.go b/exporters/zipkin/zipkin_test.go index e151edaa9..3c748a37d 100644 --- a/exporters/zipkin/zipkin_test.go +++ b/exporters/zipkin/zipkin_test.go @@ -201,8 +201,11 @@ func TestExportSpans(t *testing.T) { // parent { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), SpanKind: trace.SpanKindServer, Name: "foo", @@ -219,12 +222,18 @@ func TestExportSpans(t *testing.T) { // child { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), SpanKind: trace.SpanKindServer, Name: "bar", @@ -394,8 +403,11 @@ func TestWithHeaders(t *testing.T) { spans := tracetest.SpanStubs{ { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, - SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, + TraceID: 
trace.TraceID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + }, + SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), }, }.Snapshots() diff --git a/internal/global/alternate_meter_test.go b/internal/global/alternate_meter_test.go index 428534178..0bc66a29b 100644 --- a/internal/global/alternate_meter_test.go +++ b/internal/global/alternate_meter_test.go @@ -110,7 +110,10 @@ func (am *altMeter) Int64Counter(name string, _ ...metric.Int64CounterOption) (m return noop.NewMeterProvider().Meter("noop").Int64Counter(name) } -func (am *altMeter) Int64UpDownCounter(name string, _ ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (am *altMeter) Int64UpDownCounter( + name string, + _ ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { return noop.NewMeterProvider().Meter("noop").Int64UpDownCounter(name) } @@ -122,19 +125,28 @@ func (am *altMeter) Int64Gauge(name string, _ ...metric.Int64GaugeOption) (metri return noop.NewMeterProvider().Meter("noop").Int64Gauge(name) } -func (am *altMeter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (am *altMeter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return &testAiCounter{ meter: am, }, nil } -func (am *altMeter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (am *altMeter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return &testAiUpDownCounter{ meter: am, }, nil } -func (am *altMeter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (am *altMeter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { return &testAiGauge{ meter: am, }, nil @@ -144,11 +156,17 @@ func (am *altMeter) Float64Counter(name string, _ ...metric.Float64CounterOption return noop.NewMeterProvider().Meter("noop").Float64Counter(name) } -func (am *altMeter) Float64UpDownCounter(name string, _ ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (am *altMeter) Float64UpDownCounter( + name string, + _ ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { return noop.NewMeterProvider().Meter("noop").Float64UpDownCounter(name) } -func (am *altMeter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (am *altMeter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { return noop.NewMeterProvider().Meter("noop").Float64Histogram(name) } @@ -156,19 +174,28 @@ func (am *altMeter) Float64Gauge(name string, options ...metric.Float64GaugeOpti return noop.NewMeterProvider().Meter("noop").Float64Gauge(name) } -func (am *altMeter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (am *altMeter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return &testAfCounter{ meter: am, }, nil } -func (am 
*altMeter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (am *altMeter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return &testAfUpDownCounter{ meter: am, }, nil } -func (am *altMeter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (am *altMeter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return &testAfGauge{ meter: am, }, nil diff --git a/internal/global/meter.go b/internal/global/meter.go index a6acd8dca..adb37b5b0 100644 --- a/internal/global/meter.go +++ b/internal/global/meter.go @@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return i, nil } -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return i, nil } -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return i, nil } -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return i, nil } -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return i, nil } -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -399,7 +417,10 @@ func (m *meter) 
Float64Gauge(name string, options ...metric.Float64GaugeOption) return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return i, nil } -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return i, nil } -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/internal/global/meter_test.go b/internal/global/meter_test.go index b68c36313..7b444d617 100644 --- a/internal/global/meter_test.go +++ b/internal/global/meter_test.go @@ -125,7 +125,10 @@ func TestUnregisterConcurrentSafe(t *testing.T) { <-done } -func testSetupAllInstrumentTypes(t *testing.T, m metric.Meter) (metric.Float64Counter, metric.Float64ObservableCounter) { +func testSetupAllInstrumentTypes( + t *testing.T, + m metric.Meter, +) (metric.Float64Counter, metric.Float64ObservableCounter) { afcounter, err := m.Float64ObservableCounter("test_Async_Counter") require.NoError(t, err) _, err = m.Float64ObservableUpDownCounter("test_Async_UpDownCounter") @@ -439,7 +442,10 @@ type failingRegisterCallbackMeter struct { noop.Meter } -func (m *failingRegisterCallbackMeter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { +func (m *failingRegisterCallbackMeter) RegisterCallback( + metric.Callback, + ...metric.Observable, +) (metric.Registration, error) { return nil, errors.New("an error occurred") } diff --git a/internal/global/meter_types_test.go b/internal/global/meter_types_test.go index f23ae1d63..a0257b34c 100644 --- a/internal/global/meter_types_test.go +++ b/internal/global/meter_types_test.go @@ -51,7 +51,10 @@ func (m *testMeter) Int64Counter(name string, options ...metric.Int64CounterOpti return &testCountingIntInstrument{}, nil } -func (m *testMeter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *testMeter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.siUDCount++ return &testCountingIntInstrument{}, nil } @@ -66,17 +69,26 @@ func (m *testMeter) Int64Gauge(name string, options ...metric.Int64GaugeOption) return &testCountingIntInstrument{}, nil } -func (m *testMeter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *testMeter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.aiCount++ 
return &testCountingIntInstrument{}, nil } -func (m *testMeter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *testMeter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.aiUDCount++ return &testCountingIntInstrument{}, nil } -func (m *testMeter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *testMeter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.aiGauge++ return &testCountingIntInstrument{}, nil } @@ -86,12 +98,18 @@ func (m *testMeter) Float64Counter(name string, options ...metric.Float64Counter return &testCountingFloatInstrument{}, nil } -func (m *testMeter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *testMeter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.sfUDCount++ return &testCountingFloatInstrument{}, nil } -func (m *testMeter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *testMeter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.sfHist++ return &testCountingFloatInstrument{}, nil } @@ -101,17 +119,26 @@ func (m *testMeter) Float64Gauge(name string, options ...metric.Float64GaugeOpti return &testCountingFloatInstrument{}, nil } -func (m *testMeter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *testMeter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.afCount++ return &testCountingFloatInstrument{}, nil } -func (m *testMeter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *testMeter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.afUDCount++ return &testCountingFloatInstrument{}, nil } -func (m *testMeter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *testMeter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.afGauge++ return &testCountingFloatInstrument{}, nil } diff --git a/internal/global/trace.go b/internal/global/trace.go index f7a58cad5..49e4ac4fa 100644 --- a/internal/global/trace.go +++ b/internal/global/trace.go @@ -164,7 +164,12 @@ var autoInstEnabled = new(bool) // "noinline" pragma prevents the method from ever being inlined. // //go:noinline -func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { +func (t *tracer) newSpan( + ctx context.Context, + autoSpan *bool, + name string, + opts []trace.SpanStartOption, +) (context.Context, trace.Span) { // autoInstEnabled is passed to newSpan via the autoSpan parameter. 
This is // so the auto-instrumentation can define a uprobe for (*t).newSpan and be // provided with the address of the bool autoInstEnabled points to. It diff --git a/internal/global/trace_test.go b/internal/global/trace_test.go index 8f094fc28..d513d32c2 100644 --- a/internal/global/trace_test.go +++ b/internal/global/trace_test.go @@ -34,7 +34,11 @@ type fnTracer struct { start func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) } -func (fn fnTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { +func (fn fnTracer) Start( + ctx context.Context, + spanName string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { return fn.start(ctx, spanName, opts...) } diff --git a/internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl b/internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl index 5334c9b39..38f4b31d7 100644 --- a/internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl +++ b/internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl @@ -80,8 +80,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "METRICS_CLIENT_CERTIFICATE", + "METRICS_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), @@ -91,8 +99,14 @@ func getOptionsFromEnv() []GenericOption { WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), - withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), - withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + withEnvTemporalityPreference( + "METRICS_TEMPORALITY_PREFERENCE", + func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }, + ), + withEnvAggPreference( + "METRICS_DEFAULT_HISTOGRAM_AGGREGATION", + func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }, + ), ) return opts @@ -157,7 +171,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) case "lowmemory": fn(lowMemory) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE 
is set to an invalid value, ignoring.", + "value", + s, + ) } } } @@ -203,7 +221,11 @@ func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e return metric.DefaultAggregationSelector(kind) }) default: - global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + global.Warn( + "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", + "value", + s, + ) } } } diff --git a/internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl b/internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl index 5c621d7b4..53b07677c 100644 --- a/internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl +++ b/internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl @@ -113,12 +113,24 @@ func TestWithEnvAggPreference(t *testing.T) { name: "explicit_bucket_histogram", envValue: "explicit_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ - metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), - metric.InstrumentKindHistogram: metric.DefaultAggregationSelector(metric.InstrumentKindHistogram), - metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), - metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), - metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), - metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), + metric.InstrumentKindCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindCounter, + ), + metric.InstrumentKindHistogram: metric.DefaultAggregationSelector( + metric.InstrumentKindHistogram, + ), + metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindUpDownCounter, + ), + metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableCounter, + ), + metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableUpDownCounter, + ), + metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableGauge, + ), }, }, { @@ -131,10 +143,18 @@ func TestWithEnvAggPreference(t *testing.T) { MaxScale: 20, NoMinMax: false, }, - metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), - metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), - metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), - metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), + metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindUpDownCounter, + ), + metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableCounter, + ), + metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableUpDownCounter, + ), + metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector( + metric.InstrumentKindObservableGauge, + ), }, }, } diff --git a/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl 
b/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl index c033aa215..83caab340 100644 --- a/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl +++ b/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl @@ -142,7 +142,10 @@ func (c *GRPCCollector) Headers() map[string][]string { } // Export handles the export req. -func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) { +func (c *GRPCCollector) Export( + ctx context.Context, + req *collpb.ExportMetricsServiceRequest, +) (*collpb.ExportMetricsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { @@ -208,7 +211,11 @@ type HTTPCollector struct { // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. -func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) { +func NewHTTPCollector( + endpoint string, + resultCh <-chan ExportResult, + opts ...func(*HTTPCollector), +) (*HTTPCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err diff --git a/internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl b/internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl index d11ee4331..3318217a2 100644 --- a/internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl +++ b/internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl @@ -203,7 +203,9 @@ func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. -func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { +func ExponentialHistogram[N int64 | float64]( + h metricdata.ExponentialHistogram[N], +) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err @@ -218,7 +220,9 @@ func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. -func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { +func ExponentialHistogramDataPoints[N int64 | float64]( + dPts []metricdata.ExponentialHistogramDataPoint[N], +) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) @@ -250,7 +254,9 @@ func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.Exponen // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. 
-func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { +func ExponentialHistogramDataPointBuckets( + bucket metricdata.ExponentialBucket, +) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, diff --git a/internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl b/internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl index f1fa3dc84..9eab4b0a4 100644 --- a/internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl +++ b/internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl @@ -77,8 +77,16 @@ func getOptionsFromEnv() []GenericOption { }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), - envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), - envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert( + "CLIENT_CERTIFICATE", + "CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), + envconfig.WithClientCert( + "TRACES_CLIENT_CERTIFICATE", + "TRACES_CLIENT_KEY", + func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }, + ), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), diff --git a/internal/shared/otlp/partialsuccess_test.go.tmpl b/internal/shared/otlp/partialsuccess_test.go.tmpl index 56cb8b0ff..15dfa8981 100644 --- a/internal/shared/otlp/partialsuccess_test.go.tmpl +++ b/internal/shared/otlp/partialsuccess_test.go.tmpl @@ -28,6 +28,10 @@ func requireErrorString(t *testing.T, expect string, err error) { func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) - requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) + requireErrorString( + t, + "what happened (10 metric data points rejected)", + MetricPartialSuccessError(10, "what happened"), + ) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } diff --git a/log/logtest/assertions.go b/log/logtest/assertions.go index 479cc3b08..9b6145fe1 100644 --- a/log/logtest/assertions.go +++ b/log/logtest/assertions.go @@ -24,7 +24,11 @@ func AssertRecordEqual(t testing.TB, want, got log.Record) bool { return false } if !want.ObservedTimestamp().Equal(got.ObservedTimestamp()) { - t.Errorf("ObservedTimestamp value is not equal:\nwant: %v\ngot: %v", want.ObservedTimestamp(), got.ObservedTimestamp()) + t.Errorf( + "ObservedTimestamp value is not equal:\nwant: %v\ngot: %v", + want.ObservedTimestamp(), + got.ObservedTimestamp(), + ) return false } if want.Severity() != got.Severity() { diff --git a/metric/asyncfloat64.go b/metric/asyncfloat64.go index f8435d8f2..b7fc973a6 100644 --- a/metric/asyncfloat64.go +++ b/metric/asyncfloat64.go @@ -106,7 +106,9 @@ 
type Float64ObservableUpDownCounterConfig struct { // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. -func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { +func NewFloat64ObservableUpDownCounterConfig( + opts ...Float64ObservableUpDownCounterOption, +) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) @@ -239,12 +241,16 @@ type float64CallbackOpt struct { cback Float64Callback } -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableCounter( + cfg Float64ObservableCounterConfig, +) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( + cfg Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/metric/asyncint64.go b/metric/asyncint64.go index e079aaef1..4404b71a2 100644 --- a/metric/asyncint64.go +++ b/metric/asyncint64.go @@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct { // NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. -func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { +func NewInt64ObservableUpDownCounterConfig( + opts ...Int64ObservableUpDownCounterOption, +) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) @@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter return cfg } -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( + cfg Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/metric/instrument.go b/metric/instrument.go index a535782e1..9f48d5f11 100644 --- a/metric/instrument.go +++ b/metric/instrument.go @@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o descOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o descOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c 
Float64ObservableCounterConfig) return c } -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o unitOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } @@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o unitOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } diff --git a/metric/meter.go b/metric/meter.go index 14e08c24a..fdd2a7011 100644 --- a/metric/meter.go +++ b/metric/meter.go @@ -110,7 +110,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + Int64ObservableUpDownCounter( + name string, + options ...Int64ObservableUpDownCounterOption, + ) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -194,7 +197,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + Float64ObservableUpDownCounter( + name string, + options ...Float64ObservableUpDownCounterOption, + ) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used diff --git a/metric/noop/noop.go b/metric/noop/noop.go index ca6fcbdc0..9afb69e58 100644 --- a/metric/noop/noop.go +++ b/metric/noop/noop.go @@ -86,13 +86,19 @@ func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (Meter) Int64ObservableCounter( + string, + ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (Meter) Int64ObservableUpDownCounter( + string, + ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } @@ -128,19 +134,28 @@ func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64G // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. 
-func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (Meter) Float64ObservableCounter( + string, + ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (Meter) Float64ObservableUpDownCounter( + string, + ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. -func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (Meter) Float64ObservableGauge( + string, + ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } diff --git a/schema/internal/parser_checks.go b/schema/internal/parser_checks.go index ac68a6cb7..cf719e5e8 100644 --- a/schema/internal/parser_checks.go +++ b/schema/internal/parser_checks.go @@ -34,7 +34,9 @@ func CheckFileFormatField(fileFormat string, supportedFormatMajor, supportedForm } // Check that the major version number in the file is the same as what we expect. - if fileFormatParsed.Major() != uint64(supportedFormatMajor) { // nolint:gosec // Version can't be negative (overflow checked). + if fileFormatParsed.Major() != uint64( + supportedFormatMajor, + ) { // nolint:gosec // Version can't be negative (overflow checked). return fmt.Errorf( "this library cannot parse file formats with major version other than %v", supportedFormatMajor, @@ -43,7 +45,9 @@ func CheckFileFormatField(fileFormat string, supportedFormatMajor, supportedForm // Check that the file minor version number is not greater than // what is requested supports. - if fileFormatParsed.Minor() > uint64(supportedFormatMinor) { // nolint:gosec // Version can't be negative (overflow checked). + if fileFormatParsed.Minor() > uint64( + supportedFormatMinor, + ) { // nolint:gosec // Version can't be negative (overflow checked). supportedFormatMajorMinor := strconv.Itoa(supportedFormatMajor) + "." 
+ strconv.Itoa(supportedFormatMinor) // 1.0 diff --git a/sdk/log/logger_test.go b/sdk/log/logger_test.go index e3865e512..168ac5527 100644 --- a/sdk/log/logger_test.go +++ b/sdk/log/logger_test.go @@ -47,11 +47,14 @@ func TestLoggerEmit(t *testing.T) { rWithNoObservedTimestamp := r rWithNoObservedTimestamp.SetObservedTimestamp(time.Time{}) - contextWithSpanContext := trace.ContextWithSpanContext(context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID{0o1}, - SpanID: trace.SpanID{0o2}, - TraceFlags: 0x1, - })) + contextWithSpanContext := trace.ContextWithSpanContext( + context.Background(), + trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: trace.TraceID{0o1}, + SpanID: trace.SpanID{0o2}, + TraceFlags: 0x1, + }), + ) testCases := []struct { name string diff --git a/sdk/log/logtest/factory.go b/sdk/log/logtest/factory.go index e62695e87..f91366985 100644 --- a/sdk/log/logtest/factory.go +++ b/sdk/log/logtest/factory.go @@ -73,6 +73,8 @@ func (f RecordFactory) NewRecord() sdklog.Record { func set(r *sdklog.Record, name string, value any) { rVal := reflect.ValueOf(r).Elem() rf := rVal.FieldByName(name) - rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() // nolint: gosec // conversion of uintptr -> unsafe.Pointer. + rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())). + Elem() + // nolint: gosec // conversion of uintptr -> unsafe.Pointer. rf.Set(reflect.ValueOf(value)) } diff --git a/sdk/log/provider_test.go b/sdk/log/provider_test.go index d3e8faf66..9497baa73 100644 --- a/sdk/log/provider_test.go +++ b/sdk/log/provider_test.go @@ -202,9 +202,15 @@ func TestWithResource(t *testing.T) { want: resource.Default(), }, { - name: "explicit resource", - options: []LoggerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)))}, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), + name: "explicit resource", + options: []LoggerProviderOption{ + WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), + }, + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)), + ), }, { name: "last resource wins", @@ -212,12 +218,22 @@ func TestWithResource(t *testing.T) { WithResource(resource.NewSchemaless(attribute.String("rk1", "vk1"), attribute.Int64("rk2", 5))), WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), }, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10)), + ), }, { - name: "overlapping attributes with environment resource", - options: []LoggerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)))}, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), + name: "overlapping attributes with environment resource", + options: []LoggerProviderOption{ + WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), + }, + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk1", "rv1"), 
attribute.Int64("rk5", 10)), + ), }, } for _, tc := range cases { @@ -296,12 +312,26 @@ func TestLoggerProviderLogger(t *testing.T) { t.Run("SameLoggers", func(t *testing.T) { p := NewLoggerProvider() - l0, l1, l2 := p.Logger("l0"), p.Logger("l1"), p.Logger("l0", log.WithInstrumentationAttributes(attribute.String("foo", "bar"))) + l0, l1, l2 := p.Logger( + "l0", + ), p.Logger( + "l1", + ), p.Logger( + "l0", + log.WithInstrumentationAttributes(attribute.String("foo", "bar")), + ) assert.NotSame(t, l0, l1) assert.NotSame(t, l0, l2) assert.NotSame(t, l1, l2) - l3, l4, l5 := p.Logger("l0"), p.Logger("l1"), p.Logger("l0", log.WithInstrumentationAttributes(attribute.String("foo", "bar"))) + l3, l4, l5 := p.Logger( + "l0", + ), p.Logger( + "l1", + ), p.Logger( + "l0", + log.WithInstrumentationAttributes(attribute.String("foo", "bar")), + ) assert.Same(t, l0, l3) assert.Same(t, l1, l4) assert.Same(t, l2, l5) diff --git a/sdk/metric/config_test.go b/sdk/metric/config_test.go index 6640cc629..307c3e598 100644 --- a/sdk/metric/config_test.go +++ b/sdk/metric/config_test.go @@ -33,7 +33,9 @@ const envVarResourceAttributes = "OTEL_RESOURCE_ATTRIBUTES" var _ Reader = (*reader)(nil) -func (r *reader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. +func (r *reader) aggregation( + kind InstrumentKind, +) Aggregation { // nolint:revive // import-shadow for method scoped by type. return r.aggregationFunc(kind) } @@ -148,9 +150,15 @@ func TestWithResource(t *testing.T) { want: resource.Default(), }, { - name: "explicit resource", - options: []Option{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)))}, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), + name: "explicit resource", + options: []Option{ + WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), + }, + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)), + ), }, { name: "last resource wins", @@ -158,12 +166,22 @@ func TestWithResource(t *testing.T) { WithResource(resource.NewSchemaless(attribute.String("rk1", "vk1"), attribute.Int64("rk2", 5))), WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), }, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10)), + ), }, { - name: "overlapping attributes with environment resource", - options: []Option{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)))}, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), + name: "overlapping attributes with environment resource", + options: []Option{ + WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), + }, + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)), + ), }, } for _, tc := range cases { diff --git a/sdk/metric/exemplar.go b/sdk/metric/exemplar.go index 0335b8ae4..549d3bd5f 100644 --- a/sdk/metric/exemplar.go +++ b/sdk/metric/exemplar.go @@ 
-18,7 +18,10 @@ type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvi // reservoirFunc returns the appropriately configured exemplar reservoir // creation func based on the passed InstrumentKind and filter configuration. -func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] { +func reservoirFunc[N int64 | float64]( + provider exemplar.ReservoirProvider, + filter exemplar.Filter, +) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] { return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] { return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs)) } diff --git a/sdk/metric/instrument.go b/sdk/metric/instrument.go index 100785008..18891ed5b 100644 --- a/sdk/metric/instrument.go +++ b/sdk/metric/instrument.go @@ -208,7 +208,11 @@ func (i *int64Inst) Enabled(_ context.Context) bool { return len(i.measures) != 0 } -func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. +func (i *int64Inst) aggregate( + ctx context.Context, + val int64, + s attribute.Set, +) { // nolint:revive // okay to shadow pkg with method. for _, in := range i.measures { in(ctx, val, s) } diff --git a/sdk/metric/internal/aggregate/aggregate.go b/sdk/metric/internal/aggregate/aggregate.go index fde219333..0321da681 100644 --- a/sdk/metric/internal/aggregate/aggregate.go +++ b/sdk/metric/internal/aggregate/aggregate.go @@ -121,7 +121,10 @@ func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { // ExplicitBucketHistogram returns a histogram aggregate function input and // output. -func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { +func (b Builder[N]) ExplicitBucketHistogram( + boundaries []float64, + noMinMax, noSum bool, +) (Measure[N], ComputeAggregation) { h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: @@ -133,7 +136,10 @@ func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSu // ExponentialBucketHistogram returns a histogram aggregate function input and // output. 
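Aside, not part of this changeset: Builder.ExponentialBucketHistogram here is the internal aggregate constructor; at the SDK surface the same aggregation is normally requested through a view. A hedged sketch, assuming the exported AggregationBase2ExponentialHistogram and WithView APIs and a made-up instrument name:

package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	reader := sdkmetric.NewManualReader()

	// Route the hypothetical "request.duration" histogram onto a base-2
	// exponential bucket aggregation instead of explicit buckets.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.duration"},
		sdkmetric.Stream{
			Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
				MaxSize:  160,
				MaxScale: 20,
			},
		},
	)

	_ = sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(view),
	)
}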
-func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { +func (b Builder[N]) ExponentialBucketHistogram( + maxSize, maxScale int32, + noMinMax, noSum bool, +) (Measure[N], ComputeAggregation) { h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: diff --git a/sdk/metric/internal/aggregate/exponential_histogram.go b/sdk/metric/internal/aggregate/exponential_histogram.go index 32a62e1b8..ae1f59344 100644 --- a/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/sdk/metric/internal/aggregate/exponential_histogram.go @@ -48,7 +48,12 @@ type expoHistogramDataPoint[N int64 | float64] struct { zeroCount uint64 } -func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag +func newExpoHistogramDataPoint[N int64 | float64]( + attrs attribute.Set, + maxSize int, + maxScale int32, + noMinMax, noSum bool, +) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag f := math.MaxFloat64 ma := N(f) // if N is int64, max will overflow to -9223372036854775808 mi := N(-f) @@ -283,7 +288,12 @@ func (b *expoBuckets) downscale(delta int32) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64]( + maxSize, maxScale int32, + noMinMax, noSum bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, @@ -314,7 +324,12 @@ type expoHistogram[N int64 | float64] struct { start time.Time } -func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { +func (e *expoHistogram[N]) measure( + ctx context.Context, + value N, + fltrAttr attribute.Set, + droppedAttr []attribute.KeyValue, +) { // Ignore NaN and infinity. 
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { return @@ -360,11 +375,19 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin - hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + hDPts[i].PositiveBucket.Counts = reset( + hDPts[i].PositiveBucket.Counts, + len(val.posBuckets.counts), + len(val.posBuckets.counts), + ) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin - hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + hDPts[i].NegativeBucket.Counts = reset( + hDPts[i].NegativeBucket.Counts, + len(val.negBuckets.counts), + len(val.negBuckets.counts), + ) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) if !e.noSum { @@ -413,11 +436,19 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin - hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + hDPts[i].PositiveBucket.Counts = reset( + hDPts[i].PositiveBucket.Counts, + len(val.posBuckets.counts), + len(val.posBuckets.counts), + ) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin - hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + hDPts[i].NegativeBucket.Counts = reset( + hDPts[i].NegativeBucket.Counts, + len(val.negBuckets.counts), + len(val.negBuckets.counts), + ) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) if !e.noSum { diff --git a/sdk/metric/internal/aggregate/exponential_histogram_test.go b/sdk/metric/internal/aggregate/exponential_histogram_test.go index 9ede71ac8..a262e5a77 100644 --- a/sdk/metric/internal/aggregate/exponential_histogram_test.go +++ b/sdk/metric/internal/aggregate/exponential_histogram_test.go @@ -1073,10 +1073,22 @@ func FuzzGetBin(f *testing.F) { p.scale = (scale%31+31)%31 - 10 got := p.getBin(v) if v <= lowerBound(got, p.scale) { - t.Errorf("v=%x scale =%d had bin %d, but was below lower bound %x", v, p.scale, got, lowerBound(got, p.scale)) + t.Errorf( + "v=%x scale =%d had bin %d, but was below lower bound %x", + v, + p.scale, + got, + lowerBound(got, p.scale), + ) } if v > lowerBound(got+1, p.scale) { - t.Errorf("v=%x scale =%d had bin %d, but was above upper bound %x", v, p.scale, got, lowerBound(got+1, p.scale)) + t.Errorf( + "v=%x scale =%d had bin %d, but was above upper bound %x", + v, + p.scale, + got, + lowerBound(got+1, p.scale), + ) } }) } diff --git a/sdk/metric/internal/aggregate/filtered_reservoir.go b/sdk/metric/internal/aggregate/filtered_reservoir.go index 691a91060..d4c41642d 100644 --- a/sdk/metric/internal/aggregate/filtered_reservoir.go +++ b/sdk/metric/internal/aggregate/filtered_reservoir.go @@ -33,7 +33,10 @@ type filteredExemplarReservoir[N int64 | float64] struct { // NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values // that are allowed by the filter. 
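Aside, not part of this changeset: FilteredExemplarReservoir is internal, but the ExemplarReservoirProviderSelector type that appears earlier in this diff is settable on a Stream. A sketch under the assumption that exemplar.FixedSizeReservoirProvider is available in the public exemplar package; the instrument name is made up:

package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func main() {
	// Keep at most 4 exemplars per data point for the matched instrument.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.duration"},
		sdkmetric.Stream{
			ExemplarReservoirProviderSelector: func(sdkmetric.Aggregation) exemplar.ReservoirProvider {
				return exemplar.FixedSizeReservoirProvider(4)
			},
		},
	)
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
}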
-func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] { +func NewFilteredExemplarReservoir[N int64 | float64]( + f exemplar.Filter, + r exemplar.Reservoir, +) FilteredExemplarReservoir[N] { return &filteredExemplarReservoir[N]{ filter: f, reservoir: r, diff --git a/sdk/metric/internal/aggregate/histogram.go b/sdk/metric/internal/aggregate/histogram.go index d577ae2c1..d3068484c 100644 --- a/sdk/metric/internal/aggregate/histogram.go +++ b/sdk/metric/internal/aggregate/histogram.go @@ -53,7 +53,12 @@ type histValues[N int64 | float64] struct { valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] { +func newHistValues[N int64 | float64]( + bounds []float64, + noSum bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -71,7 +76,12 @@ func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r // Aggregate records the measurement value, scoped by attr, and aggregates it // into a histogram. -func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { +func (s *histValues[N]) measure( + ctx context.Context, + value N, + fltrAttr attribute.Set, + droppedAttr []attribute.KeyValue, +) { // This search will return an index in the range [0, len(s.bounds)], where // it will return len(s.bounds) if value is greater than the last element // of s.bounds. This aligns with the buckets in that the length of buckets @@ -108,7 +118,12 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. -func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] { +func newHistogram[N int64 | float64]( + boundaries []float64, + noMinMax, noSum bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/sdk/metric/internal/aggregate/histogram_test.go b/sdk/metric/internal/aggregate/histogram_test.go index 5c7d4bd31..fd7ec3acf 100644 --- a/sdk/metric/internal/aggregate/histogram_test.go +++ b/sdk/metric/internal/aggregate/histogram_test.go @@ -225,7 +225,12 @@ func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { // hPointSummed returns an HistogramDataPoint that started and ended now with // multi number of measurements values v. It includes a min and max (set to v). 
-func hPointSummed[N int64 | float64](a attribute.Set, v N, multi uint64, start, t time.Time) metricdata.HistogramDataPoint[N] { +func hPointSummed[N int64 | float64]( + a attribute.Set, + v N, + multi uint64, + start, t time.Time, +) metricdata.HistogramDataPoint[N] { idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi @@ -244,7 +249,12 @@ func hPointSummed[N int64 | float64](a attribute.Set, v N, multi uint64, start, // hPoint returns an HistogramDataPoint that started and ended now with multi // number of measurements values v. It includes a min and max (set to v). -func hPoint[N int64 | float64](a attribute.Set, v N, multi uint64, start, t time.Time) metricdata.HistogramDataPoint[N] { +func hPoint[N int64 | float64]( + a attribute.Set, + v N, + multi uint64, + start, t time.Time, +) metricdata.HistogramDataPoint[N] { idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi @@ -339,7 +349,12 @@ func TestCumulativeHistogramImmutableCounts(t *testing.T) { cpCounts := make([]uint64, len(hdp.BucketCounts)) copy(cpCounts, hdp.BucketCounts) hdp.BucketCounts[0] = 10 - assert.Equal(t, cpCounts, h.values[alice.Equivalent()].counts, "modifying the Aggregator bucket counts should not change the Aggregator") + assert.Equal( + t, + cpCounts, + h.values[alice.Equivalent()].counts, + "modifying the Aggregator bucket counts should not change the Aggregator", + ) } func TestDeltaHistogramReset(t *testing.T) { diff --git a/sdk/metric/internal/aggregate/lastvalue.go b/sdk/metric/internal/aggregate/lastvalue.go index d3a93f085..350ccebdc 100644 --- a/sdk/metric/internal/aggregate/lastvalue.go +++ b/sdk/metric/internal/aggregate/lastvalue.go @@ -114,7 +114,10 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. -func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] { +func newPrecomputedLastValue[N int64 | float64]( + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *precomputedLastValue[N] { return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } diff --git a/sdk/metric/internal/aggregate/sum.go b/sdk/metric/internal/aggregate/sum.go index 8e132ad61..612cde432 100644 --- a/sdk/metric/internal/aggregate/sum.go +++ b/sdk/metric/internal/aggregate/sum.go @@ -143,7 +143,11 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { // newPrecomputedSum returns an aggregator that summarizes a set of // observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. -func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64]( + monotonic bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, diff --git a/sdk/metric/manual_reader.go b/sdk/metric/manual_reader.go index c495985bc..96e779086 100644 --- a/sdk/metric/manual_reader.go +++ b/sdk/metric/manual_reader.go @@ -58,7 +58,9 @@ func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality } // aggregation returns what Aggregation to use for kind. 
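Aside, not part of this changeset: the ManualReader changes around this point are internal plumbing; from the outside a ManualReader is driven by Collect. A minimal, hedged end-to-end sketch (scope and instrument names are made up) pairing it with an observable counter:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()

	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	meter := provider.Meter("example")

	// Observable instruments report through callbacks at collection time.
	_, err := meter.Int64ObservableCounter(
		"requests.handled",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(42)
			return nil
		}),
	)
	if err != nil {
		panic(err)
	}

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		panic(err)
	}
	fmt.Println(len(rm.ScopeMetrics)) // 1
}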
-func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. +func (mr *ManualReader) aggregation( + kind InstrumentKind, +) Aggregation { // nolint:revive // import-shadow for method scoped by type. return mr.aggregationSelector(kind) } diff --git a/sdk/metric/meter.go b/sdk/metric/meter.go index 49126c993..c500fd9f2 100644 --- a/sdk/metric/meter.go +++ b/sdk/metric/meter.go @@ -82,7 +82,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) // Int64UpDownCounter returns a new instrument identified by name and // configured with options. The instrument is used to synchronously record // int64 measurements during a computational operation. -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { cfg := metric.NewInt64UpDownCounterConfig(options...) const kind = InstrumentKindUpDownCounter p := int64InstProvider{m} @@ -174,7 +177,10 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6 // Description, and Unit, only the first set of callbacks provided are used. // Use meter.RegisterCallback and Registration.Unregister to manage callbacks // if instrumentation can be created multiple times with different callbacks. -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { cfg := metric.NewInt64ObservableCounterConfig(options...) id := Instrument{ Name: name, @@ -195,7 +201,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser // Description, and Unit, only the first set of callbacks provided are used. // Use meter.RegisterCallback and Registration.Unregister to manage callbacks // if instrumentation can be created multiple times with different callbacks. -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := Instrument{ Name: name, @@ -216,7 +225,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 // Description, and Unit, only the first set of callbacks provided are used. // Use meter.RegisterCallback and Registration.Unregister to manage callbacks // if instrumentation can be created multiple times with different callbacks. -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { cfg := metric.NewInt64ObservableGaugeConfig(options...) id := Instrument{ Name: name, @@ -246,7 +258,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti // Float64UpDownCounter returns a new instrument identified by name and // configured with options. 
The instrument is used to synchronously record // float64 measurements during a computational operation. -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { cfg := metric.NewFloat64UpDownCounterConfig(options...) const kind = InstrumentKindUpDownCounter p := float64InstProvider{m} @@ -261,7 +276,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow // Float64Histogram returns a new instrument identified by name and configured // with options. The instrument is used to synchronously record the // distribution of float64 measurements during a computational operation. -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { cfg := metric.NewFloat64HistogramConfig(options...) p := float64InstProvider{m} i, err := p.lookupHistogram(name, cfg) @@ -289,7 +307,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) // float64ObservableInstrument returns a new observable identified by the Instrument. // It registers callbacks for each reader's pipeline. -func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) { +func (m *meter) float64ObservableInstrument( + id Instrument, + callbacks []metric.Float64Callback, +) (float64Observable, error) { key := instID{ Name: id.Name, Description: id.Description, @@ -338,7 +359,10 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl // Description, and Unit, only the first set of callbacks provided are used. // Use meter.RegisterCallback and Registration.Unregister to manage callbacks // if instrumentation can be created multiple times with different callbacks. -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { cfg := metric.NewFloat64ObservableCounterConfig(options...) id := Instrument{ Name: name, @@ -359,7 +383,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O // Description, and Unit, only the first set of callbacks provided are used. // Use meter.RegisterCallback and Registration.Unregister to manage callbacks // if instrumentation can be created multiple times with different callbacks. -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := Instrument{ Name: name, @@ -380,7 +407,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl // Description, and Unit, only the first set of callbacks provided are used. 
// Use meter.RegisterCallback and Registration.Unregister to manage callbacks // if instrumentation can be created multiple times with different callbacks. -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := Instrument{ Name: name, @@ -426,8 +456,10 @@ func warnRepeatedObservableCallbacks(id Instrument) { "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, ) - global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.", - "instrument", inst, + global.Warn( + "Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.", + "instrument", + inst, ) } @@ -613,7 +645,10 @@ func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]ag return p.int64Resolver.Aggregators(inst) } -func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) { +func (p int64InstProvider) histogramAggs( + name string, + cfg metric.Int64HistogramConfig, +) ([]aggregate.Measure[int64], error) { boundaries := cfg.ExplicitBucketBoundaries() aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() if aggError != nil { @@ -671,7 +706,10 @@ func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([] return p.float64Resolver.Aggregators(inst) } -func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) { +func (p float64InstProvider) histogramAggs( + name string, + cfg metric.Float64HistogramConfig, +) ([]aggregate.Measure[float64], error) { boundaries := cfg.ExplicitBucketBoundaries() aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() if aggError != nil { diff --git a/sdk/metric/meter_test.go b/sdk/metric/meter_test.go index 10f473a4c..b51ffdb51 100644 --- a/sdk/metric/meter_test.go +++ b/sdk/metric/meter_test.go @@ -441,9 +441,12 @@ func TestMeterCreatesInstruments(t *testing.T) { Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[int64]{ { - Attributes: attribute.Set{}, - Count: 1, - Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + Attributes: attribute.Set{}, + Count: 1, + Bounds: []float64{ + 0, 5, 10, 25, 50, 75, 100, 250, 500, + 750, 1000, 2500, 5000, 7500, 10000, + }, BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Min: metricdata.NewExtrema[int64](7), Max: metricdata.NewExtrema[int64](7), @@ -511,9 +514,12 @@ func TestMeterCreatesInstruments(t *testing.T) { Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{ { - Attributes: attribute.Set{}, - Count: 1, - Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + Attributes: attribute.Set{}, + Count: 1, + Bounds: []float64{ + 0, 5, 10, 25, 50, 75, 100, 250, 500, + 750, 1000, 2500, 5000, 7500, 10000, + }, BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Min: 
metricdata.NewExtrema[float64](7.), Max: metricdata.NewExtrema[float64](7.), @@ -1576,8 +1582,11 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { Data: metricdata.Histogram[float64]{ DataPoints: []metricdata.HistogramDataPoint[float64]{ { - Attributes: fooBar, - Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + Attributes: fooBar, + Bounds: []float64{ + 0, 5, 10, 25, 50, 75, 100, 250, 500, + 750, 1000, 2500, 5000, 7500, 10000, + }, BucketCounts: []uint64{0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Count: 2, Min: metricdata.NewExtrema(1.), @@ -1652,8 +1661,11 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { Data: metricdata.Histogram[int64]{ DataPoints: []metricdata.HistogramDataPoint[int64]{ { - Attributes: fooBar, - Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, + Attributes: fooBar, + Bounds: []float64{ + 0, 5, 10, 25, 50, 75, 100, 250, 500, + 750, 1000, 2500, 5000, 7500, 10000, + }, BucketCounts: []uint64{0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Count: 2, Min: metricdata.NewExtrema[int64](1), @@ -1689,7 +1701,12 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { require.Len(t, m.ScopeMetrics, 1) require.Len(t, m.ScopeMetrics[0].Metrics, 1) - metricdatatest.AssertEqual(t, tt.wantMetric, m.ScopeMetrics[0].Metrics[0], metricdatatest.IgnoreTimestamp()) + metricdatatest.AssertEqual( + t, + tt.wantMetric, + m.ScopeMetrics[0].Metrics[0], + metricdatatest.IgnoreTimestamp(), + ) }) } } @@ -2119,7 +2136,15 @@ func TestMalformedSelectors(t *testing.T) { obs.ObserveFloat64(afGauge, 1) return nil } - _, err = meter.RegisterCallback(callback, aiCounter, aiUpDownCounter, aiGauge, afCounter, afUpDownCounter, afGauge) + _, err = meter.RegisterCallback( + callback, + aiCounter, + aiUpDownCounter, + aiGauge, + afCounter, + afUpDownCounter, + afGauge, + ) require.NoError(t, err) siCounter.Add(context.Background(), 1) @@ -2182,7 +2207,10 @@ func TestHistogramBucketPrecedenceOrdering(t *testing.T) { }, } { t.Run(tt.desc, func(t *testing.T) { - meter := NewMeterProvider(WithView(tt.views...), WithReader(tt.reader)).Meter("TestHistogramBucketPrecedenceOrdering") + meter := NewMeterProvider( + WithView(tt.views...), + WithReader(tt.reader), + ).Meter("TestHistogramBucketPrecedenceOrdering") sfHistogram, err := meter.Float64Histogram("sync.float64.histogram", tt.histogramOpts...) 
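Aside, not part of this changeset: the TestHistogramBucketPrecedenceOrdering hunk here exercises how bucket boundaries are chosen. A hedged sketch of the instrument-level option it weighs against view and reader defaults (a view that sets an aggregation still wins over this hint):

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	reader := sdkmetric.NewManualReader()
	meter := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)).Meter("example")

	// Advisory boundaries set on the instrument itself.
	hist, err := meter.Float64Histogram(
		"sync.float64.histogram",
		metric.WithExplicitBucketBoundaries(0, 5, 10, 25, 50, 100),
	)
	if err != nil {
		panic(err)
	}
	hist.Record(context.Background(), 1)
}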
require.NoError(t, err) sfHistogram.Record(context.Background(), 1) @@ -2508,7 +2536,15 @@ func TestDuplicateInstrumentCreation(t *testing.T) { internalMeter, ok := m.(*meter) require.True(t, ok) // check that multiple calls to create the same instrument only create 1 instrument - numInstruments := len(internalMeter.int64Insts.data) + len(internalMeter.float64Insts.data) + len(internalMeter.int64ObservableInsts.data) + len(internalMeter.float64ObservableInsts.data) + numInstruments := len( + internalMeter.int64Insts.data, + ) + len( + internalMeter.float64Insts.data, + ) + len( + internalMeter.int64ObservableInsts.data, + ) + len( + internalMeter.float64ObservableInsts.data, + ) require.Equal(t, 1, numInstruments) }) } diff --git a/sdk/metric/metricdata/metricdatatest/assertion_test.go b/sdk/metric/metricdata/metricdatatest/assertion_test.go index 92a81cb51..0fc96c8e3 100644 --- a/sdk/metric/metricdata/metricdatatest/assertion_test.go +++ b/sdk/metric/metricdata/metricdatatest/assertion_test.go @@ -680,18 +680,44 @@ func TestAssertEqual(t *testing.T) { t.Run("SumFloat64", testDatatype(sumFloat64A, sumFloat64B, equalSums[float64])) t.Run("GaugeInt64", testDatatype(gaugeInt64A, gaugeInt64B, equalGauges[int64])) t.Run("GaugeFloat64", testDatatype(gaugeFloat64A, gaugeFloat64B, equalGauges[float64])) - t.Run("HistogramDataPointInt64", testDatatype(histogramDataPointInt64A, histogramDataPointInt64B, equalHistogramDataPoints[int64])) - t.Run("HistogramDataPointFloat64", testDatatype(histogramDataPointFloat64A, histogramDataPointFloat64B, equalHistogramDataPoints[float64])) + t.Run( + "HistogramDataPointInt64", + testDatatype(histogramDataPointInt64A, histogramDataPointInt64B, equalHistogramDataPoints[int64]), + ) + t.Run( + "HistogramDataPointFloat64", + testDatatype(histogramDataPointFloat64A, histogramDataPointFloat64B, equalHistogramDataPoints[float64]), + ) t.Run("DataPointInt64", testDatatype(dataPointInt64A, dataPointInt64B, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatype(dataPointFloat64A, dataPointFloat64B, equalDataPoints[float64])) t.Run("ExtremaInt64", testDatatype(minInt64A, minInt64B, equalExtrema[int64])) t.Run("ExtremaFloat64", testDatatype(minFloat64A, minFloat64B, equalExtrema[float64])) t.Run("ExemplarInt64", testDatatype(exemplarInt64A, exemplarInt64B, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatype(exemplarFloat64A, exemplarFloat64B, equalExemplars[float64])) - t.Run("ExponentialHistogramInt64", testDatatype(exponentialHistogramInt64A, exponentialHistogramInt64B, equalExponentialHistograms[int64])) - t.Run("ExponentialHistogramFloat64", testDatatype(exponentialHistogramFloat64A, exponentialHistogramFloat64B, equalExponentialHistograms[float64])) - t.Run("ExponentialHistogramDataPointInt64", testDatatype(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64B, equalExponentialHistogramDataPoints[int64])) - t.Run("ExponentialHistogramDataPointFloat64", testDatatype(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64B, equalExponentialHistogramDataPoints[float64])) + t.Run( + "ExponentialHistogramInt64", + testDatatype(exponentialHistogramInt64A, exponentialHistogramInt64B, equalExponentialHistograms[int64]), + ) + t.Run( + "ExponentialHistogramFloat64", + testDatatype(exponentialHistogramFloat64A, exponentialHistogramFloat64B, equalExponentialHistograms[float64]), + ) + t.Run( + "ExponentialHistogramDataPointInt64", + testDatatype( + exponentialHistogramDataPointInt64A, + exponentialHistogramDataPointInt64B, 
+ equalExponentialHistogramDataPoints[int64], + ), + ) + t.Run( + "ExponentialHistogramDataPointFloat64", + testDatatype( + exponentialHistogramDataPointFloat64A, + exponentialHistogramDataPointFloat64B, + equalExponentialHistogramDataPoints[float64], + ), + ) t.Run("ExponentialBuckets", testDatatype(exponentialBucket2, exponentialBucket3, equalExponentialBuckets)) t.Run("Summary", testDatatype(summaryA, summaryB, equalSummary)) t.Run("SummaryDataPoint", testDatatype(summaryDataPointA, summaryDataPointB, equalSummaryDataPoint)) @@ -708,18 +734,56 @@ func TestAssertEqualIgnoreTime(t *testing.T) { t.Run("SumFloat64", testDatatypeIgnoreTime(sumFloat64A, sumFloat64C, equalSums[float64])) t.Run("GaugeInt64", testDatatypeIgnoreTime(gaugeInt64A, gaugeInt64C, equalGauges[int64])) t.Run("GaugeFloat64", testDatatypeIgnoreTime(gaugeFloat64A, gaugeFloat64C, equalGauges[float64])) - t.Run("HistogramDataPointInt64", testDatatypeIgnoreTime(histogramDataPointInt64A, histogramDataPointInt64C, equalHistogramDataPoints[int64])) - t.Run("HistogramDataPointFloat64", testDatatypeIgnoreTime(histogramDataPointFloat64A, histogramDataPointFloat64C, equalHistogramDataPoints[float64])) + t.Run( + "HistogramDataPointInt64", + testDatatypeIgnoreTime(histogramDataPointInt64A, histogramDataPointInt64C, equalHistogramDataPoints[int64]), + ) + t.Run( + "HistogramDataPointFloat64", + testDatatypeIgnoreTime( + histogramDataPointFloat64A, + histogramDataPointFloat64C, + equalHistogramDataPoints[float64], + ), + ) t.Run("DataPointInt64", testDatatypeIgnoreTime(dataPointInt64A, dataPointInt64C, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatypeIgnoreTime(dataPointFloat64A, dataPointFloat64C, equalDataPoints[float64])) t.Run("ExtremaInt64", testDatatypeIgnoreTime(minInt64A, minInt64C, equalExtrema[int64])) t.Run("ExtremaFloat64", testDatatypeIgnoreTime(minFloat64A, minFloat64C, equalExtrema[float64])) t.Run("ExemplarInt64", testDatatypeIgnoreTime(exemplarInt64A, exemplarInt64C, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatypeIgnoreTime(exemplarFloat64A, exemplarFloat64C, equalExemplars[float64])) - t.Run("ExponentialHistogramInt64", testDatatypeIgnoreTime(exponentialHistogramInt64A, exponentialHistogramInt64C, equalExponentialHistograms[int64])) - t.Run("ExponentialHistogramFloat64", testDatatypeIgnoreTime(exponentialHistogramFloat64A, exponentialHistogramFloat64C, equalExponentialHistograms[float64])) - t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreTime(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64C, equalExponentialHistogramDataPoints[int64])) - t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreTime(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64C, equalExponentialHistogramDataPoints[float64])) + t.Run( + "ExponentialHistogramInt64", + testDatatypeIgnoreTime( + exponentialHistogramInt64A, + exponentialHistogramInt64C, + equalExponentialHistograms[int64], + ), + ) + t.Run( + "ExponentialHistogramFloat64", + testDatatypeIgnoreTime( + exponentialHistogramFloat64A, + exponentialHistogramFloat64C, + equalExponentialHistograms[float64], + ), + ) + t.Run( + "ExponentialHistogramDataPointInt64", + testDatatypeIgnoreTime( + exponentialHistogramDataPointInt64A, + exponentialHistogramDataPointInt64C, + equalExponentialHistogramDataPoints[int64], + ), + ) + t.Run( + "ExponentialHistogramDataPointFloat64", + testDatatypeIgnoreTime( + exponentialHistogramDataPointFloat64A, + exponentialHistogramDataPointFloat64C, + 
equalExponentialHistogramDataPoints[float64], + ), + ) t.Run("Summary", testDatatypeIgnoreTime(summaryA, summaryC, equalSummary)) t.Run("SummaryDataPoint", testDatatypeIgnoreTime(summaryDataPointA, summaryDataPointC, equalSummaryDataPoint)) } @@ -727,11 +791,17 @@ func TestAssertEqualIgnoreTime(t *testing.T) { func TestAssertEqualIgnoreExemplars(t *testing.T) { hdpInt64 := histogramDataPointInt64A hdpInt64.Exemplars = []metricdata.Exemplar[int64]{exemplarInt64B} - t.Run("HistogramDataPointInt64", testDatatypeIgnoreExemplars(histogramDataPointInt64A, hdpInt64, equalHistogramDataPoints[int64])) + t.Run( + "HistogramDataPointInt64", + testDatatypeIgnoreExemplars(histogramDataPointInt64A, hdpInt64, equalHistogramDataPoints[int64]), + ) hdpFloat64 := histogramDataPointFloat64A hdpFloat64.Exemplars = []metricdata.Exemplar[float64]{exemplarFloat64B} - t.Run("HistogramDataPointFloat64", testDatatypeIgnoreExemplars(histogramDataPointFloat64A, hdpFloat64, equalHistogramDataPoints[float64])) + t.Run( + "HistogramDataPointFloat64", + testDatatypeIgnoreExemplars(histogramDataPointFloat64A, hdpFloat64, equalHistogramDataPoints[float64]), + ) dpInt64 := dataPointInt64A dpInt64.Exemplars = []metricdata.Exemplar[int64]{exemplarInt64B} @@ -743,11 +813,25 @@ func TestAssertEqualIgnoreExemplars(t *testing.T) { ehdpInt64 := exponentialHistogramDataPointInt64A ehdpInt64.Exemplars = []metricdata.Exemplar[int64]{exemplarInt64B} - t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreExemplars(exponentialHistogramDataPointInt64A, ehdpInt64, equalExponentialHistogramDataPoints[int64])) + t.Run( + "ExponentialHistogramDataPointInt64", + testDatatypeIgnoreExemplars( + exponentialHistogramDataPointInt64A, + ehdpInt64, + equalExponentialHistogramDataPoints[int64], + ), + ) ehdpFloat64 := exponentialHistogramDataPointFloat64A ehdpFloat64.Exemplars = []metricdata.Exemplar[float64]{exemplarFloat64B} - t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreExemplars(exponentialHistogramDataPointFloat64A, ehdpFloat64, equalExponentialHistogramDataPoints[float64])) + t.Run( + "ExponentialHistogramDataPointFloat64", + testDatatypeIgnoreExemplars( + exponentialHistogramDataPointFloat64A, + ehdpFloat64, + equalExponentialHistogramDataPoints[float64], + ), + ) } func TestAssertEqualIgnoreValue(t *testing.T) { @@ -760,16 +844,54 @@ func TestAssertEqualIgnoreValue(t *testing.T) { t.Run("SumFloat64", testDatatypeIgnoreValue(sumFloat64A, sumFloat64D, equalSums[float64])) t.Run("GaugeInt64", testDatatypeIgnoreValue(gaugeInt64A, gaugeInt64D, equalGauges[int64])) t.Run("GaugeFloat64", testDatatypeIgnoreValue(gaugeFloat64A, gaugeFloat64D, equalGauges[float64])) - t.Run("HistogramDataPointInt64", testDatatypeIgnoreValue(histogramDataPointInt64A, histogramDataPointInt64D, equalHistogramDataPoints[int64])) - t.Run("HistogramDataPointFloat64", testDatatypeIgnoreValue(histogramDataPointFloat64A, histogramDataPointFloat64D, equalHistogramDataPoints[float64])) + t.Run( + "HistogramDataPointInt64", + testDatatypeIgnoreValue(histogramDataPointInt64A, histogramDataPointInt64D, equalHistogramDataPoints[int64]), + ) + t.Run( + "HistogramDataPointFloat64", + testDatatypeIgnoreValue( + histogramDataPointFloat64A, + histogramDataPointFloat64D, + equalHistogramDataPoints[float64], + ), + ) t.Run("DataPointInt64", testDatatypeIgnoreValue(dataPointInt64A, dataPointInt64D, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatypeIgnoreValue(dataPointFloat64A, dataPointFloat64D, equalDataPoints[float64])) t.Run("ExemplarInt64", 
testDatatypeIgnoreValue(exemplarInt64A, exemplarInt64D, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatypeIgnoreValue(exemplarFloat64A, exemplarFloat64D, equalExemplars[float64])) - t.Run("ExponentialHistogramInt64", testDatatypeIgnoreValue(exponentialHistogramInt64A, exponentialHistogramInt64D, equalExponentialHistograms[int64])) - t.Run("ExponentialHistogramFloat64", testDatatypeIgnoreValue(exponentialHistogramFloat64A, exponentialHistogramFloat64D, equalExponentialHistograms[float64])) - t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreValue(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64D, equalExponentialHistogramDataPoints[int64])) - t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreValue(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64D, equalExponentialHistogramDataPoints[float64])) + t.Run( + "ExponentialHistogramInt64", + testDatatypeIgnoreValue( + exponentialHistogramInt64A, + exponentialHistogramInt64D, + equalExponentialHistograms[int64], + ), + ) + t.Run( + "ExponentialHistogramFloat64", + testDatatypeIgnoreValue( + exponentialHistogramFloat64A, + exponentialHistogramFloat64D, + equalExponentialHistograms[float64], + ), + ) + t.Run( + "ExponentialHistogramDataPointInt64", + testDatatypeIgnoreValue( + exponentialHistogramDataPointInt64A, + exponentialHistogramDataPointInt64D, + equalExponentialHistogramDataPoints[int64], + ), + ) + t.Run( + "ExponentialHistogramDataPointFloat64", + testDatatypeIgnoreValue( + exponentialHistogramDataPointFloat64A, + exponentialHistogramDataPointFloat64D, + equalExponentialHistogramDataPoints[float64], + ), + ) t.Run("Summary", testDatatypeIgnoreValue(summaryA, summaryD, equalSummary)) t.Run("SummaryDataPoint", testDatatypeIgnoreValue(summaryDataPointA, summaryDataPointD, equalSummaryDataPoint)) } @@ -854,7 +976,13 @@ func TestAssertAggregationsEqual(t *testing.T) { assert.Empty(t, r, "value should be ignored: %v == %v", histogramFloat64A, histogramFloat64D) r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64B, config{}) - assert.NotEmptyf(t, r, "exponential histograms should not be equal: %v == %v", exponentialHistogramInt64A, exponentialHistogramInt64B) + assert.NotEmptyf( + t, + r, + "exponential histograms should not be equal: %v == %v", + exponentialHistogramInt64A, + exponentialHistogramInt64B, + ) r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64C, config{ignoreTimestamp: true}) assert.Empty(t, r, "exponential histograms should be equal: %v", r) @@ -863,7 +991,13 @@ func TestAssertAggregationsEqual(t *testing.T) { assert.Empty(t, r, "value should be ignored: %v == %v", exponentialHistogramInt64A, exponentialHistogramInt64D) r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64B, config{}) - assert.NotEmptyf(t, r, "exponential histograms should not be equal: %v == %v", exponentialHistogramFloat64A, exponentialHistogramFloat64B) + assert.NotEmptyf( + t, + r, + "exponential histograms should not be equal: %v == %v", + exponentialHistogramFloat64A, + exponentialHistogramFloat64B, + ) r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64C, config{ignoreTimestamp: true}) assert.Empty(t, r, "exponential histograms should be equal: %v", r) diff --git a/sdk/metric/metricdata/metricdatatest/comparisons.go b/sdk/metric/metricdata/metricdatatest/comparisons.go index 68f9649f6..68f20d966 100644 --- a/sdk/metric/metricdata/metricdatatest/comparisons.go +++ 
b/sdk/metric/metricdata/metricdatatest/comparisons.go @@ -230,7 +230,10 @@ func equalHistograms[N int64 | float64](a, b metricdata.Histogram[N], cfg config // equalDataPoints returns reasons DataPoints are not equal. If they are // equal, the returned reasons will be empty. -func equalDataPoints[N int64 | float64](a, b metricdata.DataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag +func equalDataPoints[N int64 | float64]( + a, b metricdata.DataPoint[N], + cfg config, +) (reasons []string) { // nolint: revive // Intentional internal control flag if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", @@ -272,7 +275,10 @@ func equalDataPoints[N int64 | float64](a, b metricdata.DataPoint[N], cfg config // equalHistogramDataPoints returns reasons HistogramDataPoints are not equal. // If they are equal, the returned reasons will be empty. -func equalHistogramDataPoints[N int64 | float64](a, b metricdata.HistogramDataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag +func equalHistogramDataPoints[N int64 | float64]( + a, b metricdata.HistogramDataPoint[N], + cfg config, +) (reasons []string) { // nolint: revive // Intentional internal control flag if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", @@ -329,7 +335,10 @@ func equalHistogramDataPoints[N int64 | float64](a, b metricdata.HistogramDataPo // // The DataPoints each Histogram contains are compared based on containing the // same HistogramDataPoint, not the order they are stored in. -func equalExponentialHistograms[N int64 | float64](a, b metricdata.ExponentialHistogram[N], cfg config) (reasons []string) { +func equalExponentialHistograms[N int64 | float64]( + a, b metricdata.ExponentialHistogram[N], + cfg config, +) (reasons []string) { if a.Temporality != b.Temporality { reasons = append(reasons, notEqualStr("Temporality", a.Temporality, b.Temporality)) } @@ -350,7 +359,10 @@ func equalExponentialHistograms[N int64 | float64](a, b metricdata.ExponentialHi // equalExponentialHistogramDataPoints returns reasons HistogramDataPoints are not equal. // If they are equal, the returned reasons will be empty. -func equalExponentialHistogramDataPoints[N int64 | float64](a, b metricdata.ExponentialHistogramDataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag +func equalExponentialHistogramDataPoints[N int64 | float64]( + a, b metricdata.ExponentialHistogramDataPoint[N], + cfg config, +) (reasons []string) { // nolint: revive // Intentional internal control flag if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", @@ -637,7 +649,10 @@ func missingAttrStr(name string) string { return "missing attribute " + name } -func hasAttributesExemplars[T int64 | float64](exemplar metricdata.Exemplar[T], attrs ...attribute.KeyValue) (reasons []string) { +func hasAttributesExemplars[T int64 | float64]( + exemplar metricdata.Exemplar[T], + attrs ...attribute.KeyValue, +) (reasons []string) { s := attribute.NewSet(exemplar.FilteredAttributes...) 
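Aside, not part of this changeset: the comparison helpers re-wrapped in this file back the exported metricdatatest assertions. A small hedged sketch of how they are typically called from a test; the want/got values and the attribute are placeholders:

package example_test

import (
	"testing"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
)

func assertCollected(t *testing.T, want, got metricdata.Metrics) {
	t.Helper()

	// Compare everything except timestamps and exemplars.
	metricdatatest.AssertEqual(t, want, got,
		metricdatatest.IgnoreTimestamp(),
		metricdatatest.IgnoreExemplars(),
	)

	// Independently check that the collected data carries an expected attribute.
	metricdatatest.AssertHasAttributes(t, got, attribute.String("peer", "localhost"))
}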
for _, attr := range attrs { val, ok := s.Value(attr.Key) @@ -652,7 +667,10 @@ func hasAttributesExemplars[T int64 | float64](exemplar metricdata.Exemplar[T], return reasons } -func hasAttributesDataPoints[T int64 | float64](dp metricdata.DataPoint[T], attrs ...attribute.KeyValue) (reasons []string) { +func hasAttributesDataPoints[T int64 | float64]( + dp metricdata.DataPoint[T], + attrs ...attribute.KeyValue, +) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { @@ -688,7 +706,10 @@ func hasAttributesSum[T int64 | float64](sum metricdata.Sum[T], attrs ...attribu return reasons } -func hasAttributesHistogramDataPoints[T int64 | float64](dp metricdata.HistogramDataPoint[T], attrs ...attribute.KeyValue) (reasons []string) { +func hasAttributesHistogramDataPoints[T int64 | float64]( + dp metricdata.HistogramDataPoint[T], + attrs ...attribute.KeyValue, +) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { @@ -702,7 +723,10 @@ func hasAttributesHistogramDataPoints[T int64 | float64](dp metricdata.Histogram return reasons } -func hasAttributesHistogram[T int64 | float64](histogram metricdata.Histogram[T], attrs ...attribute.KeyValue) (reasons []string) { +func hasAttributesHistogram[T int64 | float64]( + histogram metricdata.Histogram[T], + attrs ...attribute.KeyValue, +) (reasons []string) { for n, dp := range histogram.DataPoints { reas := hasAttributesHistogramDataPoints(dp, attrs...) if len(reas) > 0 { @@ -713,7 +737,10 @@ func hasAttributesHistogram[T int64 | float64](histogram metricdata.Histogram[T] return reasons } -func hasAttributesExponentialHistogramDataPoints[T int64 | float64](dp metricdata.ExponentialHistogramDataPoint[T], attrs ...attribute.KeyValue) (reasons []string) { +func hasAttributesExponentialHistogramDataPoints[T int64 | float64]( + dp metricdata.ExponentialHistogramDataPoint[T], + attrs ...attribute.KeyValue, +) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { @@ -727,7 +754,10 @@ func hasAttributesExponentialHistogramDataPoints[T int64 | float64](dp metricdat return reasons } -func hasAttributesExponentialHistogram[T int64 | float64](histogram metricdata.ExponentialHistogram[T], attrs ...attribute.KeyValue) (reasons []string) { +func hasAttributesExponentialHistogram[T int64 | float64]( + histogram metricdata.ExponentialHistogram[T], + attrs ...attribute.KeyValue, +) (reasons []string) { for n, dp := range histogram.DataPoints { reas := hasAttributesExponentialHistogramDataPoints(dp, attrs...) if len(reas) > 0 { diff --git a/sdk/metric/periodic_reader.go b/sdk/metric/periodic_reader.go index dcd2182d9..ebb9a0463 100644 --- a/sdk/metric/periodic_reader.go +++ b/sdk/metric/periodic_reader.go @@ -193,7 +193,9 @@ func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality } // aggregation returns what Aggregation to use for kind. -func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. +func (r *PeriodicReader) aggregation( + kind InstrumentKind, +) Aggregation { // nolint:revive // import-shadow for method scoped by type. 
return r.exporter.Aggregation(kind) } diff --git a/sdk/metric/pipeline.go b/sdk/metric/pipeline.go index 775e24526..2240c26e9 100644 --- a/sdk/metric/pipeline.go +++ b/sdk/metric/pipeline.go @@ -347,7 +347,12 @@ func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation // // If the instrument defines an unknown or incompatible aggregation, an error // is returned. -func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) { +func (i *inserter[N]) cachedAggregator( + scope instrumentation.Scope, + kind InstrumentKind, + stream Stream, + readerAggregation Aggregation, +) (meas aggregate.Measure[N], aggID uint64, err error) { switch stream.Aggregation.(type) { case nil: // The aggregation was not overridden with a view. Use the aggregation @@ -379,8 +384,11 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum normID := id.normalize() cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ - Temporality: i.pipeline.reader.temporality(kind), - ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter), + Temporality: i.pipeline.reader.temporality(kind), + ReservoirFunc: reservoirFunc[N]( + stream.ExemplarReservoirProviderSelector(stream.Aggregation), + i.pipeline.exemplarFilter, + ), } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -471,7 +479,11 @@ func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { // aggregateFunc returns new aggregate functions matching agg, kind, and // monotonic. If the agg is unknown or temporality is invalid, an error is // returned. 
-func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { +func (i *inserter[N]) aggregateFunc( + b aggregate.Builder[N], + agg Aggregation, + kind InstrumentKind, +) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { switch a := agg.(type) { case AggregationDefault: return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind) diff --git a/sdk/metric/pipeline_registry_test.go b/sdk/metric/pipeline_registry_test.go index d1fb71a3b..7f248735b 100644 --- a/sdk/metric/pipeline_registry_test.go +++ b/sdk/metric/pipeline_registry_test.go @@ -34,14 +34,25 @@ func (invalidAggregation) err() error { return nil } -func requireN[N int64 | float64](t *testing.T, n int, m []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { +func requireN[N int64 | float64]( + t *testing.T, + n int, + m []aggregate.Measure[N], + comps []aggregate.ComputeAggregation, + err error, +) { t.Helper() assert.NoError(t, err) require.Len(t, m, n) require.Len(t, comps, n) } -func assertSum[N int64 | float64](n int, temp metricdata.Temporality, mono bool, v [2]N) func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) { +func assertSum[N int64 | float64]( + n int, + temp metricdata.Temporality, + mono bool, + v [2]N, +) func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) { return func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() requireN[N](t, n, meas, comps, err) @@ -71,7 +82,9 @@ func assertSum[N int64 | float64](n int, temp metricdata.Temporality, mono bool, } } -func assertHist[N int64 | float64](temp metricdata.Temporality) func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) { +func assertHist[N int64 | float64]( + temp metricdata.Temporality, +) func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) { return func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() requireN[N](t, 1, meas, comps, err) @@ -116,7 +129,12 @@ func assertHist[N int64 | float64](temp metricdata.Temporality) func(*testing.T, } } -func assertLastValue[N int64 | float64](t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { +func assertLastValue[N int64 | float64]( + t *testing.T, + meas []aggregate.Measure[N], + comps []aggregate.ComputeAggregation, + err error, +) { t.Helper() requireN[N](t, 1, meas, comps, err) @@ -164,9 +182,11 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { validate func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) }{ { - name: "Default/Drop", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDrop{} })), - inst: instruments[InstrumentKindCounter], + name: "Default/Drop", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDrop{} }), + ), + inst: instruments[InstrumentKindCounter], validate: func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() assert.NoError(t, err) @@ -304,44 +324,58 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { validate: assertSum[N](2, metricdata.CumulativeTemporality, true, [2]N{1, 4}), }, { - name: "Reader/Default/Cumulative/Sum/Monotonic", - reader: 
NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Cumulative/Sum/Monotonic", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 4}), }, { - name: "Reader/Default/Cumulative/Sum/NonMonotonic", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Cumulative/Sum/NonMonotonic", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 4}), }, { - name: "Reader/Default/Cumulative/ExplicitBucketHistogram", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Cumulative/ExplicitBucketHistogram", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindHistogram], validate: assertHist[N](metricdata.CumulativeTemporality), }, { - name: "Reader/Default/Cumulative/Gauge", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Cumulative/Gauge", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindGauge], validate: assertLastValue[N], }, { - name: "Reader/Default/Cumulative/PrecomputedSum/Monotonic", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Cumulative/PrecomputedSum/Monotonic", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindObservableCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 3}), }, { - name: "Reader/Default/Cumulative/PrecomputedSum/NonMonotonic", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Cumulative/PrecomputedSum/NonMonotonic", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindObservableUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 3}), }, { - name: "Reader/Default/Gauge", - reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), + name: "Reader/Default/Gauge", + reader: NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + ), inst: instruments[InstrumentKindObservableGauge], validate: assertLastValue[N], }, @@ -418,7 +452,9 @@ func TestPipelinesAggregatorForEachReader(t *testing.T) { func TestPipelineRegistryCreateAggregators(t *testing.T) { renameView := NewView(Instrument{Name: "foo"}, Stream{Name: "bar"}) testRdr := NewManualReader() - testRdrHistogram := NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation 
{ return AggregationExplicitBucketHistogram{} })) + testRdrHistogram := NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationExplicitBucketHistogram{} }), + ) testCases := []struct { name string @@ -529,7 +565,9 @@ func TestPipelineRegistryResource(t *testing.T) { } func TestPipelineRegistryCreateAggregatorsIncompatibleInstrument(t *testing.T) { - testRdrHistogram := NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationSum{} })) + testRdrHistogram := NewManualReader( + WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationSum{} }), + ) readers := []Reader{testRdrHistogram} views := []View{defaultView} diff --git a/sdk/metric/reader.go b/sdk/metric/reader.go index d13a70697..c96e500a2 100644 --- a/sdk/metric/reader.go +++ b/sdk/metric/reader.go @@ -146,7 +146,10 @@ type AggregationSelector func(InstrumentKind) Aggregation // Histogram ⇨ ExplicitBucketHistogram. func DefaultAggregationSelector(ik InstrumentKind) Aggregation { switch ik { - case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter: + case InstrumentKindCounter, + InstrumentKindUpDownCounter, + InstrumentKindObservableCounter, + InstrumentKindObservableUpDownCounter: return AggregationSum{} case InstrumentKindObservableGauge, InstrumentKindGauge: return AggregationLastValue{} diff --git a/sdk/metric/reader_test.go b/sdk/metric/reader_test.go index ccd61e8f0..a84dd569c 100644 --- a/sdk/metric/reader_test.go +++ b/sdk/metric/reader_test.go @@ -287,7 +287,14 @@ func benchReaderCollectFunc(r Reader) func(*testing.B) { for n := 0; n < b.N; n++ { err = r.Collect(ctx, &collectedMetrics) - assert.Equalf(b, testResourceMetricsA, collectedMetrics, "unexpected Collect response: (%#v, %v)", collectedMetrics, err) + assert.Equalf( + b, + testResourceMetricsA, + collectedMetrics, + "unexpected Collect response: (%#v, %v)", + collectedMetrics, + err, + ) } } } diff --git a/sdk/resource/os_unix_test.go b/sdk/resource/os_unix_test.go index e35b1ce40..bfa03da76 100644 --- a/sdk/resource/os_unix_test.go +++ b/sdk/resource/os_unix_test.go @@ -69,11 +69,26 @@ func TestGetFirstAvailableFile(t *testing.T) { }{ {"Gets first, skip second candidate", []string{filename1, filename2}, filename1, ""}, {"Skips first, gets second candidate", []string{"does_not_exists", filename2}, filename2, ""}, - {"Skips first, gets second, ignores third candidate", []string{"does_not_exists", filename2, filename1}, filename2, ""}, + { + "Skips first, gets second, ignores third candidate", + []string{"does_not_exists", filename2, filename1}, + filename2, + "", + }, {"No candidates (empty slice)", []string{}, "", "no candidate file available: []"}, {"No candidates (nil slice)", nil, "", "no candidate file available: []"}, - {"Single nonexisting candidate", []string{"does_not_exists"}, "", "no candidate file available: [does_not_exists]"}, - {"Multiple nonexisting candidates", []string{"does_not_exists", "this_either"}, "", "no candidate file available: [does_not_exists this_either]"}, + { + "Single nonexisting candidate", + []string{"does_not_exists"}, + "", + "no candidate file available: [does_not_exists]", + }, + { + "Multiple nonexisting candidates", + []string{"does_not_exists", "this_either"}, + "", + "no candidate file available: [does_not_exists this_either]", + }, } for _, tc := range tt { diff --git a/sdk/resource/resource_experimental_test.go 
b/sdk/resource/resource_experimental_test.go index 21f2caddb..7b0ebaeb9 100644 --- a/sdk/resource/resource_experimental_test.go +++ b/sdk/resource/resource_experimental_test.go @@ -29,7 +29,10 @@ func TestDefaultExperimental(t *testing.T) { serviceInstanceID, ok := res.Set().Value(semconv.ServiceInstanceIDKey) require.True(t, ok) - matched, err := regexp.MatchString("^[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$", serviceInstanceID.AsString()) + matched, err := regexp.MatchString( + "^[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}$", + serviceInstanceID.AsString(), + ) require.NoError(t, err) require.True(t, matched) } diff --git a/sdk/resource/resource_test.go b/sdk/resource/resource_test.go index 4872a71c0..a916e9338 100644 --- a/sdk/resource/resource_test.go +++ b/sdk/resource/resource_test.go @@ -409,7 +409,11 @@ func TestNew(t *testing.T) { options: []resource.Option{ resource.WithDetectors( resource.StringDetector("https://opentelemetry.io/schemas/1.0.0", semconv.HostNameKey, os.Hostname), - resource.StringDetector("https://opentelemetry.io/schemas/1.1.0", semconv.HostNameKey, func() (string, error) { return "", errors.New("fail") }), + resource.StringDetector( + "https://opentelemetry.io/schemas/1.1.0", + semconv.HostNameKey, + func() (string, error) { return "", errors.New("fail") }, + ), ), resource.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"), }, diff --git a/sdk/trace/provider.go b/sdk/trace/provider.go index 185aa7c08..0e2a2e7c6 100644 --- a/sdk/trace/provider.go +++ b/sdk/trace/provider.go @@ -169,7 +169,17 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. 
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) + global.Info( + "Tracer created", + "name", + name, + "version", + is.Version, + "schemaURL", + is.SchemaURL, + "attributes", + is.Attributes, + ) } return t } diff --git a/sdk/trace/provider_test.go b/sdk/trace/provider_test.go index 577a5307d..4ccc69d7d 100644 --- a/sdk/trace/provider_test.go +++ b/sdk/trace/provider_test.go @@ -374,12 +374,26 @@ func testStoredError(t *testing.T, target interface{}) { func TestTracerProviderReturnsSameTracer(t *testing.T) { p := NewTracerProvider() - t0, t1, t2 := p.Tracer("t0"), p.Tracer("t1"), p.Tracer("t0", trace.WithInstrumentationAttributes(attribute.String("foo", "bar"))) + t0, t1, t2 := p.Tracer( + "t0", + ), p.Tracer( + "t1", + ), p.Tracer( + "t0", + trace.WithInstrumentationAttributes(attribute.String("foo", "bar")), + ) assert.NotSame(t, t0, t1) assert.NotSame(t, t0, t2) assert.NotSame(t, t1, t2) - t3, t4, t5 := p.Tracer("t0"), p.Tracer("t1"), p.Tracer("t0", trace.WithInstrumentationAttributes(attribute.String("foo", "bar"))) + t3, t4, t5 := p.Tracer( + "t0", + ), p.Tracer( + "t1", + ), p.Tracer( + "t0", + trace.WithInstrumentationAttributes(attribute.String("foo", "bar")), + ) assert.Same(t, t0, t3) assert.Same(t, t1, t4) assert.Same(t, t2, t5) diff --git a/sdk/trace/trace_test.go b/sdk/trace/trace_test.go index 2bf3dc33e..e56f336c9 100644 --- a/sdk/trace/trace_test.go +++ b/sdk/trace/trace_test.go @@ -151,7 +151,10 @@ func (ts *testSampler) ShouldSample(p SamplingParameters) SamplingResult { if strings.HasPrefix(p.Name, ts.prefix) { decision = RecordAndSample } - return SamplingResult{Decision: decision, Attributes: []attribute.KeyValue{attribute.Int("callCount", ts.callCount)}} + return SamplingResult{ + Decision: decision, + Attributes: []attribute.KeyValue{attribute.Int("callCount", ts.callCount)}, + } } func (ts testSampler) Description() string { @@ -374,13 +377,21 @@ func TestStartSpanNewRootNotSampled(t *testing.T) { _, s2 := neverSampledTr.Start(ctx, "span2-no-newroot") if !s2.SpanContext().IsSampled() { - t.Error(fmt.Errorf("got child span is not sampled, want child span with sampler: ParentBased(NeverSample()) to be sampled")) + t.Error( + fmt.Errorf( + "got child span is not sampled, want child span with sampler: ParentBased(NeverSample()) to be sampled", + ), + ) } // Adding WithNewRoot causes child spans to not sample based on parent context _, s3 := neverSampledTr.Start(ctx, "span3-newroot", trace.WithNewRoot()) if s3.SpanContext().IsSampled() { - t.Error(fmt.Errorf("got child span is sampled, want child span WithNewRoot() and with sampler: ParentBased(NeverSample()) to not be sampled")) + t.Error( + fmt.Errorf( + "got child span is sampled, want child span WithNewRoot() and with sampler: ParentBased(NeverSample()) to not be sampled", + ), + ) } } @@ -731,8 +742,12 @@ func TestLinks(t *testing.T) { k2v2 := attribute.String("key2", "value2") k3v3 := attribute.String("key3", "value3") - sc1 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) - sc2 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) + sc1 := trace.NewSpanContext( + trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}, + ) + sc2 := trace.NewSpanContext( + trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}, + ) l1 := trace.Link{SpanContext: sc1, 
Attributes: []attribute.KeyValue{k1v1}} l2 := trace.Link{SpanContext: sc2, Attributes: []attribute.KeyValue{k2v2, k3v3}} @@ -773,9 +788,15 @@ func TestLinks(t *testing.T) { func TestLinksOverLimit(t *testing.T) { te := NewTestExporter() - sc1 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) - sc2 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) - sc3 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) + sc1 := trace.NewSpanContext( + trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}, + ) + sc2 := trace.NewSpanContext( + trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}, + ) + sc3 := trace.NewSpanContext( + trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}, + ) sl := NewSpanLimits() sl.LinkCountLimit = 2 @@ -951,7 +972,12 @@ func startNamedSpan(tp *TracerProvider, trName, name string, args ...trace.SpanS // passed name and with the passed context. The context is returned // along with the span so this parent can be used to create child // spans. -func startLocalSpan(ctx context.Context, tp *TracerProvider, trName, name string, args ...trace.SpanStartOption) (context.Context, trace.Span) { +func startLocalSpan( + ctx context.Context, + tp *TracerProvider, + trName, name string, + args ...trace.SpanStartOption, +) (context.Context, trace.Span) { ctx, span := tp.Tracer(trName).Start( ctx, name, @@ -1299,8 +1325,21 @@ func TestRecordErrorWithStackTrace(t *testing.T) { assert.Equal(t, got.events[0].Attributes[1].Value.AsString(), want.events[0].Attributes[1].Value.AsString()) gotStackTraceFunctionName := strings.Split(got.events[0].Attributes[2].Value.AsString(), "\n") - assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[1], "go.opentelemetry.io/otel/sdk/trace.recordStackTrace"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.recordStackTrace", gotStackTraceFunctionName[1]) - assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[3], "go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).RecordError"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).RecordError", gotStackTraceFunctionName[3]) + assert.Truef( + t, + strings.HasPrefix(gotStackTraceFunctionName[1], "go.opentelemetry.io/otel/sdk/trace.recordStackTrace"), + "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.recordStackTrace", + gotStackTraceFunctionName[1], + ) + assert.Truef( + t, + strings.HasPrefix( + gotStackTraceFunctionName[3], + "go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).RecordError", + ), + "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).RecordError", + gotStackTraceFunctionName[3], + ) } func TestRecordErrorNil(t *testing.T) { @@ -1346,7 +1385,11 @@ func TestWithSpanKind(t *testing.T) { } if spanData.SpanKind() != trace.SpanKindInternal { - t.Errorf("Default value of Spankind should be Internal: got %+v, want %+v\n", spanData.SpanKind(), trace.SpanKindInternal) + t.Errorf( + "Default value of Spankind should be Internal: got %+v, want %+v\n", + spanData.SpanKind(), + trace.SpanKindInternal, + ) } sks := []trace.SpanKind{ @@ -1397,9 +1440,15 @@ func TestWithResource(t *testing.T) { want: resource.Default(), }, { - name: "explicit resource", - options: []TracerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", 
"rv1"), attribute.Int64("rk2", 5)))}, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), + name: "explicit resource", + options: []TracerProviderOption{ + WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), + }, + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)), + ), }, { name: "last resource wins", @@ -1407,12 +1456,22 @@ func TestWithResource(t *testing.T) { WithResource(resource.NewSchemaless(attribute.String("rk1", "vk1"), attribute.Int64("rk2", 5))), WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), }, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10)), + ), }, { - name: "overlapping attributes with environment resource", - options: []TracerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)))}, - want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), + name: "overlapping attributes with environment resource", + options: []TracerProviderOption{ + WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), + }, + want: mergeResource( + t, + resource.Environment(), + resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)), + ), }, } for _, tc := range cases { @@ -1527,8 +1586,18 @@ func TestSpanCapturesPanicWithStackTrace(t *testing.T) { assert.Equal(t, "error message", spans[0].Events()[0].Attributes[1].Value.AsString()) gotStackTraceFunctionName := strings.Split(spans[0].Events()[0].Attributes[2].Value.AsString(), "\n") - assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[1], "go.opentelemetry.io/otel/sdk/trace.recordStackTrace"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.recordStackTrace", gotStackTraceFunctionName[1]) - assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[3], "go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).End"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).End", gotStackTraceFunctionName[3]) + assert.Truef( + t, + strings.HasPrefix(gotStackTraceFunctionName[1], "go.opentelemetry.io/otel/sdk/trace.recordStackTrace"), + "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.recordStackTrace", + gotStackTraceFunctionName[1], + ) + assert.Truef( + t, + strings.HasPrefix(gotStackTraceFunctionName[3], "go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).End"), + "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).End", + gotStackTraceFunctionName[3], + ) } func TestReadOnlySpan(t *testing.T) { @@ -1930,7 +1999,9 @@ func TestSpanAddLink(t *testing.T) { name: "AddLinkWithInvalidSpanContext", attrLinkCountLimit: 128, link: trace.Link{ - SpanContext: trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{}), SpanID: [8]byte{}}), + SpanContext: trace.NewSpanContext( + trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{}), SpanID: [8]byte{}}, + ), }, want: &snapshot{ name: "span0", diff --git a/sdk/trace/tracer.go b/sdk/trace/tracer.go index 43419d3b5..0b65ae9ab 100644 --- 
a/sdk/trace/tracer.go +++ b/sdk/trace/tracer.go @@ -26,7 +26,11 @@ var _ trace.Tracer = &tracer{} // The Span is created with the provided name and as a child of any existing // span context found in the passed context. The created Span will be // configured appropriately by any SpanOption passed. -func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { +func (tr *tracer) Start( + ctx context.Context, + name string, + options ...trace.SpanStartOption, +) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) if ctx == nil { @@ -112,7 +116,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo } // newRecordingSpan returns a new configured recordingSpan. -func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { +func (tr *tracer) newRecordingSpan( + psc, sc trace.SpanContext, + name string, + sr SamplingResult, + config *trace.SpanConfig, +) *recordingSpan { startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() diff --git a/semconv/internal/http.go b/semconv/internal/http.go index 8662e8b9d..e9eb57734 100644 --- a/semconv/internal/http.go +++ b/semconv/internal/http.go @@ -50,7 +50,10 @@ type SemanticConventions struct { // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. -func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { +func (sc *SemanticConventions) NetAttributesFromHTTPRequest( + network string, + request *http.Request, +) []attribute.KeyValue { attrs := []attribute.KeyValue{} switch network { @@ -200,7 +203,10 @@ func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http. // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. -func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest( + serverName string, + request *http.Request, +) []attribute.KeyValue { attrs := []attribute.KeyValue{} if serverName != "" { attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) @@ -212,7 +218,10 @@ func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverN // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. 
-func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { +func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest( + serverName, route string, + request *http.Request, +) []attribute.KeyValue { attrs := []attribute.KeyValue{ sc.HTTPTargetKey.String(request.RequestURI), } diff --git a/semconv/internal/http_test.go b/semconv/internal/http_test.go index e2b0e0939..bf06af608 100644 --- a/semconv/internal/http_test.go +++ b/semconv/internal/http_test.go @@ -961,7 +961,12 @@ func assertElementsMatch(t *testing.T, expected, got []attribute.KeyValue, forma } } -func testRequest(method, requestURI, proto, remoteAddr, host string, u *url.URL, header http.Header, tlsopt tlsOption) *http.Request { +func testRequest( + method, requestURI, proto, remoteAddr, host string, + u *url.URL, + header http.Header, + tlsopt tlsOption, +) *http.Request { major, minor := protoToInts(proto) var tlsConn *tls.ConnectionState switch tlsopt { diff --git a/trace/auto_test.go b/trace/auto_test.go index 6db559164..cf3686c8e 100644 --- a/trace/auto_test.go +++ b/trace/auto_test.go @@ -816,12 +816,24 @@ func TestSpanAttributeValueLimits(t *testing.T) { assert.Truef(t, eq(want, s.span.Attrs), "set span attributes: got %#v, want %#v", s.span.Attrs, want) s.AddEvent("test", WithAttributes(aStr, aStrSlice)) - assert.Truef(t, eq(want, s.span.Events[0].Attrs), "span event attributes: got %#v, want %#v", s.span.Events[0].Attrs, want) + assert.Truef( + t, + eq(want, s.span.Events[0].Attrs), + "span event attributes: got %#v, want %#v", + s.span.Events[0].Attrs, + want, + ) s.AddLink(Link{ Attributes: []attribute.KeyValue{aStr, aStrSlice}, }) - assert.Truef(t, eq(want, s.span.Links[0].Attrs), "span link attributes: got %#v, want %#v", s.span.Links[0].Attrs, want) + assert.Truef( + t, + eq(want, s.span.Links[0].Attrs), + "span link attributes: got %#v, want %#v", + s.span.Links[0].Attrs, + want, + ) builder.Options = []SpanStartOption{ WithAttributes(aStr, aStrSlice), @@ -831,7 +843,13 @@ func TestSpanAttributeValueLimits(t *testing.T) { } s = builder.Build() assert.Truef(t, eq(want, s.span.Attrs), "new span attributes: got %#v, want %#v", s.span.Attrs, want) - assert.Truef(t, eq(want, s.span.Links[0].Attrs), "new span link attributes: got %#v, want %#v", s.span.Attrs, want) + assert.Truef( + t, + eq(want, s.span.Links[0].Attrs), + "new span link attributes: got %#v, want %#v", + s.span.Attrs, + want, + ) }) } }
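The sdk/metric hunks above repeatedly wrap the NewManualReader(WithAggregationSelector(...)) construction, and the sdk/metric/reader.go hunk shows the DefaultAggregationSelector that such selectors override. As a rough illustration of how that public API is used outside these tests, here is a minimal sketch of a custom selector that defers to the SDK defaults except for histograms. The bucket boundaries, variable names, and import alias are invented for the example and are not part of this diff.

package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// selector defers to the SDK defaults for every instrument kind except
// histograms, which get explicit bucket boundaries. The boundary values are
// invented for this sketch.
func selector(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
	if ik == sdkmetric.InstrumentKindHistogram {
		return sdkmetric.AggregationExplicitBucketHistogram{
			Boundaries: []float64{1, 5, 10, 50, 100},
		}
	}
	return sdkmetric.DefaultAggregationSelector(ik)
}

func main() {
	// Mirror the NewManualReader(WithAggregationSelector(...)) construction
	// used in the tests above, then wire the reader into a MeterProvider.
	reader := sdkmetric.NewManualReader(sdkmetric.WithAggregationSelector(selector))
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(context.Background()) }()

	// Counters keep the default sum aggregation; histograms use the custom buckets.
	fmt.Printf("%T\n", selector(sdkmetric.InstrumentKindCounter))
}

With a selector like this, histogram instruments collected through the reader use the custom buckets while counters and up-down counters keep the sum aggregation listed in DefaultAggregationSelector.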