mirror of https://github.com/open-telemetry/opentelemetry-go.git (synced 2025-02-05 13:15:41 +02:00)
commit 1519d95982: Use reasonable interval in sdktrace.WithBatchTimeout (resolves #1564); add pull request ID to CHANGELOG.md

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpgrpc_test

import (
	"context"
	"fmt"
	"net"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp"
	commonpb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/common/v1"
	"go.opentelemetry.io/otel/exporters/otlp/internal/otlptest"
	"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
	exporttrace "go.opentelemetry.io/otel/sdk/export/trace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func TestNewExporter_endToEnd(t *testing.T) {
	tests := []struct {
		name           string
		additionalOpts []otlpgrpc.Option
	}{
		{
			name: "StandardExporter",
		},
		{
			name: "WithCompressor",
			additionalOpts: []otlpgrpc.Option{
				otlpgrpc.WithCompressor(gzip.Name),
			},
		},
		{
			name: "WithServiceConfig",
			additionalOpts: []otlpgrpc.Option{
				otlpgrpc.WithServiceConfig("{}"),
			},
		},
		{
			name: "WithDialOptions",
			additionalOpts: []otlpgrpc.Option{
				otlpgrpc.WithDialOption(grpc.WithBlock()),
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			newExporterEndToEndTest(t, test.additionalOpts)
		})
	}
}

func newGRPCExporter(t *testing.T, ctx context.Context, endpoint string, additionalOpts ...otlpgrpc.Option) *otlp.Exporter {
	opts := []otlpgrpc.Option{
		otlpgrpc.WithInsecure(),
		otlpgrpc.WithEndpoint(endpoint),
		otlpgrpc.WithReconnectionPeriod(50 * time.Millisecond),
	}

	opts = append(opts, additionalOpts...)
	driver := otlpgrpc.NewDriver(opts...)
	exp, err := otlp.NewExporter(ctx, driver)
	if err != nil {
		t.Fatalf("failed to create a new collector exporter: %v", err)
	}
	return exp
}
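
// The helper above mirrors how an application would wire this exporter up in
// production. A minimal, hypothetical sketch (not part of the original test
// suite; the function name and endpoint value are assumptions) that registers
// the exporter with a batching TracerProvider:
func newTracerProviderSketch(ctx context.Context, endpoint string) (*sdktrace.TracerProvider, error) {
	driver := otlpgrpc.NewDriver(
		otlpgrpc.WithInsecure(),         // plaintext, as in these tests
		otlpgrpc.WithEndpoint(endpoint), // e.g. a collector address
	)
	exp, err := otlp.NewExporter(ctx, driver)
	if err != nil {
		return nil, err
	}
	// Batch spans before export, mirroring the batcher options used later in
	// this file.
	return sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp)), nil
}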

func newExporterEndToEndTest(t *testing.T, additionalOpts []otlpgrpc.Option) {
	mc := runMockCollectorAtEndpoint(t, "localhost:56561")

	defer func() {
		_ = mc.stop()
	}()

	<-time.After(5 * time.Millisecond)

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint, additionalOpts...)
	defer func() {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		if err := exp.Shutdown(ctx); err != nil {
			panic(err)
		}
	}()

	otlptest.RunEndToEndTest(ctx, t, exp, mc, mc)
}
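
// otlptest.RunEndToEndTest exercises exports through the exporter and asserts
// on what the collectors received; in the helper above the same mock
// collector is passed twice, serving as both the trace and the metric
// endpoint.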

func TestNewExporter_invokeStartThenStopManyTimes(t *testing.T) {
	mc := runMockCollector(t)
	defer func() {
		_ = mc.stop()
	}()

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint)
	defer func() {
		if err := exp.Shutdown(ctx); err != nil {
			panic(err)
		}
	}()

	// Invoke Start numerous times; each call should return errAlreadyStarted.
	for i := 0; i < 10; i++ {
		if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") {
			t.Fatalf("#%d unexpected Start error: %v", i, err)
		}
	}

	if err := exp.Shutdown(ctx); err != nil {
		t.Fatalf("failed to Shutdown the exporter: %v", err)
	}
	// Invoke Shutdown numerous times; every call should succeed.
	for i := 0; i < 10; i++ {
		if err := exp.Shutdown(ctx); err != nil {
			t.Fatalf(`#%d got error (%v) expected none`, i, err)
		}
	}
}
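
// The exporter contract exercised above: a second Start returns an "already
// started" error, while Shutdown is safe to call repeatedly and keeps
// returning nil after the first successful call.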

func TestNewExporter_collectorConnectionDiesThenReconnectsWhenInRestMode(t *testing.T) {
	mc := runMockCollector(t)

	reconnectionPeriod := 2 * time.Second // 2 seconds + jitter of rest time after reconnection
	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint,
		otlpgrpc.WithReconnectionPeriod(reconnectionPeriod))
	defer func() {
		_ = exp.Shutdown(ctx)
	}()

	// We'll now stop the collector right away to simulate a connection
	// dying in the midst of communication, or even never having existed.
	_ = mc.stop()

	// First export: on failure it sends a disconnected message to the
	// channel, triggering an almost immediate reconnection.
	require.Error(
		t,
		exp.ExportSpans(ctx, []*exporttrace.SpanSnapshot{{Name: "in the midst"}}),
		"transport: Error while dialing dial tcp %s: connect: connection refused",
		mc.endpoint,
	)

	// Give it time for the first reconnection.
	<-time.After(time.Millisecond * 20)

	// Second export: it detects the connection issue, moves the exporter to
	// the disconnected state, and sends a message to the disconnected
	// channel, but this time the reconnection goroutine is in rest mode and
	// not listening to the disconnected channel.
	require.Error(
		t,
		exp.ExportSpans(ctx, []*exporttrace.SpanSnapshot{{Name: "in the midst"}}),
		"transport: Error while dialing dial tcp %s: connect: connection refused",
		mc.endpoint,
	)

	// As a result the exporter is in the disconnected state, waiting for a
	// disconnection message before it reconnects.

	// Resurrect the collector.
	nmc := runMockCollectorAtEndpoint(t, mc.endpoint)

	// Make sure the reconnection loop cycles back to its beginning and
	// returns to waiting mode; once it does, it should reconnect.
	<-time.After(time.Second * 4)

	n := 10
	for i := 0; i < n; i++ {
		// While disconnected, exp.ExportSpans does not send disconnected
		// messages again; it just quits and returns the last connection
		// error.
		require.NoError(t, exp.ExportSpans(ctx, []*exporttrace.SpanSnapshot{{Name: "Resurrected"}}))
	}

	nmaSpans := nmc.getSpans()

	// Expecting the n (10) sampled SpanSnapshots to have reached the new collector.
	if g, w := len(nmaSpans), n; g != w {
		t.Fatalf("Connected collector: spans: got %d want %d", g, w)
	}

	dSpans := mc.getSpans()
	// Expecting 0 spans to have been received by the original, now dead, collector.
	if g, w := len(dSpans), 0; g != w {
		t.Fatalf("Disconnected collector: spans: got %d want %d", g, w)
	}
}

func TestNewExporter_collectorConnectionDiesThenReconnects(t *testing.T) {
	mc := runMockCollector(t)

	reconnectionPeriod := 20 * time.Millisecond
	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint,
		otlpgrpc.WithReconnectionPeriod(reconnectionPeriod))
	defer func() {
		_ = exp.Shutdown(ctx)
	}()

	// We'll now stop the collector right away to simulate a connection
	// dying in the midst of communication, or even never having existed.
	_ = mc.stop()

	// In the loop below, we'll stop the collector many times while exporting
	// traces, to ensure that we can reconnect.
	for j := 0; j < 3; j++ {

		// No endpoint up.
		require.Error(
			t,
			exp.ExportSpans(ctx, []*exporttrace.SpanSnapshot{{Name: "in the midst"}}),
			"transport: Error while dialing dial tcp %s: connect: connection refused",
			mc.endpoint,
		)

		// Now resurrect the collector by making a new one but reusing the
		// old endpoint; the exporter should reconnect automatically.
		nmc := runMockCollectorAtEndpoint(t, mc.endpoint)

		// Give the exporter some time to reconnect.
		<-time.After(reconnectionPeriod * 4)

		n := 10
		for i := 0; i < n; i++ {
			require.NoError(t, exp.ExportSpans(ctx, []*exporttrace.SpanSnapshot{{Name: "Resurrected"}}))
		}

		nmaSpans := nmc.getSpans()
		// Expecting the n (10) sampled SpanSnapshots to have reached the new collector.
		if g, w := len(nmaSpans), n; g != w {
			t.Fatalf("Round #%d: Connected collector: spans: got %d want %d", j, g, w)
		}

		dSpans := mc.getSpans()
		// Expecting 0 spans to have been received by the original, now dead, collector.
		if g, w := len(dSpans), 0; g != w {
			t.Fatalf("Round #%d: Disconnected collector: spans: got %d want %d", j, g, w)
		}
		_ = nmc.stop()
	}
}

// This test takes a long time to run; skip it by running tests with -short.
func TestNewExporter_collectorOnBadConnection(t *testing.T) {
	if testing.Short() {
		t.Skipf("Skipping this long running test")
	}

	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		t.Fatalf("Failed to grab an available port: %v", err)
	}
	// First close the "collector's" listener: optimistically this endpoint
	// won't get reused right away. The point of closing it is to simulate an
	// unavailable connection.
	_ = ln.Close()

	_, collectorPortStr, _ := net.SplitHostPort(ln.Addr().String())

	endpoint := fmt.Sprintf("localhost:%s", collectorPortStr)
	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, endpoint)
	_ = exp.Shutdown(ctx)
}
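
// A hedged helper sketch of the port-grabbing trick used above: bind to an
// ephemeral port, close the listener, and return the now-unused address. The
// function name is an assumption, not part of the original file.
func unusedLocalEndpoint() (string, error) {
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return "", err
	}
	addr := ln.Addr().String()
	_ = ln.Close()
	return addr, nil
}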

func TestNewExporter_withEndpoint(t *testing.T) {
	mc := runMockCollector(t)
	defer func() {
		_ = mc.stop()
	}()

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint)
	_ = exp.Shutdown(ctx)
}

func TestNewExporter_withHeaders(t *testing.T) {
	mc := runMockCollector(t)
	defer func() {
		_ = mc.stop()
	}()

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint,
		otlpgrpc.WithHeaders(map[string]string{"header1": "value1"}))
	require.NoError(t, exp.ExportSpans(ctx, []*exporttrace.SpanSnapshot{{Name: "in the midst"}}))

	defer func() {
		_ = exp.Shutdown(ctx)
	}()

	headers := mc.getHeaders()
	require.Len(t, headers.Get("header1"), 1)
	assert.Equal(t, "value1", headers.Get("header1")[0])
}
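
// otlpgrpc.WithHeaders sends the supplied key/value pairs as gRPC metadata on
// export calls, which is what mc.getHeaders surfaces above. A hedged,
// server-side sketch of reading such headers (assuming a handler context;
// this is not the mock collector's actual implementation):
//
//	import "google.golang.org/grpc/metadata"
//
//	func headersFromContext(ctx context.Context) metadata.MD {
//		md, _ := metadata.FromIncomingContext(ctx)
//		return md // md.Get("header1") yields []string{"value1"}
//	}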

func TestNewExporter_withMultipleAttributeTypes(t *testing.T) {
	mc := runMockCollector(t)

	defer func() {
		_ = mc.stop()
	}()

	<-time.After(5 * time.Millisecond)

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint)

	defer func() {
		_ = exp.Shutdown(ctx)
	}()

	tp := sdktrace.NewTracerProvider(
		sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
		sdktrace.WithBatcher(
			exp,
			// Add the following two options to ensure the batch is flushed.
			sdktrace.WithBatchTimeout(5*time.Second),
			sdktrace.WithMaxExportBatchSize(10),
		),
	)
	defer func() { _ = tp.Shutdown(ctx) }()

	tr := tp.Tracer("test-tracer")
	testKvs := []attribute.KeyValue{
		attribute.Int("Int", 1),
		attribute.Int64("Int64", int64(3)),
		attribute.Float64("Float64", 2.22),
		attribute.Bool("Bool", true),
		attribute.String("String", "test"),
	}
	_, span := tr.Start(ctx, "AlwaysSample")
	span.SetAttributes(testKvs...)
	span.End()

	// Flush and close.
	func() {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		if err := tp.Shutdown(ctx); err != nil {
			t.Fatalf("failed to shut down a tracer provider: %v", err)
		}
	}()

	// Wait >2 cycles.
	<-time.After(40 * time.Millisecond)

	// Now shut down the exporter.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	if err := exp.Shutdown(ctx); err != nil {
		t.Fatalf("failed to stop the exporter: %v", err)
	}

	// Shut down the collector too, so that we can begin verifying the
	// expected data.
	_ = mc.stop()

	// Now verify that we got exactly one span.
	rss := mc.getSpans()
	if got, want := len(rss), 1; got != want {
		t.Fatalf("resource span count: got %d, want %d\n", got, want)
	}

	expected := []*commonpb.KeyValue{
		{
			Key: "Int",
			Value: &commonpb.AnyValue{
				Value: &commonpb.AnyValue_IntValue{
					IntValue: 1,
				},
			},
		},
		{
			Key: "Int64",
			Value: &commonpb.AnyValue{
				Value: &commonpb.AnyValue_IntValue{
					IntValue: 3,
				},
			},
		},
		{
			Key: "Float64",
			Value: &commonpb.AnyValue{
				Value: &commonpb.AnyValue_DoubleValue{
					DoubleValue: 2.22,
				},
			},
		},
		{
			Key: "Bool",
			Value: &commonpb.AnyValue{
				Value: &commonpb.AnyValue_BoolValue{
					BoolValue: true,
				},
			},
		},
		{
			Key: "String",
			Value: &commonpb.AnyValue{
				Value: &commonpb.AnyValue_StringValue{
					StringValue: "test",
				},
			},
		},
	}

	// Verify attributes.
	if !assert.Len(t, rss[0].Attributes, len(expected)) {
		t.Fatalf("attributes count: got %d, want %d\n", len(rss[0].Attributes), len(expected))
	}
	for i, actual := range rss[0].Attributes {
		if a, ok := actual.Value.Value.(*commonpb.AnyValue_DoubleValue); ok {
			e, ok := expected[i].Value.Value.(*commonpb.AnyValue_DoubleValue)
			if !ok {
				t.Errorf("expected AnyValue_DoubleValue, got %T", expected[i].Value.Value)
				continue
			}
			if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) {
				continue
			}
			// Doubles are compared within a delta; copy the actual value so
			// the struct equality check below passes.
			e.DoubleValue = a.DoubleValue
		}
		assert.Equal(t, expected[i], actual)
	}
}
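
// For reference, the attribute-to-protobuf mapping exercised above:
// attribute.Int and attribute.Int64 map to AnyValue_IntValue,
// attribute.Float64 to AnyValue_DoubleValue, attribute.Bool to
// AnyValue_BoolValue, and attribute.String to AnyValue_StringValue.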

func TestDisconnected(t *testing.T) {
	ctx := context.Background()
	// The endpoint doesn't matter; we want to be disconnected. But we set up
	// a blocking connection, so that dialing the invalid endpoint actually
	// fails.
	exp := newGRPCExporter(t, ctx, "invalid",
		otlpgrpc.WithReconnectionPeriod(time.Hour),
		otlpgrpc.WithDialOption(
			grpc.WithBlock(),
			grpc.FailOnNonTempDialError(true),
		),
	)
	defer func() {
		assert.NoError(t, exp.Shutdown(ctx))
	}()

	assert.Error(t, exp.Export(ctx, otlptest.OneRecordCheckpointSet{}))
	assert.Error(t, exp.ExportSpans(ctx, otlptest.SingleSpanSnapshot()))
}
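
// In TestDisconnected above, grpc.WithBlock makes the dial synchronous, and
// grpc.FailOnNonTempDialError(true) turns a non-temporary dial error into an
// immediate failure instead of a retry, so the exporter surfaces the bad
// endpoint right away.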

func TestEmptyData(t *testing.T) {
	mc := runMockCollectorAtEndpoint(t, "localhost:56561")

	defer func() {
		_ = mc.stop()
	}()

	<-time.After(5 * time.Millisecond)

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint)
	defer func() {
		assert.NoError(t, exp.Shutdown(ctx))
	}()

	assert.NoError(t, exp.ExportSpans(ctx, nil))
	assert.NoError(t, exp.Export(ctx, otlptest.EmptyCheckpointSet{}))
}

func TestFailedMetricTransform(t *testing.T) {
	mc := runMockCollectorAtEndpoint(t, "localhost:56561")

	defer func() {
		_ = mc.stop()
	}()

	<-time.After(5 * time.Millisecond)

	ctx := context.Background()
	exp := newGRPCExporter(t, ctx, mc.endpoint)
	defer func() {
		assert.NoError(t, exp.Shutdown(ctx))
	}()

	assert.Error(t, exp.Export(ctx, otlptest.FailCheckpointSet{}))
}

func TestMultiConnectionDriver(t *testing.T) {
	mcTraces := runMockCollector(t)
	mcMetrics := runMockCollector(t)

	defer func() {
		_ = mcTraces.stop()
		_ = mcMetrics.stop()
	}()

	<-time.After(5 * time.Millisecond)

	commonOpts := []otlpgrpc.Option{
		otlpgrpc.WithInsecure(),
		otlpgrpc.WithReconnectionPeriod(50 * time.Millisecond),
		otlpgrpc.WithDialOption(grpc.WithBlock()),
	}
	optsTraces := append([]otlpgrpc.Option{
		otlpgrpc.WithEndpoint(mcTraces.endpoint),
	}, commonOpts...)
	optsMetrics := append([]otlpgrpc.Option{
		otlpgrpc.WithEndpoint(mcMetrics.endpoint),
	}, commonOpts...)

	tracesDriver := otlpgrpc.NewDriver(optsTraces...)
	metricsDriver := otlpgrpc.NewDriver(optsMetrics...)
	splitCfg := otlp.SplitConfig{
		ForMetrics: metricsDriver,
		ForTraces:  tracesDriver,
	}
	driver := otlp.NewSplitDriver(splitCfg)
	ctx := context.Background()
	exp, err := otlp.NewExporter(ctx, driver)
	if err != nil {
		t.Fatalf("failed to create a new collector exporter: %v", err)
	}
	defer func() {
		assert.NoError(t, exp.Shutdown(ctx))
	}()
	otlptest.RunEndToEndTest(ctx, t, exp, mcTraces, mcMetrics)
}
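
// A hedged sketch of the split-driver pattern above in isolation: traces and
// metrics can be sent to different endpoints through a single exporter. The
// endpoint values here are placeholder assumptions.
//
//	tracesDriver := otlpgrpc.NewDriver(otlpgrpc.WithEndpoint("traces-collector:4317"), otlpgrpc.WithInsecure())
//	metricsDriver := otlpgrpc.NewDriver(otlpgrpc.WithEndpoint("metrics-collector:4317"), otlpgrpc.WithInsecure())
//	driver := otlp.NewSplitDriver(otlp.SplitConfig{ForTraces: tracesDriver, ForMetrics: metricsDriver})
//	exp, err := otlp.NewExporter(context.Background(), driver)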