// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlp_test

import (
	"context"
	"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/exporters/otlp"
metricpb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/metrics/v1"
tracepb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/trace/v1"
"go.opentelemetry.io/otel/exporters/otlp/internal/transform"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/export/trace"
)
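
// stubProtocolDriver is a test double for otlp.ProtocolDriver. It counts
// Start/Stop and export calls, returns any injected errors, and records the
// transformed metrics and spans it receives so tests can inspect them.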
type stubProtocolDriver struct {
	started            int
	stopped            int
	tracesExported     int
	metricsExported    int
	injectedStartError error
	injectedStopError  error

	rm []metricpb.ResourceMetrics
	rs []tracepb.ResourceSpans
}

var _ otlp.ProtocolDriver = (*stubProtocolDriver)(nil)
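
// Start records the call and, unless ctx is already done, returns the
// injected start error (nil by default).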
func (m *stubProtocolDriver) Start(ctx context.Context) error {
	m.started++
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return m.injectedStartError
	}
}
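
// Stop records the call and, unless ctx is already done, returns the
// injected stop error (nil by default).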
func (m *stubProtocolDriver) Stop(ctx context.Context) error {
	m.stopped++
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return m.injectedStopError
	}
}
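
// ExportMetrics transforms the checkpoint set into protobuf ResourceMetrics
// and appends them to m.rm for later inspection.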
func (m *stubProtocolDriver) ExportMetrics(parent context.Context, cps metricsdk.CheckpointSet, selector metricsdk.ExportKindSelector) error {
	m.metricsExported++
	rms, err := transform.CheckpointSet(parent, selector, cps, 1)
	if err != nil {
		return err
	}
	for _, rm := range rms {
		if rm == nil {
			continue
		}
		m.rm = append(m.rm, *rm)
	}
	return nil
}
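
// ExportTraces transforms the span snapshots into protobuf ResourceSpans and
// appends them to m.rs for later inspection.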
func (m *stubProtocolDriver) ExportTraces(ctx context.Context, ss []*tracesdk.SpanSnapshot) error {
	m.tracesExported++
	for _, rs := range transform.SpanData(ss) {
		if rs == nil {
			continue
		}
		m.rs = append(m.rs, *rs)
	}
	return nil
}
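
// Reset discards the ResourceMetrics and ResourceSpans recorded so far.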
func (m *stubProtocolDriver) Reset() {
	m.rm = nil
	m.rs = nil
}
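
// newExporter returns an exporter backed by a fresh stubProtocolDriver,
// failing the test immediately if construction returns an error.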
func newExporter(t *testing.T, opts ...otlp.ExporterOption) (*otlp.Exporter, *stubProtocolDriver) {
driver := &stubProtocolDriver{}
exp, err := otlp.NewExporter(context.Background(), driver, opts...)
	require.NoError(t, err)
	return exp, driver
}
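
// TestExporterShutdownHonorsTimeout verifies that shutting the exporter down
// respects the deadline of the context it is given.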
func TestExporterShutdownHonorsTimeout(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()
	e := otlp.NewUnstartedExporter(&stubProtocolDriver{})
if err := e.Start(ctx); err != nil {
t.Fatalf("failed to start exporter: %v", err)
}
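// Give innerCtx a deadline that is guaranteed to have expired (after the
// one-second sleep below) before Shutdown is called, so Shutdown must fail
// with context.DeadlineExceeded.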
innerCtx, innerCancel := context.WithTimeout(ctx, time.Microsecond)
<-time.After(time.Second)
if err := e.Shutdown(innerCtx); err == nil {
t.Error("expected context DeadlineExceeded error, got nil")
} else if !errors.Is(err, context.DeadlineExceeded) {
t.Errorf("expected context DeadlineExceeded error, got %v", err)
}
innerCancel()
}
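// TestExporterShutdownHonorsCancel verifies that Shutdown returns
// context.Canceled when it is called with an already canceled context.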
func TestExporterShutdownHonorsCancel(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
e := otlp.NewUnstartedExporter(&stubProtocolDriver{})
if err := e.Start(ctx); err != nil {
t.Fatalf("failed to start exporter: %v", err)
}
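// Cancel the context before calling Shutdown so Shutdown cannot complete cleanly.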
var innerCancel context.CancelFunc
ctx, innerCancel = context.WithCancel(ctx)
innerCancel()
if err := e.Shutdown(ctx); err == nil {
t.Error("expected context canceled error, got nil")
} else if !errors.Is(err, context.Canceled) {
t.Errorf("expected context canceled error, got %v", err)
}
}
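// TestExporterShutdownNoError verifies that shutting down a started exporter
// with a live context succeeds without error.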
func TestExporterShutdownNoError(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
e := otlp.NewUnstartedExporter(&stubProtocolDriver{})
if err := e.Start(ctx); err != nil {
t.Fatalf("failed to start exporter: %v", err)
}
if err := e.Shutdown(ctx); err != nil {
t.Errorf("shutdown errored: expected nil, got %v", err)
}
}
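// TestExporterShutdownManyTimes verifies that calling Shutdown concurrently
// from many goroutines is safe and that every call returns without error.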
func TestExporterShutdownManyTimes(t *testing.T) {
ctx := context.Background()
e, err := otlp.NewExporter(ctx, &stubProtocolDriver{})
if err != nil {
t.Fatalf("failed to start an exporter: %v", err)
}
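// Start num goroutines that all block on ch, then release them at once so
// the Shutdown calls race against each other.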
ch := make(chan struct{})
wg := sync.WaitGroup{}
const num int = 20
wg.Add(num)
errs := make([]error, num)
for i := 0; i < num; i++ {
go func(idx int) {
defer wg.Done()
<-ch
errs[idx] = e.Shutdown(ctx)
}(i)
}
close(ch)
wg.Wait()
for _, err := range errs {
if err != nil {
t.Fatalf("failed to shutdown exporter: %v", err)
}
}
}
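// TestSplitDriver verifies that the split driver starts and stops both
// underlying drivers and routes metrics to the metrics driver and traces to
// the traces driver.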
func TestSplitDriver(t *testing.T) {
driverTraces := &stubProtocolDriver{}
driverMetrics := &stubProtocolDriver{}
config := otlp.SplitConfig{
ForMetrics: driverMetrics,
ForTraces: driverTraces,
}
driver := otlp.NewSplitDriver(config)
ctx := context.Background()
assert.NoError(t, driver.Start(ctx))
assert.Equal(t, 1, driverTraces.started)
assert.Equal(t, 1, driverMetrics.started)
assert.Equal(t, 0, driverTraces.stopped)
assert.Equal(t, 0, driverMetrics.stopped)
assert.Equal(t, 0, driverTraces.tracesExported)
assert.Equal(t, 0, driverTraces.metricsExported)
assert.Equal(t, 0, driverMetrics.tracesExported)
assert.Equal(t, 0, driverMetrics.metricsExported)
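// Exported metrics must reach only the metrics driver and exported traces
// only the traces driver.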
assert.NoError(t, driver.ExportMetrics(ctx, discCheckpointSet{}, metricsdk.StatelessExportKindSelector()))
assert.NoError(t, driver.ExportTraces(ctx, []*tracesdk.SpanSnapshot{discSpanSnapshot()}))
assert.Len(t, driverTraces.rm, 0)
assert.Len(t, driverTraces.rs, 1)
assert.Len(t, driverMetrics.rm, 1)
assert.Len(t, driverMetrics.rs, 0)
assert.Equal(t, 1, driverTraces.tracesExported)
assert.Equal(t, 0, driverTraces.metricsExported)
assert.Equal(t, 0, driverMetrics.tracesExported)
assert.Equal(t, 1, driverMetrics.metricsExported)
assert.NoError(t, driver.Stop(ctx))
assert.Equal(t, 1, driverTraces.started)
assert.Equal(t, 1, driverMetrics.started)
assert.Equal(t, 1, driverTraces.stopped)
assert.Equal(t, 1, driverMetrics.stopped)
assert.Equal(t, 1, driverTraces.tracesExported)
assert.Equal(t, 0, driverTraces.metricsExported)
assert.Equal(t, 0, driverMetrics.tracesExported)
assert.Equal(t, 1, driverMetrics.metricsExported)
}
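// TestSplitDriverFail verifies that the split driver reports an error from
// Start or Stop whenever either underlying driver fails that call.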
func TestSplitDriverFail(t *testing.T) {
ctx := context.Background()
for i := 0; i < 16; i++ {
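// Each bit of i selects one of the four possible failures, so the loop
// covers all 16 combinations of start/stop errors on the trace and metric
// drivers.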
var (
errStartMetric error
errStartTrace error
errStopMetric error
errStopTrace error
)
if (i & 1) != 0 {
errStartTrace = errors.New("trace start failed")
}
if (i & 2) != 0 {
errStopTrace = errors.New("trace stop failed")
}
if (i & 4) != 0 {
errStartMetric = errors.New("metric start failed")
}
if (i & 8) != 0 {
errStopMetric = errors.New("metric stop failed")
}
shouldStartFail := errStartTrace != nil || errStartMetric != nil
shouldStopFail := errStopTrace != nil || errStopMetric != nil
driverTraces := &stubProtocolDriver{
injectedStartError: errStartTrace,
injectedStopError: errStopTrace,
}
driverMetrics := &stubProtocolDriver{
injectedStartError: errStartMetric,
injectedStopError: errStopMetric,
}
config := otlp.SplitConfig{
ForMetrics: driverMetrics,
ForTraces: driverTraces,
}
driver := otlp.NewSplitDriver(config)
errStart := driver.Start(ctx)
if shouldStartFail {
assert.Error(t, errStart)
} else {
assert.NoError(t, errStart)
}
errStop := driver.Stop(ctx)
if shouldStopFail {
assert.Error(t, errStop)
} else {
assert.NoError(t, errStop)
}
}
}