mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2025-03-31 21:55:32 +02:00

OTLP metrics gRPC exporter (#1991)

* OTLP metrics gRPC exporter

* add newline

* address comments
Gustavo Silva Paiva 2021-06-11 17:25:56 -03:00 committed by GitHub
parent 64b640cc26
commit b33edaa552
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 6755 additions and 0 deletions

View File

@@ -266,3 +266,23 @@ updates:
    schedule:
      day: sunday
      interval: weekly
  - package-ecosystem: gomod
    directory: /exporters/otlp/otlpmetric
    labels:
      - dependencies
      - go
      - "Skip Changelog"
    schedule:
      day: sunday
      interval: weekly
  - package-ecosystem: gomod
    directory: /exporters/otlp/otlpmetric/otlpmetricgrpc
    labels:
      - dependencies
      - go
      - "Skip Changelog"
    schedule:
      day: sunday
      interval: weekly

View File

@@ -40,6 +40,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
- Several builtin resource detectors now correctly populate the schema URL. (#1938)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
### Changed
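
The entry above is the only prose description of the new packages in this commit, so a minimal usage sketch may help. Only `New` and `NewUnstarted` are confirmed by the entry; the `WithEndpoint` and `WithInsecure` options are assumptions, modeled on the existing otlptracegrpc exporter.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
	ctx := context.Background()
	// New is assumed to dial the collector and return a started *otlpmetric.Exporter.
	// WithEndpoint and WithInsecure are assumed options, mirroring otlptracegrpc.
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpoint("localhost:4317"),
		otlpmetricgrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("creating OTLP metrics exporter: %v", err)
	}
	defer func() {
		if err := exp.Shutdown(ctx); err != nil {
			log.Printf("shutting down exporter: %v", err)
		}
	}()
	// The exporter would then be registered with the SDK's metrics push controller.
}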

View File

@@ -63,3 +63,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -59,3 +59,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -59,3 +59,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -60,3 +60,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -61,3 +61,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -62,3 +62,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -61,3 +61,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -61,3 +61,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -60,3 +60,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@@ -64,3 +64,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlp/otlpmetric/otlpmetricgrpc

View File

@@ -0,0 +1,43 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
import (
"context"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
// Client manages connections to the collector, handles the
// transformation of data into wire format, and the transmission of that
// data to the collector.
type Client interface {
// Start should establish connection(s) to endpoint(s). It is
// called just once by the exporter, so the implementation
// does not need to worry about idempotence and locking.
Start(ctx context.Context) error
// Stop should close the connections. The function is called
// only once by the exporter, so the implementation does not
// need to worry about idempotence, but it may be called
// concurrently with UploadMetrics, so proper
// locking is required. The function serves as a
// synchronization point - after the function returns, the
// process of closing connections is assumed to be finished.
Stop(ctx context.Context) error
// UploadMetrics should transform the passed metrics to the
// wire format and send it to the collector. May be called
// concurrently.
UploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error
}
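
As a minimal sketch of the contract above: any type implementing these three methods can be handed to the exporter constructors added later in this commit. The noopClient type here is hypothetical and for illustration only; it discards all data.

package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

// noopClient satisfies otlpmetric.Client but drops everything it is given.
type noopClient struct{}

func (noopClient) Start(ctx context.Context) error { return nil }
func (noopClient) Stop(ctx context.Context) error  { return nil }
func (noopClient) UploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error {
	return nil
}

func main() {
	exp := otlpmetric.NewUnstarted(noopClient{})
	// exp.Start invokes noopClient.Start once; a second call would return an
	// "already started" error, matching the interface contract above.
	_ = exp.Start(context.Background())
}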

View File

@@ -0,0 +1,127 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetric
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
"go.opentelemetry.io/otel/metric"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)
var (
errAlreadyStarted = errors.New("already started")
)
// Exporter exports metrics data in the OTLP wire format.
type Exporter struct {
client Client
exportKindSelector metricsdk.ExportKindSelector
mu sync.RWMutex
started bool
startOnce sync.Once
stopOnce sync.Once
}
// Export exports a batch of metrics.
func (e *Exporter) Export(ctx context.Context, checkpointSet metricsdk.CheckpointSet) error {
rms, err := metrictransform.CheckpointSet(ctx, e, checkpointSet, 1)
if err != nil {
return err
}
if len(rms) == 0 {
return nil
}
return e.client.UploadMetrics(ctx, rms)
}
// Start establishes a connection to the receiving endpoint.
func (e *Exporter) Start(ctx context.Context) error {
var err = errAlreadyStarted
e.startOnce.Do(func() {
e.mu.Lock()
e.started = true
e.mu.Unlock()
err = e.client.Start(ctx)
})
return err
}
// Shutdown flushes all exports and closes all connections to the receiving endpoint.
func (e *Exporter) Shutdown(ctx context.Context) error {
e.mu.RLock()
started := e.started
e.mu.RUnlock()
if !started {
return nil
}
var err error
e.stopOnce.Do(func() {
err = e.client.Stop(ctx)
e.mu.Lock()
e.started = false
e.mu.Unlock()
})
return err
}
func (e *Exporter) ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) metricsdk.ExportKind {
return e.exportKindSelector.ExportKindFor(descriptor, aggregatorKind)
}
var _ metricsdk.Exporter = (*Exporter)(nil)
// New constructs a new Exporter and starts it.
func New(ctx context.Context, client Client, opts ...Option) (*Exporter, error) {
exp := NewUnstarted(client, opts...)
if err := exp.Start(ctx); err != nil {
return nil, err
}
return exp, nil
}
// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(client Client, opts ...Option) *Exporter {
cfg := config{
// Note: the default ExportKindSelector is specified
// as Cumulative:
// https://github.com/open-telemetry/opentelemetry-specification/issues/731
exportKindSelector: metricsdk.CumulativeExportKindSelector(),
}
for _, opt := range opts {
opt.apply(&cfg)
}
e := &Exporter{
client: client,
exportKindSelector: cfg.exportKindSelector,
}
return e
}
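
The comment in NewUnstarted notes that the default selector is Cumulative; callers that want the stateless (delta-for-synchronous) behaviour exercised by the tests below pass a selector through the exporter option. A small hypothetical helper, not part of this commit:

package otlpmetricexample

import (
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
)

// newStatelessExporter returns an unstarted exporter whose ExportKindFor
// delegates to the stateless selector instead of the cumulative default.
func newStatelessExporter(client otlpmetric.Client) *otlpmetric.Exporter {
	return otlpmetric.NewUnstarted(client,
		otlpmetric.WithMetricExportKindSelector(metricsdk.StatelessExportKindSelector()),
	)
}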

View File

@@ -0,0 +1,899 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetric_test
import (
"context"
"sync"
"testing"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/export/metric/metrictest"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/resource"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
)
var (
// Timestamps used in this test:
intervalStart = time.Now()
intervalEnd = intervalStart.Add(time.Hour)
)
type stubClient struct {
rm []*metricpb.ResourceMetrics
}
func (m *stubClient) Start(ctx context.Context) error {
return nil
}
func (m *stubClient) Stop(ctx context.Context) error {
return nil
}
func (m *stubClient) UploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error {
m.rm = append(m.rm, protoMetrics...)
return nil
}
var _ otlpmetric.Client = (*stubClient)(nil)
func (m *stubClient) Reset() {
m.rm = nil
}
func newExporter(t *testing.T, opts ...otlpmetric.Option) (*otlpmetric.Exporter, *stubClient) {
client := &stubClient{}
exp, _ := otlpmetric.New(context.Background(), client, opts...)
return exp, client
}
func startTime() uint64 {
return uint64(intervalStart.UnixNano())
}
func pointTime() uint64 {
return uint64(intervalEnd.UnixNano())
}
type checkpointSet struct {
sync.RWMutex
records []metricsdk.Record
}
func (m *checkpointSet) ForEach(_ metricsdk.ExportKindSelector, fn func(metricsdk.Record) error) error {
for _, r := range m.records {
if err := fn(r); err != nil && err != aggregation.ErrNoData {
return err
}
}
return nil
}
type record struct {
name string
iKind metric.InstrumentKind
nKind number.Kind
resource *resource.Resource
opts []metric.InstrumentOption
labels []attribute.KeyValue
}
var (
baseKeyValues = []attribute.KeyValue{attribute.String("host", "test.com")}
cpuKey = attribute.Key("CPU")
testInstA = resource.NewSchemaless(attribute.String("instance", "tester-a"))
testInstB = resource.NewSchemaless(attribute.String("instance", "tester-b"))
testHistogramBoundaries = []float64{2.0, 4.0, 8.0}
cpu1Labels = []*commonpb.KeyValue{
{
Key: "CPU",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: 1,
},
},
},
{
Key: "host",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: "test.com",
},
},
},
}
cpu2Labels = []*commonpb.KeyValue{
{
Key: "CPU",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: 2,
},
},
},
{
Key: "host",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: "test.com",
},
},
},
}
testerAResource = &resourcepb.Resource{
Attributes: []*commonpb.KeyValue{
{
Key: "instance",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: "tester-a",
},
},
},
},
}
testerBResource = &resourcepb.Resource{
Attributes: []*commonpb.KeyValue{
{
Key: "instance",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: "tester-b",
},
},
},
},
}
)
func TestNoGroupingExport(t *testing.T) {
runMetricExportTests(
t,
nil,
[]record{
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
nil,
nil,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
nil,
nil,
append(baseKeyValues, cpuKey.Int(2)),
},
},
[]*metricpb.ResourceMetrics{
{
Resource: nil,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu2Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
},
)
}
func TestValuerecorderMetricGroupingExport(t *testing.T) {
r := record{
"valuerecorder",
metric.ValueRecorderInstrumentKind,
number.Int64Kind,
nil,
nil,
append(baseKeyValues, cpuKey.Int(1)),
}
expected := []*metricpb.ResourceMetrics{
{
Resource: nil,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "valuerecorder",
Data: &metricpb.Metric_Histogram{
Histogram: &metricpb.Histogram{
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.HistogramDataPoint{
{
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
Count: 2,
Sum: 11,
ExplicitBounds: testHistogramBoundaries,
BucketCounts: []uint64{1, 0, 0, 1},
},
{
Attributes: cpu1Labels,
Count: 2,
Sum: 11,
ExplicitBounds: testHistogramBoundaries,
BucketCounts: []uint64{1, 0, 0, 1},
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
}
runMetricExportTests(t, nil, []record{r, r}, expected)
}
func TestCountInt64MetricGroupingExport(t *testing.T) {
r := record{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
nil,
nil,
append(baseKeyValues, cpuKey.Int(1)),
}
runMetricExportTests(
t,
nil,
[]record{r, r},
[]*metricpb.ResourceMetrics{
{
Resource: nil,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
},
)
}
func TestCountFloat64MetricGroupingExport(t *testing.T) {
r := record{
"float64-count",
metric.CounterInstrumentKind,
number.Float64Kind,
nil,
nil,
append(baseKeyValues, cpuKey.Int(1)),
}
runMetricExportTests(
t,
nil,
[]record{r, r},
[]*metricpb.ResourceMetrics{
{
Resource: nil,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "float64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsDouble{AsDouble: 11.0},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsDouble{AsDouble: 11.0},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
},
)
}
func TestResourceMetricGroupingExport(t *testing.T) {
runMetricExportTests(
t,
nil,
[]record{
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
nil,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
nil,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
nil,
append(baseKeyValues, cpuKey.Int(2)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstB,
nil,
append(baseKeyValues, cpuKey.Int(1)),
},
},
[]*metricpb.ResourceMetrics{
{
Resource: testerAResource,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu2Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
{
Resource: testerBResource,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
},
)
}
func TestResourceInstLibMetricGroupingExport(t *testing.T) {
countingLib1 := []metric.InstrumentOption{
metric.WithInstrumentationName("counting-lib"),
metric.WithInstrumentationVersion("v1"),
}
countingLib2 := []metric.InstrumentOption{
metric.WithInstrumentationName("counting-lib"),
metric.WithInstrumentationVersion("v2"),
}
summingLib := []metric.InstrumentOption{
metric.WithInstrumentationName("summing-lib"),
}
runMetricExportTests(
t,
nil,
[]record{
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
countingLib1,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
countingLib2,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
countingLib1,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
countingLib1,
append(baseKeyValues, cpuKey.Int(2)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstA,
summingLib,
append(baseKeyValues, cpuKey.Int(1)),
},
{
"int64-count",
metric.CounterInstrumentKind,
number.Int64Kind,
testInstB,
countingLib1,
append(baseKeyValues, cpuKey.Int(1)),
},
},
[]*metricpb.ResourceMetrics{
{
Resource: testerAResource,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
Name: "counting-lib",
Version: "v1",
},
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu2Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
{
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
Name: "counting-lib",
Version: "v2",
},
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
{
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
Name: "summing-lib",
},
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
{
Resource: testerBResource,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
Name: "counting-lib",
Version: "v1",
},
Metrics: []*metricpb.Metric{
{
Name: "int64-count",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: true,
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
},
)
}
func TestStatelessExportKind(t *testing.T) {
type testcase struct {
name string
instrumentKind metric.InstrumentKind
aggTemporality metricpb.AggregationTemporality
monotonic bool
}
for _, k := range []testcase{
{"counter", metric.CounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, true},
{"updowncounter", metric.UpDownCounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, false},
{"sumobserver", metric.SumObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, true},
{"updownsumobserver", metric.UpDownSumObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, false},
} {
t.Run(k.name, func(t *testing.T) {
runMetricExportTests(
t,
[]otlpmetric.Option{
otlpmetric.WithMetricExportKindSelector(
metricsdk.StatelessExportKindSelector(),
),
},
[]record{
{
"instrument",
k.instrumentKind,
number.Int64Kind,
testInstA,
nil,
append(baseKeyValues, cpuKey.Int(1)),
},
},
[]*metricpb.ResourceMetrics{
{
Resource: testerAResource,
InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
{
Metrics: []*metricpb.Metric{
{
Name: "instrument",
Data: &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: k.monotonic,
AggregationTemporality: k.aggTemporality,
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
Attributes: cpu1Labels,
StartTimeUnixNano: startTime(),
TimeUnixNano: pointTime(),
},
},
},
},
},
},
},
},
},
},
)
})
}
}
func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, rs []record, expected []*metricpb.ResourceMetrics) {
exp, driver := newExporter(t, opts...)
recs := map[attribute.Distinct][]metricsdk.Record{}
resources := map[attribute.Distinct]*resource.Resource{}
for _, r := range rs {
lcopy := make([]attribute.KeyValue, len(r.labels))
copy(lcopy, r.labels)
desc := metric.NewDescriptor(r.name, r.iKind, r.nKind, r.opts...)
labs := attribute.NewSet(lcopy...)
var agg, ckpt metricsdk.Aggregator
if r.iKind.Adding() {
agg, ckpt = metrictest.Unslice2(sum.New(2))
} else {
agg, ckpt = metrictest.Unslice2(histogram.New(2, &desc, histogram.WithExplicitBoundaries(testHistogramBoundaries)))
}
ctx := context.Background()
if r.iKind.Synchronous() {
// For synchronous instruments, perform two updates: 1 and 10
switch r.nKind {
case number.Int64Kind:
require.NoError(t, agg.Update(ctx, number.NewInt64Number(1), &desc))
require.NoError(t, agg.Update(ctx, number.NewInt64Number(10), &desc))
case number.Float64Kind:
require.NoError(t, agg.Update(ctx, number.NewFloat64Number(1), &desc))
require.NoError(t, agg.Update(ctx, number.NewFloat64Number(10), &desc))
default:
t.Fatalf("invalid number kind: %v", r.nKind)
}
} else {
// For asynchronous instruments, perform a single update: 11
switch r.nKind {
case number.Int64Kind:
require.NoError(t, agg.Update(ctx, number.NewInt64Number(11), &desc))
case number.Float64Kind:
require.NoError(t, agg.Update(ctx, number.NewFloat64Number(11), &desc))
default:
t.Fatalf("invalid number kind: %v", r.nKind)
}
}
require.NoError(t, agg.SynchronizedMove(ckpt, &desc))
equiv := r.resource.Equivalent()
resources[equiv] = r.resource
recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, r.resource, ckpt.Aggregation(), intervalStart, intervalEnd))
}
for _, records := range recs {
assert.NoError(t, exp.Export(context.Background(), &checkpointSet{records: records}))
}
// assert.ElementsMatch does not equate nested slices of different order,
// therefore this requires the top level slice to be broken down.
// Build a map of Resource/InstrumentationLibrary pairs to Metrics; from
// that, validate that the metric elements match for all expected pairs.
// Finally, make sure we saw all expected pairs.
type key struct {
resource, instrumentationLibrary string
}
got := map[key][]*metricpb.Metric{}
for _, rm := range driver.rm {
for _, ilm := range rm.InstrumentationLibraryMetrics {
k := key{
resource: rm.GetResource().String(),
instrumentationLibrary: ilm.GetInstrumentationLibrary().String(),
}
got[k] = ilm.GetMetrics()
}
}
seen := map[key]struct{}{}
for _, rm := range expected {
for _, ilm := range rm.InstrumentationLibraryMetrics {
k := key{
resource: rm.GetResource().String(),
instrumentationLibrary: ilm.GetInstrumentationLibrary().String(),
}
seen[k] = struct{}{}
g, ok := got[k]
if !ok {
t.Errorf("missing metrics for:\n\tResource: %s\n\tInstrumentationLibrary: %s\n", k.resource, k.instrumentationLibrary)
continue
}
if !assert.Len(t, g, len(ilm.GetMetrics())) {
continue
}
for i, expected := range ilm.GetMetrics() {
assert.Equal(t, expected.Name, g[i].Name)
assert.Equal(t, expected.Unit, g[i].Unit)
assert.Equal(t, expected.Description, g[i].Description)
switch g[i].Data.(type) {
case *metricpb.Metric_Gauge:
assert.ElementsMatch(t, expected.GetGauge().GetDataPoints(), g[i].GetGauge().GetDataPoints())
case *metricpb.Metric_Sum:
assert.Equal(t,
expected.GetSum().GetAggregationTemporality(),
g[i].GetSum().GetAggregationTemporality(),
)
assert.Equal(t,
expected.GetSum().GetIsMonotonic(),
g[i].GetSum().GetIsMonotonic(),
)
assert.ElementsMatch(t, expected.GetSum().GetDataPoints(), g[i].GetSum().GetDataPoints())
case *metricpb.Metric_Histogram:
assert.Equal(
t,
expected.GetHistogram().GetAggregationTemporality(),
g[i].GetHistogram().GetAggregationTemporality(),
)
assert.ElementsMatch(t, expected.GetHistogram().GetDataPoints(), g[i].GetHistogram().GetDataPoints())
case *metricpb.Metric_Summary:
assert.ElementsMatch(t, expected.GetSummary().GetDataPoints(), g[i].GetSummary().GetDataPoints())
default:
assert.Failf(t, "unknown data type", g[i].Name)
}
}
}
}
for k := range got {
if _, ok := seen[k]; !ok {
t.Errorf("did not expect metrics for:\n\tResource: %s\n\tInstrumentationLibrary: %s\n", k.resource, k.instrumentationLibrary)
}
}
}
func TestEmptyMetricExport(t *testing.T) {
exp, driver := newExporter(t)
for _, test := range []struct {
records []metricsdk.Record
want []*metricpb.ResourceMetrics
}{
{
[]metricsdk.Record(nil),
[]*metricpb.ResourceMetrics(nil),
},
{
[]metricsdk.Record{},
[]*metricpb.ResourceMetrics(nil),
},
} {
driver.Reset()
require.NoError(t, exp.Export(context.Background(), &checkpointSet{records: test.records}))
assert.Equal(t, test.want, driver.rm)
}
}

View File

@@ -0,0 +1,75 @@
module go.opentelemetry.io/otel/exporters/otlp/otlpmetric
go 1.15
require (
github.com/cenkalti/backoff/v4 v4.1.1
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v0.20.0
go.opentelemetry.io/otel/metric v0.20.0
go.opentelemetry.io/otel/sdk v0.20.0
go.opentelemetry.io/otel/sdk/export/metric v0.20.0
go.opentelemetry.io/otel/sdk/metric v0.20.0
go.opentelemetry.io/proto/otlp v0.9.0
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
google.golang.org/grpc v1.38.0
google.golang.org/protobuf v1.26.0
)
replace go.opentelemetry.io/otel => ../../..
replace go.opentelemetry.io/otel/sdk => ../../../sdk
replace go.opentelemetry.io/otel/exporters/otlp => ../
replace go.opentelemetry.io/otel/metric => ../../../metric
replace go.opentelemetry.io/otel/oteltest => ../../../oteltest
replace go.opentelemetry.io/otel/trace => ../../../trace
replace go.opentelemetry.io/otel/sdk/export/metric => ../../../sdk/export/metric
replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
replace go.opentelemetry.io/otel/bridge/opencensus => ../../../bridge/opencensus
replace go.opentelemetry.io/otel/bridge/opentracing => ../../../bridge/opentracing
replace go.opentelemetry.io/otel/example/jaeger => ../../../example/jaeger
replace go.opentelemetry.io/otel/example/namedtracer => ../../../example/namedtracer
replace go.opentelemetry.io/otel/example/opencensus => ../../../example/opencensus
replace go.opentelemetry.io/otel/example/otel-collector => ../../../example/otel-collector
replace go.opentelemetry.io/otel/example/passthrough => ../../../example/passthrough
replace go.opentelemetry.io/otel/example/prom-collector => ../../../example/prom-collector
replace go.opentelemetry.io/otel/example/prometheus => ../../../example/prometheus
replace go.opentelemetry.io/otel/example/zipkin => ../../../example/zipkin
replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../metric/prometheus
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ./
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ./otlpmetricgrpc
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../otlptrace
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../otlptrace/otlptracegrpc
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/exporters/stdout => ../../stdout
replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../trace/jaeger
replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../trace/zipkin
replace go.opentelemetry.io/otel/internal/tools => ../../../internal/tools
replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric

View File

@@ -0,0 +1,125 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -0,0 +1,38 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connection
import (
"os"
"testing"
"unsafe"
ottest "go.opentelemetry.io/otel/internal/internaltest"
)
// Ensure struct alignment prior to running tests.
func TestMain(m *testing.M) {
fields := []ottest.FieldOffset{
{
Name: "Connection.lastConnectErrPtr",
Offset: unsafe.Offsetof(Connection{}.lastConnectErrPtr),
},
}
if !ottest.Aligned8Byte(fields, os.Stderr) {
os.Exit(1)
}
os.Exit(m.Run())
}

View File

@@ -0,0 +1,429 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connection
import (
"context"
"fmt"
"math/rand"
"sync"
"sync/atomic"
"time"
"unsafe"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
"github.com/cenkalti/backoff/v4"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/grpc/encoding/gzip"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type Connection struct {
// Ensure pointer is 64-bit aligned for atomic operations on both 32 and 64 bit machines.
lastConnectErrPtr unsafe.Pointer
// mu protects the Connection as it is accessed by the
// exporter goroutines and background Connection goroutine
mu sync.Mutex
cc *grpc.ClientConn
// these fields are read-only after constructor is finished
cfg otlpconfig.Config
SCfg otlpconfig.SignalConfig
metadata metadata.MD
newConnectionHandler func(cc *grpc.ClientConn)
// these channels are created once
disconnectedCh chan bool
backgroundConnectionDoneCh chan struct{}
stopCh chan struct{}
// this is for tests, so they can replace the closing
// routine without a worry of modifying some global variable
// or changing it back to original after the test is done
closeBackgroundConnectionDoneCh func(ch chan struct{})
}
func NewConnection(cfg otlpconfig.Config, sCfg otlpconfig.SignalConfig, handler func(cc *grpc.ClientConn)) *Connection {
c := new(Connection)
c.newConnectionHandler = handler
c.cfg = cfg
c.SCfg = sCfg
if len(c.SCfg.Headers) > 0 {
c.metadata = metadata.New(c.SCfg.Headers)
}
c.closeBackgroundConnectionDoneCh = func(ch chan struct{}) {
close(ch)
}
return c
}
func (c *Connection) StartConnection(ctx context.Context) error {
c.stopCh = make(chan struct{})
c.disconnectedCh = make(chan bool, 1)
c.backgroundConnectionDoneCh = make(chan struct{})
if err := c.connect(ctx); err == nil {
c.setStateConnected()
} else {
c.SetStateDisconnected(err)
}
go c.indefiniteBackgroundConnection()
// TODO: proper error handling when initializing connections.
// We can report permanent errors, e.g., invalid settings.
return nil
}
func (c *Connection) LastConnectError() error {
errPtr := (*error)(atomic.LoadPointer(&c.lastConnectErrPtr))
if errPtr == nil {
return nil
}
return *errPtr
}
func (c *Connection) saveLastConnectError(err error) {
var errPtr *error
if err != nil {
errPtr = &err
}
atomic.StorePointer(&c.lastConnectErrPtr, unsafe.Pointer(errPtr))
}
func (c *Connection) SetStateDisconnected(err error) {
c.saveLastConnectError(err)
select {
case c.disconnectedCh <- true:
default:
}
c.newConnectionHandler(nil)
}
func (c *Connection) setStateConnected() {
c.saveLastConnectError(nil)
}
func (c *Connection) Connected() bool {
return c.LastConnectError() == nil
}
const defaultConnReattemptPeriod = 10 * time.Second
func (c *Connection) indefiniteBackgroundConnection() {
defer func() {
c.closeBackgroundConnectionDoneCh(c.backgroundConnectionDoneCh)
}()
connReattemptPeriod := c.cfg.ReconnectionPeriod
if connReattemptPeriod <= 0 {
connReattemptPeriod = defaultConnReattemptPeriod
}
// No strong seeding required, nano time can
// already help with pseudo uniqueness.
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
// maxJitterNanos: 70% of the connectionReattemptPeriod
maxJitterNanos := int64(0.7 * float64(connReattemptPeriod))
for {
// Otherwise these will be the normal scenarios to enable
// reconnection if we trip out.
// 1. If we've stopped, return entirely
// 2. Otherwise block until we are disconnected, and
// then retry connecting
select {
case <-c.stopCh:
return
case <-c.disconnectedCh:
// Quickly check if we haven't stopped at the
// same time.
select {
case <-c.stopCh:
return
default:
}
// Normal scenario that we'll wait for
}
if err := c.connect(context.Background()); err == nil {
c.setStateConnected()
} else {
// this code is unreachable in most cases
// c.connect does not establish Connection
c.SetStateDisconnected(err)
}
// Apply some jitter to avoid lockstep retrials of other
// collector-exporters. Lockstep retrials could result in an
// innocent DDOS, by clogging the machine's resources and network.
jitter := time.Duration(rng.Int63n(maxJitterNanos))
select {
case <-c.stopCh:
return
case <-time.After(connReattemptPeriod + jitter):
}
}
}
func (c *Connection) connect(ctx context.Context) error {
cc, err := c.dialToCollector(ctx)
if err != nil {
return err
}
c.setConnection(cc)
c.newConnectionHandler(cc)
return nil
}
// setConnection sets cc as the client Connection and returns true if
// the Connection state changed.
func (c *Connection) setConnection(cc *grpc.ClientConn) bool {
c.mu.Lock()
defer c.mu.Unlock()
// If previous clientConn is same as the current then just return.
// This doesn't happen right now as this func is only called with new ClientConn.
// It is more about future-proofing.
if c.cc == cc {
return false
}
// If the previous clientConn was non-nil, close it
if c.cc != nil {
_ = c.cc.Close()
}
c.cc = cc
return true
}
func (c *Connection) dialToCollector(ctx context.Context) (*grpc.ClientConn, error) {
dialOpts := []grpc.DialOption{}
if c.cfg.ServiceConfig != "" {
dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(c.cfg.ServiceConfig))
}
if c.SCfg.GRPCCredentials != nil {
dialOpts = append(dialOpts, grpc.WithTransportCredentials(c.SCfg.GRPCCredentials))
} else if c.SCfg.Insecure {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
if c.SCfg.Compression == otlpconfig.GzipCompression {
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
}
if len(c.cfg.DialOptions) != 0 {
dialOpts = append(dialOpts, c.cfg.DialOptions...)
}
ctx, cancel := c.ContextWithStop(ctx)
defer cancel()
ctx = c.ContextWithMetadata(ctx)
return grpc.DialContext(ctx, c.SCfg.Endpoint, dialOpts...)
}
func (c *Connection) ContextWithMetadata(ctx context.Context) context.Context {
if c.metadata.Len() > 0 {
return metadata.NewOutgoingContext(ctx, c.metadata)
}
return ctx
}
func (c *Connection) Shutdown(ctx context.Context) error {
close(c.stopCh)
// Ensure that the backgroundConnector returns
select {
case <-c.backgroundConnectionDoneCh:
case <-ctx.Done():
return ctx.Err()
}
c.mu.Lock()
cc := c.cc
c.cc = nil
c.mu.Unlock()
if cc != nil {
return cc.Close()
}
return nil
}
func (c *Connection) ContextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
// Unify the parent context Done signal with the Connection's
// stop channel.
ctx, cancel := context.WithCancel(ctx)
go func(ctx context.Context, cancel context.CancelFunc) {
select {
case <-ctx.Done():
// Nothing to do, either cancelled or deadline
// happened.
case <-c.stopCh:
cancel()
}
}(ctx, cancel)
return ctx, cancel
}
func (c *Connection) DoRequest(ctx context.Context, fn func(context.Context) error) error {
expBackoff := newExponentialBackoff(c.cfg.RetrySettings)
for {
err := fn(ctx)
if err == nil {
// request succeeded.
return nil
}
if !c.cfg.RetrySettings.Enabled {
return err
}
// We have an error, check gRPC status code.
st := status.Convert(err)
if st.Code() == codes.OK {
// Not really an error, still success.
return nil
}
// Now, this is a real error.
if !shouldRetry(st.Code()) {
// It is not a retryable error, we should not retry.
return err
}
// Need to retry.
throttle := getThrottleDuration(st)
backoffDelay := expBackoff.NextBackOff()
if backoffDelay == backoff.Stop {
// throw away the batch
err = fmt.Errorf("max elapsed time expired: %w", err)
return err
}
var delay time.Duration
if backoffDelay > throttle {
delay = backoffDelay
} else {
if expBackoff.GetElapsedTime()+throttle > expBackoff.MaxElapsedTime {
err = fmt.Errorf("max elapsed time expired when respecting server throttle: %w", err)
return err
}
// Respect server throttling.
delay = throttle
}
// back-off, but get interrupted when shutting down or request is cancelled or timed out.
err = func() error {
dt := time.NewTimer(delay)
defer dt.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-c.stopCh:
return fmt.Errorf("interrupted due to shutdown: %w", err)
case <-dt.C:
}
return nil
}()
if err != nil {
return err
}
}
}
func shouldRetry(code codes.Code) bool {
switch code {
case codes.OK:
// Success. This function should not be called for this code, the best we
// can do is tell the caller not to retry.
return false
case codes.Canceled,
codes.DeadlineExceeded,
codes.ResourceExhausted,
codes.Aborted,
codes.OutOfRange,
codes.Unavailable,
codes.DataLoss:
// These are retryable errors.
return true
case codes.Unknown,
codes.InvalidArgument,
codes.Unauthenticated,
codes.PermissionDenied,
codes.NotFound,
codes.AlreadyExists,
codes.FailedPrecondition,
codes.Unimplemented,
codes.Internal:
// These are fatal errors, don't retry.
return false
default:
// Don't retry on unknown codes.
return false
}
}
func getThrottleDuration(status *status.Status) time.Duration {
// See if throttling information is available.
for _, detail := range status.Details() {
if t, ok := detail.(*errdetails.RetryInfo); ok {
if t.RetryDelay.Seconds > 0 || t.RetryDelay.Nanos > 0 {
// We are throttled. Wait before retrying as requested by the server.
return time.Duration(t.RetryDelay.Seconds)*time.Second + time.Duration(t.RetryDelay.Nanos)*time.Nanosecond
}
return 0
}
}
return 0
}
func newExponentialBackoff(rs otlpconfig.RetrySettings) *backoff.ExponentialBackOff {
// Do not use NewExponentialBackOff since it calls Reset and the code here must
// call Reset after changing the InitialInterval (this saves an unnecessary call to Now).
expBackoff := &backoff.ExponentialBackOff{
InitialInterval: rs.InitialInterval,
RandomizationFactor: backoff.DefaultRandomizationFactor,
Multiplier: backoff.DefaultMultiplier,
MaxInterval: rs.MaxInterval,
MaxElapsedTime: rs.MaxElapsedTime,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
expBackoff.Reset()
return expBackoff
}
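A minimal sketch of the delay-selection rule implemented in DoRequest above, assuming the cenkalti/backoff/v4 module this package relies on: the next wait is the larger of the exponential back-off and the server throttle, and retrying stops once the elapsed budget would be exceeded.

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// nextDelay mirrors the retry delay selection in Connection.DoRequest:
// prefer the exponential back-off, otherwise honor the server throttle,
// and stop once MaxElapsedTime would be exceeded.
func nextDelay(exp *backoff.ExponentialBackOff, throttle time.Duration) (time.Duration, error) {
	backoffDelay := exp.NextBackOff()
	if backoffDelay == backoff.Stop {
		return 0, fmt.Errorf("max elapsed time expired")
	}
	if backoffDelay > throttle {
		return backoffDelay, nil
	}
	if exp.GetElapsedTime()+throttle > exp.MaxElapsedTime {
		return 0, fmt.Errorf("max elapsed time expired when respecting server throttle")
	}
	return throttle, nil
}

func main() {
	exp := backoff.NewExponentialBackOff()
	exp.MaxElapsedTime = time.Minute
	d, err := nextDelay(exp, 15*time.Second)
	fmt.Println(d, err) // the 15s server throttle wins over the initial ~500ms back-off
}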

View File

@ -0,0 +1,90 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connection
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
)
func TestGetThrottleDuration(t *testing.T) {
tts := []struct {
stsFn func() (*status.Status, error)
throttle time.Duration
}{
{
stsFn: func() (*status.Status, error) {
return status.New(
codes.OK,
"status with no retry info",
), nil
},
throttle: 0,
},
{
stsFn: func() (*status.Status, error) {
st := status.New(codes.ResourceExhausted, "status with retry info")
return st.WithDetails(
&errdetails.RetryInfo{RetryDelay: durationpb.New(15 * time.Millisecond)},
)
},
throttle: 15 * time.Millisecond,
},
{
stsFn: func() (*status.Status, error) {
st := status.New(codes.ResourceExhausted, "status with error info detail")
return st.WithDetails(
&errdetails.ErrorInfo{Reason: "no throttle detail"},
)
},
throttle: 0,
},
{
stsFn: func() (*status.Status, error) {
st := status.New(codes.ResourceExhausted, "status with error info and retry info")
return st.WithDetails(
&errdetails.ErrorInfo{Reason: "no throttle detail"},
&errdetails.RetryInfo{RetryDelay: durationpb.New(13 * time.Minute)},
)
},
throttle: 13 * time.Minute,
},
{
stsFn: func() (*status.Status, error) {
st := status.New(codes.ResourceExhausted, "status with two retry info should take the first")
return st.WithDetails(
&errdetails.RetryInfo{RetryDelay: durationpb.New(13 * time.Minute)},
&errdetails.RetryInfo{RetryDelay: durationpb.New(18 * time.Minute)},
)
},
throttle: 13 * time.Minute,
},
}
for _, tt := range tts {
sts, _ := tt.stsFn()
t.Run(sts.Message(), func(t *testing.T) {
th := getThrottleDuration(sts)
require.Equal(t, tt.throttle, th)
})
}
}

View File

@ -0,0 +1,141 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrictransform
import (
"reflect"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
"go.opentelemetry.io/otel/sdk/resource"
)
// Attributes transforms a slice of KeyValues into a slice of OTLP attribute key-values.
func Attributes(attrs []attribute.KeyValue) []*commonpb.KeyValue {
if len(attrs) == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, len(attrs))
for _, kv := range attrs {
out = append(out, toAttribute(kv))
}
return out
}
// ResourceAttributes transforms a Resource into a slice of OTLP attribute key-values.
func ResourceAttributes(resource *resource.Resource) []*commonpb.KeyValue {
if resource.Len() == 0 {
return nil
}
out := make([]*commonpb.KeyValue, 0, resource.Len())
for iter := resource.Iter(); iter.Next(); {
out = append(out, toAttribute(iter.Attribute()))
}
return out
}
func toAttribute(v attribute.KeyValue) *commonpb.KeyValue {
result := &commonpb.KeyValue{
Key: string(v.Key),
Value: new(commonpb.AnyValue),
}
switch v.Value.Type() {
case attribute.BOOL:
result.Value.Value = &commonpb.AnyValue_BoolValue{
BoolValue: v.Value.AsBool(),
}
case attribute.INT64:
result.Value.Value = &commonpb.AnyValue_IntValue{
IntValue: v.Value.AsInt64(),
}
case attribute.FLOAT64:
result.Value.Value = &commonpb.AnyValue_DoubleValue{
DoubleValue: v.Value.AsFloat64(),
}
case attribute.STRING:
result.Value.Value = &commonpb.AnyValue_StringValue{
StringValue: v.Value.AsString(),
}
case attribute.ARRAY:
result.Value.Value = &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: arrayValues(v),
},
}
default:
result.Value.Value = &commonpb.AnyValue_StringValue{
StringValue: "INVALID",
}
}
return result
}
func arrayValues(kv attribute.KeyValue) []*commonpb.AnyValue {
a := kv.Value.AsArray()
aType := reflect.TypeOf(a)
var valueFunc func(reflect.Value) *commonpb.AnyValue
switch aType.Elem().Kind() {
case reflect.Bool:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v.Bool(),
},
}
}
case reflect.Int, reflect.Int64:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v.Int(),
},
}
}
case reflect.Uintptr:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: int64(v.Uint()),
},
}
}
case reflect.Float64:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v.Float(),
},
}
}
case reflect.String:
valueFunc = func(v reflect.Value) *commonpb.AnyValue {
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v.String(),
},
}
}
}
results := make([]*commonpb.AnyValue, aType.Len())
for i, aValue := 0, reflect.ValueOf(a); i < aValue.Len(); i++ {
results[i] = valueFunc(aValue.Index(i))
}
return results
}
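A minimal sketch of exercising the helpers above; metrictransform is an internal package, so the import path shown is an assumption and only usable from code inside the otlpmetric module.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	// Assumed internal import path; valid only within the otlpmetric module.
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
)

func main() {
	// Attributes converts SDK key-values into OTLP key-values; array and
	// unknown value types are handled by toAttribute above.
	kvs := []attribute.KeyValue{
		attribute.String("service.name", "demo"),
		attribute.Int("retry.count", 3),
		attribute.Array("regions", []string{"us-east", "eu-west"}),
	}
	for _, kv := range metrictransform.Attributes(kvs) {
		fmt.Printf("%s -> %v\n", kv.Key, kv.Value)
	}
}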

View File

@ -0,0 +1,256 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrictransform
import (
"testing"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
)
type attributeTest struct {
attrs []attribute.KeyValue
expected []*commonpb.KeyValue
}
func TestAttributes(t *testing.T) {
for _, test := range []attributeTest{
{nil, nil},
{
[]attribute.KeyValue{
attribute.Int("int to int", 123),
attribute.Int64("int64 to int64", 1234567),
attribute.Float64("float64 to double", 1.61),
attribute.String("string to string", "string"),
attribute.Bool("bool to bool", true),
},
[]*commonpb.KeyValue{
{
Key: "int to int",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: 123,
},
},
},
{
Key: "int64 to int64",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: 1234567,
},
},
},
{
Key: "float64 to double",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: 1.61,
},
},
},
{
Key: "string to string",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: "string",
},
},
},
{
Key: "bool to bool",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: true,
},
},
},
},
},
} {
got := Attributes(test.attrs)
if !assert.Len(t, got, len(test.expected)) {
continue
}
for i, actual := range got {
if a, ok := actual.Value.Value.(*commonpb.AnyValue_DoubleValue); ok {
e, ok := test.expected[i].Value.Value.(*commonpb.AnyValue_DoubleValue)
if !ok {
t.Errorf("expected AnyValue_DoubleValue, got %T", test.expected[i].Value.Value)
continue
}
if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) {
continue
}
e.DoubleValue = a.DoubleValue
}
assert.Equal(t, test.expected[i], actual)
}
}
}
func TestArrayAttributes(t *testing.T) {
// Array KeyValue supports only arrays of primitive types:
// "bool", "int", "int64",
// "float64", "string".
for _, test := range []attributeTest{
{nil, nil},
{
[]attribute.KeyValue{
attribute.Array("invalid", [][]string{{"1", "2"}, {"a"}}),
},
[]*commonpb.KeyValue{
{
Key: "invalid",
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: "INVALID",
},
},
},
},
},
{
[]attribute.KeyValue{
attribute.Array("bool array to bool array", []bool{true, false}),
attribute.Array("int array to int64 array", []int{1, 2, 3}),
attribute.Array("int64 array to int64 array", []int64{1, 2, 3}),
attribute.Array("float64 array to double array", []float64{1.11, 2.22, 3.33}),
attribute.Array("string array to string array", []string{"foo", "bar", "baz"}),
},
[]*commonpb.KeyValue{
newOTelBoolArray("bool array to bool array", []bool{true, false}),
newOTelIntArray("int array to int64 array", []int64{1, 2, 3}),
newOTelIntArray("int64 array to int64 array", []int64{1, 2, 3}),
newOTelDoubleArray("float64 array to double array", []float64{1.11, 2.22, 3.33}),
newOTelStringArray("string array to string array", []string{"foo", "bar", "baz"}),
},
},
} {
actualArrayAttributes := Attributes(test.attrs)
expectedArrayAttributes := test.expected
if !assert.Len(t, actualArrayAttributes, len(expectedArrayAttributes)) {
continue
}
for i, actualArrayAttr := range actualArrayAttributes {
expectedArrayAttr := expectedArrayAttributes[i]
expectedKey, actualKey := expectedArrayAttr.Key, actualArrayAttr.Key
if !assert.Equal(t, expectedKey, actualKey) {
continue
}
expected := expectedArrayAttr.Value.GetArrayValue()
actual := actualArrayAttr.Value.GetArrayValue()
if expected == nil {
assert.Nil(t, actual)
continue
}
if assert.NotNil(t, actual, "expected not nil for %s", actualKey) {
assertExpectedArrayValues(t, expected.Values, actual.Values)
}
}
}
}
func assertExpectedArrayValues(t *testing.T, expectedValues, actualValues []*commonpb.AnyValue) {
for i, actual := range actualValues {
expected := expectedValues[i]
if a, ok := actual.Value.(*commonpb.AnyValue_DoubleValue); ok {
e, ok := expected.Value.(*commonpb.AnyValue_DoubleValue)
if !ok {
t.Errorf("expected AnyValue_DoubleValue, got %T", expected.Value)
continue
}
if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) {
continue
}
e.DoubleValue = a.DoubleValue
}
assert.Equal(t, expected, actual)
}
}
func newOTelBoolArray(key string, values []bool) *commonpb.KeyValue {
arrayValues := []*commonpb.AnyValue{}
for _, b := range values {
arrayValues = append(arrayValues, &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: b,
},
})
}
return newOTelArray(key, arrayValues)
}
func newOTelIntArray(key string, values []int64) *commonpb.KeyValue {
arrayValues := []*commonpb.AnyValue{}
for _, i := range values {
arrayValues = append(arrayValues, &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: i,
},
})
}
return newOTelArray(key, arrayValues)
}
func newOTelDoubleArray(key string, values []float64) *commonpb.KeyValue {
arrayValues := []*commonpb.AnyValue{}
for _, d := range values {
arrayValues = append(arrayValues, &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: d,
},
})
}
return newOTelArray(key, arrayValues)
}
func newOTelStringArray(key string, values []string) *commonpb.KeyValue {
arrayValues := []*commonpb.AnyValue{}
for _, s := range values {
arrayValues = append(arrayValues, &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: s,
},
})
}
return newOTelArray(key, arrayValues)
}
func newOTelArray(key string, arrayValues []*commonpb.AnyValue) *commonpb.KeyValue {
return &commonpb.KeyValue{
Key: key,
Value: &commonpb.AnyValue{
Value: &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: arrayValues,
},
},
},
}
}

View File

@ -0,0 +1,690 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metrictransform provides translations from opentelemetry-go
// concepts and structures to OTLP structures.
package metrictransform
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
)
var (
// ErrUnimplementedAgg is returned when a transformation of an unimplemented
// aggregator is attempted.
ErrUnimplementedAgg = errors.New("unimplemented aggregator")
// ErrIncompatibleAgg is returned when the aggregation.Kind implies an
// interface conversion that has failed.
ErrIncompatibleAgg = errors.New("incompatible aggregation type")
// ErrUnknownValueType is returned when a transformation of an unknown value
// is attempted.
ErrUnknownValueType = errors.New("invalid value type")
// ErrContextCanceled is returned when a context cancellation halts a
// transformation.
ErrContextCanceled = errors.New("context canceled")
// ErrTransforming is returned when an unexpected error is encountered transforming.
ErrTransforming = errors.New("transforming failed")
)
// result is the product of transforming Records into OTLP Metrics.
type result struct {
Resource *resource.Resource
InstrumentationLibrary instrumentation.Library
Metric *metricpb.Metric
Err error
}
// toNanos returns the number of nanoseconds since the UNIX epoch.
func toNanos(t time.Time) uint64 {
if t.IsZero() {
return 0
}
return uint64(t.UnixNano())
}
// CheckpointSet transforms all records contained in a checkpoint into
// batched OTLP ResourceMetrics.
func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
records, errc := source(ctx, exportSelector, cps)
// Start a fixed number of goroutines to transform records.
transformed := make(chan result)
var wg sync.WaitGroup
wg.Add(int(numWorkers))
for i := uint(0); i < numWorkers; i++ {
go func() {
defer wg.Done()
transformer(ctx, exportSelector, records, transformed)
}()
}
go func() {
wg.Wait()
close(transformed)
}()
// Synchronously collect the transformed records and transmit.
rms, err := sink(ctx, transformed)
if err != nil {
return nil, err
}
// source is complete, check for any errors.
if err := <-errc; err != nil {
return nil, err
}
return rms, nil
}
// source starts a goroutine that sends each one of the Records yielded by
// the CheckpointSet on the returned chan. Any error encountered will be sent
// on the returned error chan after seeding is complete.
func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
errc := make(chan error, 1)
out := make(chan export.Record)
// Seed records into process.
go func() {
defer close(out)
// No select is needed since errc is buffered.
errc <- cps.ForEach(exportSelector, func(r export.Record) error {
select {
case <-ctx.Done():
return ErrContextCanceled
case out <- r:
}
return nil
})
}()
return out, errc
}
// transformer transforms records read from the passed-in chan into
// OTLP Metrics, which are sent on the out chan.
func transformer(ctx context.Context, exportSelector export.ExportKindSelector, in <-chan export.Record, out chan<- result) {
for r := range in {
m, err := Record(exportSelector, r)
// Propagate errors, but do not send empty results.
if err == nil && m == nil {
continue
}
res := result{
Resource: r.Resource(),
InstrumentationLibrary: instrumentation.Library{
Name: r.Descriptor().InstrumentationName(),
Version: r.Descriptor().InstrumentationVersion(),
},
Metric: m,
Err: err,
}
select {
case <-ctx.Done():
return
case out <- res:
}
}
}
// sink collects transformed Records and batches them.
//
// Any errors encountered transforming input will be reported with an
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
// caller to handle any incorrect data in these ResourceMetrics.
func sink(ctx context.Context, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
var errStrings []string
type resourceBatch struct {
Resource *resourcepb.Resource
// Group by instrumentation library name and then the MetricDescriptor.
InstrumentationLibraryBatches map[instrumentation.Library]map[string]*metricpb.Metric
SchemaURL string
}
// group by unique Resource string.
grouped := make(map[attribute.Distinct]resourceBatch)
for res := range in {
if res.Err != nil {
errStrings = append(errStrings, res.Err.Error())
continue
}
rID := res.Resource.Equivalent()
rb, ok := grouped[rID]
if !ok {
rb = resourceBatch{
Resource: Resource(res.Resource),
InstrumentationLibraryBatches: make(map[instrumentation.Library]map[string]*metricpb.Metric),
}
if res.Resource != nil {
rb.SchemaURL = res.Resource.SchemaURL()
}
grouped[rID] = rb
}
mb, ok := rb.InstrumentationLibraryBatches[res.InstrumentationLibrary]
if !ok {
mb = make(map[string]*metricpb.Metric)
rb.InstrumentationLibraryBatches[res.InstrumentationLibrary] = mb
}
mID := res.Metric.GetName()
m, ok := mb[mID]
if !ok {
mb[mID] = res.Metric
continue
}
switch res.Metric.Data.(type) {
case *metricpb.Metric_Gauge:
m.GetGauge().DataPoints = append(m.GetGauge().DataPoints, res.Metric.GetGauge().DataPoints...)
case *metricpb.Metric_Sum:
m.GetSum().DataPoints = append(m.GetSum().DataPoints, res.Metric.GetSum().DataPoints...)
case *metricpb.Metric_Histogram:
m.GetHistogram().DataPoints = append(m.GetHistogram().DataPoints, res.Metric.GetHistogram().DataPoints...)
case *metricpb.Metric_Summary:
m.GetSummary().DataPoints = append(m.GetSummary().DataPoints, res.Metric.GetSummary().DataPoints...)
default:
err := fmt.Sprintf("unsupported metric type: %T", res.Metric.Data)
errStrings = append(errStrings, err)
}
}
if len(grouped) == 0 {
return nil, nil
}
var rms []*metricpb.ResourceMetrics
for _, rb := range grouped {
// TODO: populate ResourceMetrics.SchemaURL when the field is added to the Protobuf message.
rm := &metricpb.ResourceMetrics{Resource: rb.Resource}
for il, mb := range rb.InstrumentationLibraryBatches {
ilm := &metricpb.InstrumentationLibraryMetrics{
Metrics: make([]*metricpb.Metric, 0, len(mb)),
}
if il != (instrumentation.Library{}) {
ilm.InstrumentationLibrary = &commonpb.InstrumentationLibrary{
Name: il.Name,
Version: il.Version,
}
}
for _, m := range mb {
ilm.Metrics = append(ilm.Metrics, m)
}
rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
}
rms = append(rms, rm)
}
// Report any transform errors.
if len(errStrings) > 0 {
return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
}
return rms, nil
}
// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
// error is returned if the Record Aggregator is not supported.
func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricpb.Metric, error) {
agg := r.Aggregation()
switch agg.Kind() {
case aggregation.MinMaxSumCountKind:
mmsc, ok := agg.(aggregation.MinMaxSumCount)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return minMaxSumCount(r, mmsc)
case aggregation.HistogramKind:
h, ok := agg.(aggregation.Histogram)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
return histogramPoint(r, exportSelector.ExportKindFor(r.Descriptor(), aggregation.HistogramKind), h)
case aggregation.SumKind:
s, ok := agg.(aggregation.Sum)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
sum, err := s.Sum()
if err != nil {
return nil, err
}
return sumPoint(r, sum, r.StartTime(), r.EndTime(), exportSelector.ExportKindFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic())
case aggregation.LastValueKind:
lv, ok := agg.(aggregation.LastValue)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
value, tm, err := lv.LastValue()
if err != nil {
return nil, err
}
return gaugePoint(r, value, time.Time{}, tm)
case aggregation.ExactKind:
e, ok := agg.(aggregation.Points)
if !ok {
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
}
pts, err := e.Points()
if err != nil {
return nil, err
}
return gaugeArray(r, pts)
default:
return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg)
}
}
func gaugeArray(record export.Record, points []aggregation.Point) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
pbAttrs := keyValues(labels.Iter())
ndp := make([]*metricpb.NumberDataPoint, 0, len(points))
switch nk := desc.NumberKind(); nk {
case number.Int64Kind:
for _, p := range points {
ndp = append(ndp, &metricpb.NumberDataPoint{
Attributes: pbAttrs,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: &metricpb.NumberDataPoint_AsInt{
AsInt: p.Number.CoerceToInt64(nk),
},
})
}
case number.Float64Kind:
for _, p := range points {
ndp = append(ndp, &metricpb.NumberDataPoint{
Attributes: pbAttrs,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: &metricpb.NumberDataPoint_AsDouble{
AsDouble: p.Number.CoerceToFloat64(nk),
},
})
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, nk)
}
m.Data = &metricpb.Metric_Gauge{
Gauge: &metricpb.Gauge{
DataPoints: ndp,
},
}
return m, nil
}
func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_Gauge{
Gauge: &metricpb.Gauge{
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{
AsInt: num.CoerceToInt64(n),
},
Attributes: keyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_Gauge{
Gauge: &metricpb.Gauge{
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsDouble{
AsDouble: num.CoerceToFloat64(n),
},
Attributes: keyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
func exportKindToTemporality(ek export.ExportKind) metricpb.AggregationTemporality {
switch ek {
case export.DeltaExportKind:
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
case export.CumulativeExportKind:
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
}
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
}
func sumPoint(record export.Record, num number.Number, start, end time.Time, ek export.ExportKind, monotonic bool) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
case number.Int64Kind:
m.Data = &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: monotonic,
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsInt{
AsInt: num.CoerceToInt64(n),
},
Attributes: keyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
case number.Float64Kind:
m.Data = &metricpb.Metric_Sum{
Sum: &metricpb.Sum{
IsMonotonic: monotonic,
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.NumberDataPoint{
{
Value: &metricpb.NumberDataPoint_AsDouble{
AsDouble: num.CoerceToFloat64(n),
},
Attributes: keyValues(labels.Iter()),
StartTimeUnixNano: toNanos(start),
TimeUnixNano: toNanos(end),
},
},
},
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
}
return m, nil
}
// minMaxSumCountValues returns the values of the MinMaxSumCount Aggregator
// as discrete values.
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum number.Number, count uint64, err error) {
if min, err = a.Min(); err != nil {
return
}
if max, err = a.Max(); err != nil {
return
}
if sum, err = a.Sum(); err != nil {
return
}
if count, err = a.Count(); err != nil {
return
}
return
}
// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
min, max, sum, count, err := minMaxSumCountValues(a)
if err != nil {
return nil, err
}
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
Data: &metricpb.Metric_Summary{
Summary: &metricpb.Summary{
DataPoints: []*metricpb.SummaryDataPoint{
{
Sum: sum.CoerceToFloat64(desc.NumberKind()),
Attributes: keyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
QuantileValues: []*metricpb.SummaryDataPoint_ValueAtQuantile{
{
Quantile: 0.0,
Value: min.CoerceToFloat64(desc.NumberKind()),
},
{
Quantile: 1.0,
Value: max.CoerceToFloat64(desc.NumberKind()),
},
},
},
},
},
},
}
return m, nil
}
func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
var buckets aggregation.Buckets
if buckets, err = a.Histogram(); err != nil {
return
}
boundaries, counts = buckets.Boundaries, buckets.Counts
if len(counts) != len(boundaries)+1 {
err = ErrTransforming
return
}
return
}
// histogramPoint transforms a Histogram Aggregator into an OTLP Metric.
func histogramPoint(record export.Record, ek export.ExportKind, a aggregation.Histogram) (*metricpb.Metric, error) {
desc := record.Descriptor()
labels := record.Labels()
boundaries, counts, err := histogramValues(a)
if err != nil {
return nil, err
}
count, err := a.Count()
if err != nil {
return nil, err
}
sum, err := a.Sum()
if err != nil {
return nil, err
}
m := &metricpb.Metric{
Name: desc.Name(),
Description: desc.Description(),
Unit: string(desc.Unit()),
Data: &metricpb.Metric_Histogram{
Histogram: &metricpb.Histogram{
AggregationTemporality: exportKindToTemporality(ek),
DataPoints: []*metricpb.HistogramDataPoint{
{
Sum: sum.CoerceToFloat64(desc.NumberKind()),
Attributes: keyValues(labels.Iter()),
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Count: uint64(count),
BucketCounts: counts,
ExplicitBounds: boundaries,
},
},
},
},
}
return m, nil
}
// keyValues transforms an attribute iterator into a slice of OTLP KeyValues.
func keyValues(iter attribute.Iterator) []*commonpb.KeyValue {
l := iter.Len()
if l == 0 {
return nil
}
result := make([]*commonpb.KeyValue, 0, l)
for iter.Next() {
kv := iter.Label()
result = append(result, &commonpb.KeyValue{
Key: string(kv.Key),
Value: value(kv.Value),
})
}
return result
}
// value transforms an attribute Value into an OTLP AnyValue.
func value(v attribute.Value) *commonpb.AnyValue {
switch v.Type() {
case attribute.BOOL:
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v.AsBool(),
},
}
case attribute.INT64:
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v.AsInt64(),
},
}
case attribute.FLOAT64:
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v.AsFloat64(),
},
}
case attribute.ARRAY:
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_ArrayValue{
ArrayValue: &commonpb.ArrayValue{
Values: arrayValue(v.AsArray()),
},
},
}
default:
return &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v.Emit(),
},
}
}
}
// arrayValue transforms an attribute Value of ARRAY type into a slice of
// OTLP AnyValue.
func arrayValue(arr interface{}) []*commonpb.AnyValue {
var av []*commonpb.AnyValue
switch val := arr.(type) {
case []bool:
av = make([]*commonpb.AnyValue, len(val))
for i, v := range val {
av[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_BoolValue{
BoolValue: v,
},
}
}
case []int:
av = make([]*commonpb.AnyValue, len(val))
for i, v := range val {
av[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: int64(v),
},
}
}
case []int64:
av = make([]*commonpb.AnyValue, len(val))
for i, v := range val {
av[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_IntValue{
IntValue: v,
},
}
}
case []float64:
av = make([]*commonpb.AnyValue, len(val))
for i, v := range val {
av[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_DoubleValue{
DoubleValue: v,
},
}
}
case []string:
av = make([]*commonpb.AnyValue, len(val))
for i, v := range val {
av[i] = &commonpb.AnyValue{
Value: &commonpb.AnyValue_StringValue{
StringValue: v,
},
}
}
}
return av
}
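For orientation, a hand-built example of the OTLP payload shape that sumPoint above emits for a monotonic int64 counter with cumulative temporality; the metric name and values are placeholders.

package main

import (
	"fmt"
	"time"

	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

func main() {
	start := time.Now()
	end := start.Add(time.Minute)
	// Equivalent of sumPoint's output for an int64 cumulative, monotonic sum.
	m := &metricpb.Metric{
		Name:        "requests",
		Description: "processed requests",
		Unit:        "1",
		Data: &metricpb.Metric_Sum{
			Sum: &metricpb.Sum{
				IsMonotonic:            true,
				AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
				DataPoints: []*metricpb.NumberDataPoint{{
					Value:             &metricpb.NumberDataPoint_AsInt{AsInt: 42},
					StartTimeUnixNano: uint64(start.UnixNano()),
					TimeUnixNano:      uint64(end.UnixNano()),
				}},
			},
		},
	}
	fmt.Println(m.GetSum().GetDataPoints()[0].GetAsInt())
}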

View File

@ -0,0 +1,509 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrictransform
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/export/metric/metrictest"
arrAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
lvAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
sumAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/resource"
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
var (
// Timestamps used in this test:
intervalStart = time.Now()
intervalEnd = intervalStart.Add(time.Hour)
)
const (
otelCumulative = metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
otelDelta = metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
)
func TestStringKeyValues(t *testing.T) {
tests := []struct {
kvs []attribute.KeyValue
expected []*commonpb.KeyValue
}{
{
nil,
nil,
},
{
[]attribute.KeyValue{},
nil,
},
{
[]attribute.KeyValue{
attribute.Bool("true", true),
attribute.Int64("one", 1),
attribute.Int64("two", 2),
attribute.Float64("three", 3),
attribute.Int("four", 4),
attribute.Int("five", 5),
attribute.Float64("six", 6),
attribute.Int("seven", 7),
attribute.Int("eight", 8),
attribute.String("the", "final word"),
},
[]*commonpb.KeyValue{
{Key: "eight", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 8}}},
{Key: "five", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 5}}},
{Key: "four", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 4}}},
{Key: "one", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 1}}},
{Key: "seven", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 7}}},
{Key: "six", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_DoubleValue{DoubleValue: 6.0}}},
{Key: "the", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "final word"}}},
{Key: "three", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_DoubleValue{DoubleValue: 3.0}}},
{Key: "true", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_BoolValue{BoolValue: true}}},
{Key: "two", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 2}}},
},
},
}
for _, test := range tests {
labels := attribute.NewSet(test.kvs...)
assert.Equal(t, test.expected, keyValues(labels.Iter()))
}
}
func TestMinMaxSumCountValue(t *testing.T) {
mmsc, ckpt := metrictest.Unslice2(minmaxsumcount.New(2, &metric.Descriptor{}))
assert.NoError(t, mmsc.Update(context.Background(), 1, &metric.Descriptor{}))
assert.NoError(t, mmsc.Update(context.Background(), 10, &metric.Descriptor{}))
// Prior to checkpointing ErrNoData should be returned.
_, _, _, _, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount))
assert.EqualError(t, err, aggregation.ErrNoData.Error())
// Checkpoint to set non-zero values
require.NoError(t, mmsc.SynchronizedMove(ckpt, &metric.Descriptor{}))
min, max, sum, count, err := minMaxSumCountValues(ckpt.(aggregation.MinMaxSumCount))
if assert.NoError(t, err) {
assert.Equal(t, min, number.NewInt64Number(1))
assert.Equal(t, max, number.NewInt64Number(10))
assert.Equal(t, sum, number.NewInt64Number(11))
assert.Equal(t, count, uint64(2))
}
}
func TestMinMaxSumCountDatapoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Int64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
mmsc, ckpt := metrictest.Unslice2(minmaxsumcount.New(2, &desc))
assert.NoError(t, mmsc.Update(context.Background(), 1, &desc))
assert.NoError(t, mmsc.Update(context.Background(), 10, &desc))
require.NoError(t, mmsc.SynchronizedMove(ckpt, &desc))
expected := []*metricpb.SummaryDataPoint{
{
Count: 2,
Sum: 11,
StartTimeUnixNano: uint64(intervalStart.UnixNano()),
TimeUnixNano: uint64(intervalEnd.UnixNano()),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
QuantileValues: []*metricpb.SummaryDataPoint_ValueAtQuantile{
{
Quantile: 0.0,
Value: 1.0,
},
{
Quantile: 1.0,
Value: 10.0,
},
},
},
}
record := export.NewRecord(&desc, &labels, nil, ckpt.Aggregation(), intervalStart, intervalEnd)
m, err := minMaxSumCount(record, ckpt.(aggregation.MinMaxSumCount))
if assert.NoError(t, err) {
assert.Nil(t, m.GetGauge())
assert.Nil(t, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Equal(t, expected, m.GetSummary().DataPoints)
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}
func TestMinMaxSumCountPropagatesErrors(t *testing.T) {
// ErrNoData should be returned by both the Min and Max values of
// a MinMaxSumCount Aggregator. Use this fact to check the error is
// correctly returned.
mmsc := &minmaxsumcount.New(1, &metric.Descriptor{})[0]
_, _, _, _, err := minMaxSumCountValues(mmsc)
assert.Error(t, err)
assert.Equal(t, aggregation.ErrNoData, err)
}
func TestSumIntDataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Int64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
s, ckpt := metrictest.Unslice2(sumAgg.New(2))
assert.NoError(t, s.Update(context.Background(), number.Number(1), &desc))
require.NoError(t, s.SynchronizedMove(ckpt, &desc))
record := export.NewRecord(&desc, &labels, nil, ckpt.Aggregation(), intervalStart, intervalEnd)
sum, ok := ckpt.(aggregation.Sum)
require.True(t, ok, "ckpt is not an aggregation.Sum: %T", ckpt)
value, err := sum.Sum()
require.NoError(t, err)
if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), export.CumulativeExportKind, true); assert.NoError(t, err) {
assert.Nil(t, m.GetGauge())
assert.Equal(t, &metricpb.Sum{
AggregationTemporality: otelCumulative,
IsMonotonic: true,
DataPoints: []*metricpb.NumberDataPoint{{
StartTimeUnixNano: uint64(intervalStart.UnixNano()),
TimeUnixNano: uint64(intervalEnd.UnixNano()),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
Value: &metricpb.NumberDataPoint_AsInt{
AsInt: 1,
},
}},
}, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Nil(t, m.GetSummary())
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}
func TestSumFloatDataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Float64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
s, ckpt := metrictest.Unslice2(sumAgg.New(2))
assert.NoError(t, s.Update(context.Background(), number.NewFloat64Number(1), &desc))
require.NoError(t, s.SynchronizedMove(ckpt, &desc))
record := export.NewRecord(&desc, &labels, nil, ckpt.Aggregation(), intervalStart, intervalEnd)
sum, ok := ckpt.(aggregation.Sum)
require.True(t, ok, "ckpt is not an aggregation.Sum: %T", ckpt)
value, err := sum.Sum()
require.NoError(t, err)
if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), export.DeltaExportKind, false); assert.NoError(t, err) {
assert.Nil(t, m.GetGauge())
assert.Equal(t, &metricpb.Sum{
IsMonotonic: false,
AggregationTemporality: otelDelta,
DataPoints: []*metricpb.NumberDataPoint{{
Value: &metricpb.NumberDataPoint_AsDouble{
AsDouble: 1.0,
},
StartTimeUnixNano: uint64(intervalStart.UnixNano()),
TimeUnixNano: uint64(intervalEnd.UnixNano()),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
}}}, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Nil(t, m.GetSummary())
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}
func TestLastValueIntDataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Int64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
s, ckpt := metrictest.Unslice2(lvAgg.New(2))
assert.NoError(t, s.Update(context.Background(), number.Number(100), &desc))
require.NoError(t, s.SynchronizedMove(ckpt, &desc))
record := export.NewRecord(&desc, &labels, nil, ckpt.Aggregation(), intervalStart, intervalEnd)
sum, ok := ckpt.(aggregation.LastValue)
require.True(t, ok, "ckpt is not an aggregation.LastValue: %T", ckpt)
value, timestamp, err := sum.LastValue()
require.NoError(t, err)
if m, err := gaugePoint(record, value, time.Time{}, timestamp); assert.NoError(t, err) {
assert.Equal(t, []*metricpb.NumberDataPoint{{
StartTimeUnixNano: 0,
TimeUnixNano: uint64(timestamp.UnixNano()),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
Value: &metricpb.NumberDataPoint_AsInt{
AsInt: 100,
},
}}, m.GetGauge().DataPoints)
assert.Nil(t, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Nil(t, m.GetSummary())
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}
func TestExactIntDataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Int64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
e, ckpt := metrictest.Unslice2(arrAgg.New(2))
assert.NoError(t, e.Update(context.Background(), number.Number(100), &desc))
require.NoError(t, e.SynchronizedMove(ckpt, &desc))
record := export.NewRecord(&desc, &labels, nil, ckpt.Aggregation(), intervalStart, intervalEnd)
p, ok := ckpt.(aggregation.Points)
require.True(t, ok, "ckpt is not an aggregation.Points: %T", ckpt)
pts, err := p.Points()
require.NoError(t, err)
if m, err := gaugeArray(record, pts); assert.NoError(t, err) {
assert.Equal(t, []*metricpb.NumberDataPoint{{
StartTimeUnixNano: toNanos(intervalStart),
TimeUnixNano: toNanos(intervalEnd),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
Value: &metricpb.NumberDataPoint_AsInt{
AsInt: 100,
},
}}, m.GetGauge().DataPoints)
assert.Nil(t, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Nil(t, m.GetSummary())
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}
func TestExactFloatDataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Float64Kind)
labels := attribute.NewSet(attribute.String("one", "1"))
e, ckpt := metrictest.Unslice2(arrAgg.New(2))
assert.NoError(t, e.Update(context.Background(), number.NewFloat64Number(100), &desc))
require.NoError(t, e.SynchronizedMove(ckpt, &desc))
record := export.NewRecord(&desc, &labels, nil, ckpt.Aggregation(), intervalStart, intervalEnd)
p, ok := ckpt.(aggregation.Points)
require.True(t, ok, "ckpt is not an aggregation.Points: %T", ckpt)
pts, err := p.Points()
require.NoError(t, err)
if m, err := gaugeArray(record, pts); assert.NoError(t, err) {
assert.Equal(t, []*metricpb.NumberDataPoint{{
Value: &metricpb.NumberDataPoint_AsDouble{
AsDouble: 100,
},
StartTimeUnixNano: toNanos(intervalStart),
TimeUnixNano: toNanos(intervalEnd),
Attributes: []*commonpb.KeyValue{
{
Key: "one",
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
},
},
}}, m.GetGauge().DataPoints)
assert.Nil(t, m.GetSum())
assert.Nil(t, m.GetHistogram())
assert.Nil(t, m.GetSummary())
assert.Nil(t, m.GetIntGauge()) // nolint
assert.Nil(t, m.GetIntSum()) // nolint
assert.Nil(t, m.GetIntHistogram()) // nolint
}
}
func TestSumErrUnknownValueType(t *testing.T) {
desc := metric.NewDescriptor("", metric.ValueRecorderInstrumentKind, number.Kind(-1))
labels := attribute.NewSet()
s := &sumAgg.New(1)[0]
record := export.NewRecord(&desc, &labels, nil, s, intervalStart, intervalEnd)
value, err := s.Sum()
require.NoError(t, err)
_, err = sumPoint(record, value, record.StartTime(), record.EndTime(), export.CumulativeExportKind, true)
assert.Error(t, err)
if !errors.Is(err, ErrUnknownValueType) {
t.Errorf("expected ErrUnknownValueType, got %v", err)
}
}
type testAgg struct {
kind aggregation.Kind
agg aggregation.Aggregation
}
func (t *testAgg) Kind() aggregation.Kind {
return t.kind
}
func (t *testAgg) Aggregation() aggregation.Aggregation {
return t.agg
}
// None of these three are used:
func (t *testAgg) Update(ctx context.Context, number number.Number, descriptor *metric.Descriptor) error {
return nil
}
func (t *testAgg) SynchronizedMove(destination export.Aggregator, descriptor *metric.Descriptor) error {
return nil
}
func (t *testAgg) Merge(aggregator export.Aggregator, descriptor *metric.Descriptor) error {
return nil
}
type testErrSum struct {
err error
}
type testErrLastValue struct {
err error
}
type testErrMinMaxSumCount struct {
testErrSum
}
func (te *testErrLastValue) LastValue() (number.Number, time.Time, error) {
return 0, time.Time{}, te.err
}
func (te *testErrLastValue) Kind() aggregation.Kind {
return aggregation.LastValueKind
}
func (te *testErrSum) Sum() (number.Number, error) {
return 0, te.err
}
func (te *testErrSum) Kind() aggregation.Kind {
return aggregation.SumKind
}
func (te *testErrMinMaxSumCount) Min() (number.Number, error) {
return 0, te.err
}
func (te *testErrMinMaxSumCount) Max() (number.Number, error) {
return 0, te.err
}
func (te *testErrMinMaxSumCount) Count() (uint64, error) {
return 0, te.err
}
var _ export.Aggregator = &testAgg{}
var _ aggregation.Aggregation = &testAgg{}
var _ aggregation.Sum = &testErrSum{}
var _ aggregation.LastValue = &testErrLastValue{}
var _ aggregation.MinMaxSumCount = &testErrMinMaxSumCount{}
func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
desc := metric.NewDescriptor("things", metric.CounterInstrumentKind, number.Int64Kind)
labels := attribute.NewSet()
res := resource.Empty()
test := &testAgg{
kind: kind,
agg: agg,
}
return Record(export.CumulativeExportKindSelector(), export.NewRecord(&desc, &labels, res, test, intervalStart, intervalEnd))
}
mpb, err := makeMpb(aggregation.SumKind, &lastvalue.New(1)[0])
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, ErrIncompatibleAgg))
mpb, err = makeMpb(aggregation.LastValueKind, &sum.New(1)[0])
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, ErrIncompatibleAgg))
mpb, err = makeMpb(aggregation.MinMaxSumCountKind, &lastvalue.New(1)[0])
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, ErrIncompatibleAgg))
mpb, err = makeMpb(aggregation.ExactKind, &lastvalue.New(1)[0])
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, ErrIncompatibleAgg))
}
func TestRecordAggregatorUnexpectedErrors(t *testing.T) {
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
desc := metric.NewDescriptor("things", metric.CounterInstrumentKind, number.Int64Kind)
labels := attribute.NewSet()
res := resource.Empty()
return Record(export.CumulativeExportKindSelector(), export.NewRecord(&desc, &labels, res, agg, intervalStart, intervalEnd))
}
errEx := fmt.Errorf("timeout")
mpb, err := makeMpb(aggregation.SumKind, &testErrSum{errEx})
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, errEx))
mpb, err = makeMpb(aggregation.LastValueKind, &testErrLastValue{errEx})
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, errEx))
mpb, err = makeMpb(aggregation.MinMaxSumCountKind, &testErrMinMaxSumCount{testErrSum{errEx}})
require.Error(t, err)
require.Nil(t, mpb)
require.True(t, errors.Is(err, errEx))
}

View File

@ -0,0 +1,29 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrictransform
import (
resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
"go.opentelemetry.io/otel/sdk/resource"
)
// Resource transforms a Resource into an OTLP Resource.
func Resource(r *resource.Resource) *resourcepb.Resource {
if r == nil {
return nil
}
return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
}

View File

@ -0,0 +1,48 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrictransform
import (
"testing"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
)
func TestNilResource(t *testing.T) {
assert.Empty(t, Resource(nil))
}
func TestEmptyResource(t *testing.T) {
assert.Empty(t, Resource(&resource.Resource{}))
}
/*
* This does not include any testing on the ordering of Resource Attributes.
* They are stored as a map internally to the Resource and their order is not
* guaranteed.
*/
func TestResourceAttributes(t *testing.T) {
attrs := []attribute.KeyValue{attribute.Int("one", 1), attribute.Int("two", 2)}
got := Resource(resource.NewSchemaless(attrs...)).GetAttributes()
if !assert.Len(t, attrs, 2) {
return
}
assert.ElementsMatch(t, Attributes(attrs), got)
}

View File

@ -0,0 +1,196 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel"
)
var httpSchemeRegexp = regexp.MustCompile(`(?i)^http://|https://`)
func ApplyGRPCEnvConfigs(cfg *Config) {
e := EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile,
}
e.ApplyGRPCEnvConfigs(cfg)
}
func ApplyHTTPEnvConfigs(cfg *Config) {
e := EnvOptionsReader{
GetEnv: os.Getenv,
ReadFile: ioutil.ReadFile,
}
e.ApplyHTTPEnvConfigs(cfg)
}
type EnvOptionsReader struct {
GetEnv func(string) string
ReadFile func(filename string) ([]byte, error)
}
func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts {
opt.ApplyHTTPOption(cfg)
}
}
func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg *Config) {
opts := e.GetOptionsFromEnv()
for _, opt := range opts {
opt.ApplyGRPCOption(cfg)
}
}
func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption {
var opts []GenericOption
// Endpoint
if v, ok := e.getEnvValue("ENDPOINT"); ok {
if isInsecureEndpoint(v) {
opts = append(opts, WithInsecure())
} else {
opts = append(opts, WithSecure())
}
opts = append(opts, WithEndpoint(trimSchema(v)))
}
if v, ok := e.getEnvValue("METRICS_ENDPOINT"); ok {
if isInsecureEndpoint(v) {
opts = append(opts, WithInsecure())
} else {
opts = append(opts, WithSecure())
}
opts = append(opts, WithEndpoint(trimSchema(v)))
}
// Certificate File
if path, ok := e.getEnvValue("CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("METRICS_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err))
}
}
// Headers
if h, ok := e.getEnvValue("HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("METRICS_HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
// Compression
if c, ok := e.getEnvValue("COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("METRICS_COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
// Timeout
if t, ok := e.getEnvValue("TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("METRICS_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
return opts
}
func isInsecureEndpoint(endpoint string) bool {
return strings.HasPrefix(strings.ToLower(endpoint), "http://")
}
func trimSchema(endpoint string) string {
return httpSchemeRegexp.ReplaceAllString(endpoint, "")
}
// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function.
// This function already prepends the OTLP prefix to all key lookups.
func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key)))
return v, v != ""
}
func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) {
b, err := e.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
func stringToCompression(value string) Compression {
switch value {
case "gzip":
return GzipCompression
}
return NoCompression
}
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
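A minimal sketch of driving the env-based configuration above with an in-memory reader instead of real environment variables; the otlpconfig import path is an assumption (the package is internal to the exporter module) and the endpoint, header, and timeout values are placeholders.

package main

import (
	"fmt"
	"io/ioutil"

	// Assumed internal import path; valid only within the exporter module.
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
)

func main() {
	env := map[string]string{
		"OTEL_EXPORTER_OTLP_ENDPOINT": "https://collector.example.com:4317",
		"OTEL_EXPORTER_OTLP_HEADERS":  "api-key=secret",
		"OTEL_EXPORTER_OTLP_TIMEOUT":  "5000", // milliseconds
	}
	reader := otlpconfig.EnvOptionsReader{
		GetEnv:   func(key string) string { return env[key] },
		ReadFile: ioutil.ReadFile,
	}
	cfg := otlpconfig.NewDefaultConfig()
	reader.ApplyGRPCEnvConfigs(&cfg)
	fmt.Println(cfg.Metrics.Endpoint, cfg.Metrics.Headers, cfg.Metrics.Timeout)
}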

View File

@ -0,0 +1,75 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"reflect"
"testing"
)
func TestStringToHeader(t *testing.T) {
tests := []struct {
name string
value string
want map[string]string
}{
{
name: "simple test",
value: "userId=alice",
want: map[string]string{"userId": "alice"},
},
{
name: "simple test with spaces",
value: " userId = alice ",
want: map[string]string{"userId": "alice"},
},
{
name: "multiples headers encoded",
value: "userId=alice,serverNode=DF%3A28,isProduction=false",
want: map[string]string{
"userId": "alice",
"serverNode": "DF:28",
"isProduction": "false",
},
},
{
name: "invalid headers format",
value: "userId:alice",
want: map[string]string{},
},
{
name: "invalid key",
value: "%XX=missing,userId=alice",
want: map[string]string{
"userId": "alice",
},
},
{
name: "invalid value",
value: "missing=%XX,userId=alice",
want: map[string]string{
"userId": "alice",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := stringToHeader(tt.value); !reflect.DeepEqual(got, tt.want) {
t.Errorf("stringToHeader() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -0,0 +1,245 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
import (
"crypto/tls"
"fmt"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
// DefaultMetricsPath is the default URL path for the endpoint that
// receives metrics.
DefaultMetricsPath string = "/v1/metrics"
// DefaultTimeout is the default maximum time to wait for the backend to
// process each metrics batch.
DefaultTimeout time.Duration = 10 * time.Second
)
var (
// defaultRetrySettings is the default configuration for the retry policy.
defaultRetrySettings = RetrySettings{
Enabled: true,
InitialInterval: 5 * time.Second,
MaxInterval: 30 * time.Second,
MaxElapsedTime: time.Minute,
}
)
type (
SignalConfig struct {
Endpoint string
Insecure bool
TLSCfg *tls.Config
Headers map[string]string
Compression Compression
Timeout time.Duration
URLPath string
// gRPC configurations
GRPCCredentials credentials.TransportCredentials
}
Config struct {
// Signal specific configurations
Metrics SignalConfig
// gRPC configurations
ReconnectionPeriod time.Duration
ServiceConfig string
DialOptions []grpc.DialOption
RetrySettings RetrySettings
}
)
func NewDefaultConfig() Config {
c := Config{
Metrics: SignalConfig{
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorPort),
URLPath: DefaultMetricsPath,
Compression: NoCompression,
Timeout: DefaultTimeout,
},
RetrySettings: defaultRetrySettings,
}
return c
}
type (
// GenericOption applies an option to the HTTP or gRPC driver.
GenericOption interface {
ApplyHTTPOption(*Config)
ApplyGRPCOption(*Config)
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
// HTTPOption applies an option to the HTTP driver.
HTTPOption interface {
ApplyHTTPOption(*Config)
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
// GRPCOption applies an option to the gRPC driver.
GRPCOption interface {
ApplyGRPCOption(*Config)
// A private method to prevent users implementing the
// interface and so future additions to it will not
// violate compatibility.
private()
}
)
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
fn func(*Config)
}
func (g *genericOption) ApplyGRPCOption(cfg *Config) {
g.fn(cfg)
}
func (g *genericOption) ApplyHTTPOption(cfg *Config) {
g.fn(cfg)
}
func (genericOption) private() {}
func newGenericOption(fn func(cfg *Config)) GenericOption {
return &genericOption{fn: fn}
}
// splitOption is an option that applies different logic
// for gRPC and HTTP.
type splitOption struct {
httpFn func(*Config)
grpcFn func(*Config)
}
func (g *splitOption) ApplyGRPCOption(cfg *Config) {
g.grpcFn(cfg)
}
func (g *splitOption) ApplyHTTPOption(cfg *Config) {
g.httpFn(cfg)
}
func (splitOption) private() {}
func newSplitOption(httpFn func(cfg *Config), grpcFn func(cfg *Config)) GenericOption {
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
fn func(*Config)
}
func (h *httpOption) ApplyHTTPOption(cfg *Config) {
h.fn(cfg)
}
func (httpOption) private() {}
func NewHTTPOption(fn func(cfg *Config)) HTTPOption {
return &httpOption{fn: fn}
}
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
fn func(*Config)
}
func (h *grpcOption) ApplyGRPCOption(cfg *Config) {
h.fn(cfg)
}
func (grpcOption) private() {}
func NewGRPCOption(fn func(cfg *Config)) GRPCOption {
return &grpcOption{fn: fn}
}
// Generic Options
func WithEndpoint(endpoint string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Endpoint = endpoint
})
}
func WithCompression(compression Compression) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Compression = compression
})
}
func WithURLPath(urlPath string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.URLPath = urlPath
})
}
func WithRetry(settings RetrySettings) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.RetrySettings = settings
})
}
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
return newSplitOption(func(cfg *Config) {
cfg.Metrics.TLSCfg = tlsCfg.Clone()
}, func(cfg *Config) {
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
})
}
func WithInsecure() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Insecure = true
})
}
func WithSecure() GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Insecure = false
})
}
func WithHeaders(headers map[string]string) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Headers = headers
})
}
func WithTimeout(duration time.Duration) GenericOption {
return newGenericOption(func(cfg *Config) {
cfg.Metrics.Timeout = duration
})
}
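
Since otlpconfig is an internal package, the options above are consumed by the exporter code itself rather than by end users. The following is a hypothetical in-package test (not part of this change) sketching how generic options mutate the shared Config when applied through the gRPC path; the endpoint value is illustrative.

package otlpconfig

import (
	"testing"
	"time"
)

// TestGenericOptionsSketch is a hypothetical example showing that generic
// options mutate the metrics signal configuration regardless of transport.
func TestGenericOptionsSketch(t *testing.T) {
	cfg := NewDefaultConfig()
	for _, opt := range []GenericOption{
		WithEndpoint("collector:4317"), // hypothetical endpoint
		WithTimeout(5 * time.Second),
		WithCompression(GzipCompression),
	} {
		opt.ApplyGRPCOption(&cfg)
	}
	if cfg.Metrics.Endpoint != "collector:4317" {
		t.Fatalf("unexpected endpoint: %q", cfg.Metrics.Endpoint)
	}
	if cfg.Metrics.Timeout != 5*time.Second {
		t.Fatalf("unexpected timeout: %v", cfg.Metrics.Timeout)
	}
	if cfg.Metrics.Compression != GzipCompression {
		t.Fatalf("unexpected compression: %v", cfg.Metrics.Compression)
	}
}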

View File

@ -0,0 +1,393 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig_test
import (
"errors"
"testing"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
"github.com/stretchr/testify/assert"
)
const (
WeakCertificate = `
-----BEGIN CERTIFICATE-----
MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
Lhnm4N/QDk5rek0=
-----END CERTIFICATE-----
`
WeakPrivateKey = `
-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
-----END PRIVATE KEY-----
`
)
type env map[string]string
func (e *env) getEnv(env string) string {
return (*e)[env]
}
type fileReader map[string][]byte
func (f *fileReader) readFile(filename string) ([]byte, error) {
if b, ok := (*f)[filename]; ok {
return b, nil
}
return nil, errors.New("File not found")
}
func TestConfigs(t *testing.T) {
tlsCert, err := otlpconfig.CreateTLSConfig([]byte(WeakCertificate))
assert.NoError(t, err)
tests := []struct {
name string
opts []otlpconfig.GenericOption
env env
fileReader fileReader
asserts func(t *testing.T, c *otlpconfig.Config, grpcOption bool)
}{
{
name: "Test default configs",
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "localhost:4317", c.Metrics.Endpoint)
assert.Equal(t, otlpconfig.NoCompression, c.Metrics.Compression)
assert.Equal(t, map[string]string(nil), c.Metrics.Headers)
assert.Equal(t, 10*time.Second, c.Metrics.Timeout)
},
},
// Endpoint Tests
{
name: "Test With Endpoint",
opts: []otlpconfig.GenericOption{
otlpconfig.WithEndpoint("someendpoint"),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "someendpoint", c.Metrics.Endpoint)
},
},
{
name: "Test Environment Endpoint",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
},
},
{
name: "Test Environment Signal Specific Endpoint",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "overrode_by_signal_specific",
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "env_metrics_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
},
},
{
name: "Test Mixed Environment and With Endpoint",
opts: []otlpconfig.GenericOption{
otlpconfig.WithEndpoint("metrics_endpoint"),
},
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint)
},
},
{
name: "Test Environment Endpoint with HTTP scheme",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
assert.Equal(t, true, c.Metrics.Insecure)
},
},
{
name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
assert.Equal(t, true, c.Metrics.Insecure)
},
},
{
name: "Test Environment Endpoint with HTTPS scheme",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
assert.Equal(t, false, c.Metrics.Insecure)
},
},
{
name: "Test Environment Signal Specific Endpoint",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://overrode_by_signal_specific",
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "https://env_metrics_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
assert.Equal(t, false, c.Metrics.Insecure)
},
},
{
name: "Test Environment Signal Specific Endpoint #2",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://overrode_by_signal_specific",
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "env_metrics_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
assert.Equal(t, false, c.Metrics.Insecure)
},
},
{
name: "Test Environment Signal Specific Endpoint with uppercase scheme",
env: map[string]string{
"OTEL_EXPORTER_OTLP_ENDPOINT": "HTTP://overrode_by_signal_specific",
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "env_metrics_endpoint",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
assert.Equal(t, false, c.Metrics.Insecure)
},
},
// Certificate tests
{
name: "Test With Certificate",
opts: []otlpconfig.GenericOption{
otlpconfig.WithTLSClientConfig(tlsCert),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
if grpcOption {
// TODO: make sure gRPC's credentials actually work
assert.NotNil(t, c.Metrics.GRPCCredentials)
} else {
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
}
},
},
{
name: "Test Environment Certificate",
env: map[string]string{
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
},
fileReader: fileReader{
"cert_path": []byte(WeakCertificate),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
if grpcOption {
assert.NotNil(t, c.Metrics.GRPCCredentials)
} else {
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
}
},
},
{
name: "Test Environment Signal Specific Certificate",
env: map[string]string{
"OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific",
"OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path",
},
fileReader: fileReader{
"cert_path": []byte(WeakCertificate),
"invalid_cert": []byte("invalid certificate file."),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
if grpcOption {
assert.NotNil(t, c.Metrics.GRPCCredentials)
} else {
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
}
},
},
{
name: "Test Mixed Environment and With Certificate",
opts: []otlpconfig.GenericOption{},
env: map[string]string{
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
},
fileReader: fileReader{
"cert_path": []byte(WeakCertificate),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
if grpcOption {
assert.NotNil(t, c.Metrics.GRPCCredentials)
} else {
assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects()))
}
},
},
// Headers tests
{
name: "Test With Headers",
opts: []otlpconfig.GenericOption{
otlpconfig.WithHeaders(map[string]string{"h1": "v1"}),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers)
},
},
{
name: "Test Environment Headers",
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
},
},
{
name: "Test Environment Signal Specific Headers",
env: map[string]string{
"OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific",
"OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
},
},
{
name: "Test Mixed Environment and With Headers",
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
opts: []otlpconfig.GenericOption{
otlpconfig.WithHeaders(map[string]string{"m1": "mv1"}),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers)
},
},
// Compression Tests
{
name: "Test With Compression",
opts: []otlpconfig.GenericOption{
otlpconfig.WithCompression(otlpconfig.GzipCompression),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression)
},
},
{
name: "Test Environment Compression",
env: map[string]string{
"OTEL_EXPORTER_OTLP_COMPRESSION": "gzip",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression)
},
},
{
name: "Test Environment Signal Specific Compression",
env: map[string]string{
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression)
},
},
// Timeout Tests
{
name: "Test With Timeout",
opts: []otlpconfig.GenericOption{
otlpconfig.WithTimeout(time.Duration(5 * time.Second)),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, 5*time.Second, c.Metrics.Timeout)
},
},
{
name: "Test Environment Timeout",
env: map[string]string{
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, c.Metrics.Timeout, 15*time.Second)
},
},
{
name: "Test Environment Signal Specific Timeout",
env: map[string]string{
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, c.Metrics.Timeout, 28*time.Second)
},
},
{
name: "Test Mixed Environment and With Timeout",
env: map[string]string{
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
},
opts: []otlpconfig.GenericOption{
otlpconfig.WithTimeout(5 * time.Second),
},
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
assert.Equal(t, c.Metrics.Timeout, 5*time.Second)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
e := otlpconfig.EnvOptionsReader{
GetEnv: tt.env.getEnv,
ReadFile: tt.fileReader.readFile,
}
// Tests Generic options as HTTP Options
cfg := otlpconfig.NewDefaultConfig()
e.ApplyHTTPEnvConfigs(&cfg)
for _, opt := range tt.opts {
opt.ApplyHTTPOption(&cfg)
}
tt.asserts(t, &cfg, false)
// Tests Generic options as gRPC Options
cfg = otlpconfig.NewDefaultConfig()
e.ApplyGRPCEnvConfigs(&cfg)
for _, opt := range tt.opts {
opt.ApplyGRPCOption(&cfg)
}
tt.asserts(t, &cfg, true)
})
}
}

View File

@ -0,0 +1,54 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
import "time"
const (
// DefaultCollectorPort is the port the Exporter will attempt to connect to
// if no collector port is provided.
DefaultCollectorPort uint16 = 4317
// DefaultCollectorHost is the host address the Exporter will attempt to
// connect to if no collector address is provided.
DefaultCollectorHost string = "localhost"
)
// Compression describes the compression used for payloads sent to the
// collector.
type Compression int
const (
// NoCompression tells the driver to send payloads without
// compression.
NoCompression Compression = iota
// GzipCompression tells the driver to send payloads after
// compressing them with gzip.
GzipCompression
)
// RetrySettings defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type RetrySettings struct {
// Enabled indicates whether to retry sending batches in case of export failure.
Enabled bool
// InitialInterval is the time to wait after the first failure before retrying.
InitialInterval time.Duration
// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
// consecutive retries will always be `MaxInterval`.
MaxInterval time.Duration
// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
// Once this value is reached, the data is discarded.
MaxElapsedTime time.Duration
}
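
As a hedged illustration of the knobs above, a caller might configure a policy that retries quickly but gives up within half a minute overall; test code later in this change passes such values through the driver's WithRetry option. The exact backoff multiplier and jitter are implementation details of the driver and are not specified here; the variable name is hypothetical.

package otlpconfig

import "time"

// aggressiveRetry is a hypothetical policy: first retry after 500ms, back off
// to at most 5s between attempts, and stop retrying after 30s in total.
var aggressiveRetry = RetrySettings{
	Enabled:         true,
	InitialInterval: 500 * time.Millisecond,
	MaxInterval:     5 * time.Second,
	MaxElapsedTime:  30 * time.Second,
}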

View File

@ -0,0 +1,46 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpconfig
import (
"crypto/tls"
"crypto/x509"
"errors"
"io/ioutil"
)
// ReadTLSConfigFromFile reads a PEM certificate file and creates
// a tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
b, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return CreateTLSConfig(b)
}
// CreateTLSConfig creates a tls.Config from raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
cp := x509.NewCertPool()
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
return nil, errors.New("failed to append certificate to the cert pool")
}
return &tls.Config{
RootCAs: cp,
}, nil
}
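
The sketch below shows how a CA certificate loaded with ReadTLSConfigFromFile could be wired into the signal configuration via WithTLSClientConfig. It is illustrative only: the file path and helper name are hypothetical, and the code would have to live inside the otlpconfig package itself since internal packages cannot be imported from outside.

package otlpconfig

// exampleTLSSetup is a hypothetical helper showing how a loaded tls.Config
// feeds the transport security settings of the metrics signal.
func exampleTLSSetup() (Config, error) {
	cfg := NewDefaultConfig()
	tlsCfg, err := ReadTLSConfigFromFile("/etc/otel/ca.pem") // illustrative path
	if err != nil {
		return cfg, err
	}
	// WithTLSClientConfig sets Metrics.GRPCCredentials when applied as a gRPC
	// option (and Metrics.TLSCfg when applied as an HTTP option).
	WithTLSClientConfig(tlsCfg).ApplyGRPCOption(&cfg)
	WithSecure().ApplyGRPCOption(&cfg)
	return cfg, nil
}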

View File

@ -0,0 +1,116 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetrictest
import (
"context"
"errors"
"sync"
"testing"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
)
func RunExporterShutdownTest(t *testing.T, factory func() otlpmetric.Client) {
t.Run("testClientStopHonorsTimeout", func(t *testing.T) {
testClientStopHonorsTimeout(t, factory())
})
t.Run("testClientStopHonorsCancel", func(t *testing.T) {
testClientStopHonorsCancel(t, factory())
})
t.Run("testClientStopNoError", func(t *testing.T) {
testClientStopNoError(t, factory())
})
t.Run("testClientStopManyTimes", func(t *testing.T) {
testClientStopManyTimes(t, factory())
})
}
func initializeExporter(t *testing.T, client otlpmetric.Client) *otlpmetric.Exporter {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
e, err := otlpmetric.New(ctx, client)
if err != nil {
t.Fatalf("failed to create exporter")
}
return e
}
func testClientStopHonorsTimeout(t *testing.T, client otlpmetric.Client) {
e := initializeExporter(t, client)
innerCtx, innerCancel := context.WithTimeout(context.Background(), time.Microsecond)
<-innerCtx.Done()
if err := e.Shutdown(innerCtx); err == nil {
t.Error("expected context DeadlineExceeded error, got nil")
} else if !errors.Is(err, context.DeadlineExceeded) {
t.Errorf("expected context DeadlineExceeded error, got %v", err)
}
innerCancel()
}
func testClientStopHonorsCancel(t *testing.T, client otlpmetric.Client) {
e := initializeExporter(t, client)
ctx, innerCancel := context.WithCancel(context.Background())
innerCancel()
if err := e.Shutdown(ctx); err == nil {
t.Error("expected context canceled error, got nil")
} else if !errors.Is(err, context.Canceled) {
t.Errorf("expected context canceled error, got %v", err)
}
}
func testClientStopNoError(t *testing.T, client otlpmetric.Client) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
e := initializeExporter(t, client)
if err := e.Shutdown(ctx); err != nil {
t.Errorf("shutdown errored: expected nil, got %v", err)
}
}
func testClientStopManyTimes(t *testing.T, client otlpmetric.Client) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
e := initializeExporter(t, client)
ch := make(chan struct{})
wg := sync.WaitGroup{}
const num int = 20
wg.Add(num)
errs := make([]error, num)
for i := 0; i < num; i++ {
go func(idx int) {
defer wg.Done()
<-ch
errs[idx] = e.Shutdown(ctx)
}(i)
}
close(ch)
wg.Wait()
for _, err := range errs {
if err != nil {
t.Fatalf("failed to shutdown exporter: %v", err)
}
}
}

View File

@ -0,0 +1,55 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetrictest
import (
collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
// Collector is an interface that mock collectors should implement,
// so they can be used for end-to-end testing.
type Collector interface {
Stop() error
GetMetrics() []*metricpb.Metric
}
// MetricsStorage stores the metrics. Mock collectors could use it to
// store metrics they have received.
type MetricsStorage struct {
metrics []*metricpb.Metric
}
// NewMetricsStorage creates a new metrics storage.
func NewMetricsStorage() MetricsStorage {
return MetricsStorage{}
}
// AddMetrics adds metrics to the metrics storage.
func (s *MetricsStorage) AddMetrics(request *collectormetricpb.ExportMetricsServiceRequest) {
for _, rm := range request.GetResourceMetrics() {
// TODO (rghetia) handle multiple resource and library info.
if len(rm.InstrumentationLibraryMetrics) > 0 {
s.metrics = append(s.metrics, rm.InstrumentationLibraryMetrics[0].Metrics...)
}
}
}
// GetMetrics returns the stored metrics.
func (s *MetricsStorage) GetMetrics() []*metricpb.Metric {
// Copy so callers cannot mutate the internal slice.
m := make([]*metricpb.Metric, 0, len(s.metrics))
return append(m, s.metrics...)
}
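
As an illustration of the intent stated above, a mock gRPC metrics service could embed MetricsStorage to record what it receives. The sketch below is hypothetical and not part of this change; it assumes the generated MetricsServiceServer types (including UnimplementedMetricsServiceServer) from go.opentelemetry.io/proto/otlp.

package otlpmetrictest

import (
	"context"
	"sync"

	collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
)

// mockMetricService is a hypothetical mock that records every export request
// it receives into a MetricsStorage, guarded by a mutex for concurrent exports.
type mockMetricService struct {
	collectormetricpb.UnimplementedMetricsServiceServer

	mu      sync.Mutex
	storage MetricsStorage
}

func (m *mockMetricService) Export(ctx context.Context, req *collectormetricpb.ExportMetricsServiceRequest) (*collectormetricpb.ExportMetricsServiceResponse, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.storage.AddMetrics(req)
	return &collectormetricpb.ExportMetricsServiceResponse{}, nil
}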

View File

@ -0,0 +1,101 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetrictest
import (
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/resource"
)
// Used to avoid implementing locking functions for test
// checkpointsets.
type noopLocker struct{}
// Lock implements sync.Locker, which is needed for
// exportmetric.CheckpointSet.
func (noopLocker) Lock() {}
// Unlock implements sync.Locker, which is needed for
// exportmetric.CheckpointSet.
func (noopLocker) Unlock() {}
// RLock implements exportmetric.CheckpointSet.
func (noopLocker) RLock() {}
// RUnlock implements exportmetric.CheckpointSet.
func (noopLocker) RUnlock() {}
// OneRecordCheckpointSet is a CheckpointSet that returns just one
// filled record. It may be useful for testing driver's metrics
// export.
type OneRecordCheckpointSet struct {
noopLocker
}
var _ exportmetric.CheckpointSet = OneRecordCheckpointSet{}
// ForEach implements exportmetric.CheckpointSet. It always invokes
// the callback once with always the same record.
func (OneRecordCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
desc := metric.NewDescriptor(
"foo",
metric.CounterInstrumentKind,
number.Int64Kind,
)
res := resource.NewSchemaless(attribute.String("a", "b"))
agg := sum.New(1)
if err := agg[0].Update(context.Background(), number.NewInt64Number(42), &desc); err != nil {
return err
}
start := time.Date(2020, time.December, 8, 19, 15, 0, 0, time.UTC)
end := time.Date(2020, time.December, 8, 19, 16, 0, 0, time.UTC)
labels := attribute.NewSet(attribute.String("abc", "def"), attribute.Int64("one", 1))
rec := exportmetric.NewRecord(&desc, &labels, res, agg[0].Aggregation(), start, end)
return recordFunc(rec)
}
// EmptyCheckpointSet is a checkpointer that has no records at all.
type EmptyCheckpointSet struct {
noopLocker
}
var _ exportmetric.CheckpointSet = EmptyCheckpointSet{}
// ForEach implements exportmetric.CheckpointSet. It never invokes the
// callback.
func (EmptyCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
return nil
}
// FailCheckpointSet is a checkpointer that returns an error during
// ForEach.
type FailCheckpointSet struct {
noopLocker
}
var _ exportmetric.CheckpointSet = FailCheckpointSet{}
// ForEach implements exportmetric.CheckpointSet. It always fails.
func (FailCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
return fmt.Errorf("fail")
}

View File

@ -0,0 +1,170 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetrictest
import (
"context"
"fmt"
"testing"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
// RunEndToEndTest can be used by protocol driver tests to validate
// themselves.
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
selector := simple.NewWithInexpensiveDistribution()
proc := processor.New(selector, exportmetric.StatelessExportKindSelector())
cont := controller.New(proc, controller.WithExporter(exp))
require.NoError(t, cont.Start(ctx))
meter := cont.MeterProvider().Meter("test-meter")
labels := []attribute.KeyValue{attribute.Bool("test", true)}
type data struct {
iKind metric.InstrumentKind
nKind number.Kind
val int64
}
instruments := map[string]data{
"test-int64-counter": {metric.CounterInstrumentKind, number.Int64Kind, 1},
"test-float64-counter": {metric.CounterInstrumentKind, number.Float64Kind, 1},
"test-int64-valuerecorder": {metric.ValueRecorderInstrumentKind, number.Int64Kind, 2},
"test-float64-valuerecorder": {metric.ValueRecorderInstrumentKind, number.Float64Kind, 2},
"test-int64-valueobserver": {metric.ValueObserverInstrumentKind, number.Int64Kind, 3},
"test-float64-valueobserver": {metric.ValueObserverInstrumentKind, number.Float64Kind, 3},
}
for name, data := range instruments {
data := data
switch data.iKind {
case metric.CounterInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64Counter(name).Add(ctx, data.val, labels...)
case number.Float64Kind:
metric.Must(meter).NewFloat64Counter(name).Add(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case metric.ValueRecorderInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64ValueRecorder(name).Record(ctx, data.val, labels...)
case number.Float64Kind:
metric.Must(meter).NewFloat64ValueRecorder(name).Record(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case metric.ValueObserverInstrumentKind:
switch data.nKind {
case number.Int64Kind:
metric.Must(meter).NewInt64ValueObserver(name,
func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(data.val, labels...)
},
)
case number.Float64Kind:
callback := func(v float64) metric.Float64ObserverFunc {
return metric.Float64ObserverFunc(func(_ context.Context, result metric.Float64ObserverResult) { result.Observe(v, labels...) })
}(float64(data.val))
metric.Must(meter).NewFloat64ValueObserver(name, callback)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
default:
assert.Failf(t, "unsupported metrics testing kind", data.iKind.String())
}
}
// Flush and close.
require.NoError(t, cont.Stop(ctx))
// Wait >2 cycles.
<-time.After(40 * time.Millisecond)
// Now shutdown the exporter
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
t.Fatalf("failed to stop the exporter: %v", err)
}
// Shutdown the collector too so that we can begin
// verification checks of expected data back.
_ = mcMetrics.Stop()
metrics := mcMetrics.GetMetrics()
assert.Len(t, metrics, len(instruments), "not enough metrics exported")
seen := make(map[string]struct{}, len(instruments))
for _, m := range metrics {
data, ok := instruments[m.Name]
if !ok {
assert.Failf(t, "unknown metrics", m.Name)
continue
}
seen[m.Name] = struct{}{}
switch data.iKind {
case metric.CounterInstrumentKind, metric.ValueObserverInstrumentKind:
var dp []*metricpb.NumberDataPoint
switch data.iKind {
case metric.CounterInstrumentKind:
require.NotNil(t, m.GetSum())
dp = m.GetSum().GetDataPoints()
case metric.ValueObserverInstrumentKind:
require.NotNil(t, m.GetGauge())
dp = m.GetGauge().GetDataPoints()
}
if assert.Len(t, dp, 1) {
switch data.nKind {
case number.Int64Kind:
v := &metricpb.NumberDataPoint_AsInt{AsInt: data.val}
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
case number.Float64Kind:
v := &metricpb.NumberDataPoint_AsDouble{AsDouble: float64(data.val)}
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
}
}
case metric.ValueRecorderInstrumentKind:
require.NotNil(t, m.GetSummary())
if dp := m.GetSummary().DataPoints; assert.Len(t, dp, 1) {
count := dp[0].Count
assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name)
assert.Equal(t, float64(data.val*int64(count)), dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val)
}
default:
assert.Failf(t, "invalid metrics kind", data.iKind.String())
}
}
for i := range instruments {
if _, ok := seen[i]; !ok {
assert.Fail(t, fmt.Sprintf("no metric(s) exported for %q", i))
}
}
}

View File

@ -0,0 +1,42 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetric
import metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
// Option are setting options passed to an Exporter on creation.
type Option interface {
apply(*config)
}
type exporterOptionFunc func(*config)
func (fn exporterOptionFunc) apply(cfg *config) {
fn(cfg)
}
type config struct {
exportKindSelector metricsdk.ExportKindSelector
}
// WithMetricExportKindSelector defines the ExportKindSelector used
// for selecting AggregationTemporality (i.e., Cumulative vs. Delta
// aggregation). If not otherwise specified, the exporter uses a
// cumulative export kind selector.
func WithMetricExportKindSelector(selector metricsdk.ExportKindSelector) Option {
return exporterOptionFunc(func(cfg *config) {
cfg.exportKindSelector = selector
})
}
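
A hedged usage sketch of the option above, assuming otlpmetric.New accepts these Options variadically after the client (the exporter constructor is defined elsewhere in this change): switch the exporter from its default cumulative selector to the stateless selector from the SDK export package.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
)

func main() {
	ctx := context.Background()
	client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithInsecure())
	// Assumption: New takes variadic otlpmetric.Option values.
	exp, err := otlpmetric.New(ctx, client,
		otlpmetric.WithMetricExportKindSelector(metricsdk.StatelessExportKindSelector()),
	)
	if err != nil {
		log.Fatalf("failed to create the metrics exporter: %v", err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}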

View File

@ -0,0 +1,108 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
import (
"context"
"errors"
"fmt"
"sync"
"google.golang.org/grpc"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/connection"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
type client struct {
connection *connection.Connection
lock sync.Mutex
metricsClient colmetricpb.MetricsServiceClient
}
var (
errNoClient = errors.New("no client")
)
// NewClient creates a new gRPC metric client.
func NewClient(opts ...Option) otlpmetric.Client {
cfg := otlpconfig.NewDefaultConfig()
otlpconfig.ApplyGRPCEnvConfigs(&cfg)
for _, opt := range opts {
opt.applyGRPCOption(&cfg)
}
c := &client{}
c.connection = connection.NewConnection(cfg, cfg.Metrics, c.handleNewConnection)
return c
}
func (c *client) handleNewConnection(cc *grpc.ClientConn) {
c.lock.Lock()
defer c.lock.Unlock()
if cc != nil {
c.metricsClient = colmetricpb.NewMetricsServiceClient(cc)
} else {
c.metricsClient = nil
}
}
// Start establishes a connection to the collector.
func (c *client) Start(ctx context.Context) error {
return c.connection.StartConnection(ctx)
}
// Stop shuts down the connection to the collector.
func (c *client) Stop(ctx context.Context) error {
return c.connection.Shutdown(ctx)
}
// UploadMetrics sends a batch of metrics to the collector.
func (c *client) UploadMetrics(ctx context.Context, protoMetrics []*metricpb.ResourceMetrics) error {
if !c.connection.Connected() {
return fmt.Errorf("metrics exporter is disconnected from the server %s: %w", c.connection.SCfg.Endpoint, c.connection.LastConnectError())
}
ctx, cancel := c.connection.ContextWithStop(ctx)
defer cancel()
ctx, tCancel := context.WithTimeout(ctx, c.connection.SCfg.Timeout)
defer tCancel()
ctx = c.connection.ContextWithMetadata(ctx)
err := func() error {
c.lock.Lock()
defer c.lock.Unlock()
if c.metricsClient == nil {
return errNoClient
}
return c.connection.DoRequest(ctx, func(ctx context.Context) error {
_, err := c.metricsClient.Export(ctx, &colmetricpb.ExportMetricsServiceRequest{
ResourceMetrics: protoMetrics,
})
return err
})
}()
if err != nil {
c.connection.SetStateDisconnected(err)
}
return err
}

View File

@ -0,0 +1,738 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc_test
import (
"context"
"fmt"
"net"
"strings"
"testing"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/encoding/gzip"
)
var (
oneRecord = otlpmetrictest.OneRecordCheckpointSet{}
)
func TestNewExporter_endToEnd(t *testing.T) {
tests := []struct {
name string
additionalOpts []otlpmetricgrpc.Option
}{
{
name: "StandardExporter",
},
{
name: "WithCompressor",
additionalOpts: []otlpmetricgrpc.Option{
otlpmetricgrpc.WithCompressor(gzip.Name),
},
},
{
name: "WithServiceConfig",
additionalOpts: []otlpmetricgrpc.Option{
otlpmetricgrpc.WithServiceConfig("{}"),
},
},
{
name: "WithDialOptions",
additionalOpts: []otlpmetricgrpc.Option{
otlpmetricgrpc.WithDialOption(grpc.WithBlock()),
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
newExporterEndToEndTest(t, test.additionalOpts)
})
}
}
func newGRPCExporter(t *testing.T, ctx context.Context, endpoint string, additionalOpts ...otlpmetricgrpc.Option) *otlpmetric.Exporter {
opts := []otlpmetricgrpc.Option{
otlpmetricgrpc.WithInsecure(),
otlpmetricgrpc.WithEndpoint(endpoint),
otlpmetricgrpc.WithReconnectionPeriod(50 * time.Millisecond),
}
opts = append(opts, additionalOpts...)
client := otlpmetricgrpc.NewClient(opts...)
exp, err := otlpmetric.New(ctx, client)
if err != nil {
t.Fatalf("failed to create a new collector exporter: %v", err)
}
return exp
}
func newExporterEndToEndTest(t *testing.T, additionalOpts []otlpmetricgrpc.Option) {
mc := runMockCollectorAtEndpoint(t, "localhost:56561")
defer func() {
_ = mc.stop()
}()
<-time.After(5 * time.Millisecond)
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint, additionalOpts...)
defer func() {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
panic(err)
}
}()
otlpmetrictest.RunEndToEndTest(ctx, t, exp, mc)
}
func TestExporterShutdown(t *testing.T) {
mc := runMockCollectorAtEndpoint(t, "localhost:56561")
defer func() {
_ = mc.Stop()
}()
<-time.After(5 * time.Millisecond)
otlpmetrictest.RunExporterShutdownTest(t, func() otlpmetric.Client {
return otlpmetricgrpc.NewClient(
otlpmetricgrpc.WithInsecure(),
otlpmetricgrpc.WithEndpoint(mc.endpoint),
otlpmetricgrpc.WithReconnectionPeriod(50*time.Millisecond),
)
})
}
func TestNewExporter_invokeStartThenStopManyTimes(t *testing.T) {
mc := runMockCollector(t)
defer func() {
_ = mc.stop()
}()
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint)
defer func() {
if err := exp.Shutdown(ctx); err != nil {
panic(err)
}
}()
// Invoke Start numerous times, should return errAlreadyStarted
for i := 0; i < 10; i++ {
if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") {
t.Fatalf("#%d unexpected Start error: %v", i, err)
}
}
if err := exp.Shutdown(ctx); err != nil {
t.Fatalf("failed to Shutdown the exporter: %v", err)
}
// Invoke Shutdown numerous times
for i := 0; i < 10; i++ {
if err := exp.Shutdown(ctx); err != nil {
t.Fatalf(`#%d got error (%v) expected none`, i, err)
}
}
}
func TestNewExporter_collectorConnectionDiesThenReconnectsWhenInRestMode(t *testing.T) {
mc := runMockCollector(t)
reconnectionPeriod := 20 * time.Millisecond
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint,
otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetrySettings{Enabled: false}),
otlpmetricgrpc.WithReconnectionPeriod(reconnectionPeriod))
defer func() { require.NoError(t, exp.Shutdown(ctx)) }()
// Wait for a connection.
mc.ln.WaitForConn()
// We'll now stop the collector right away to simulate a connection
// dying in the midst of communication or even not existing before.
require.NoError(t, mc.stop())
// First export: on failure it sends a disconnected message to the channel,
// triggering an almost immediate reconnection.
require.Error(t, exp.Export(ctx, oneRecord))
// Second export: it detects the connection issue, changes the exporter state to disconnected and
// sends a message to the disconnected channel, but this time the reconnection goroutine is in rest mode, not listening to the disconnected channel.
require.Error(t, exp.Export(ctx, oneRecord))
// As a result the exporter is in a disconnected state, waiting for a disconnection message before it reconnects.
// Resurrect the collector.
nmc := runMockCollectorAtEndpoint(t, mc.endpoint)
// Make sure the reconnection loop returns to its waiting mode;
// once it reaches the beginning of the loop again it should reconnect.
nmc.ln.WaitForConn()
n := 10
for i := 0; i < n; i++ {
// When disconnected, exp.Export doesn't send disconnected messages again;
// it just quits and returns the last connection error.
require.NoError(t, exp.Export(ctx, oneRecord))
}
nmaMetrics := nmc.getMetrics()
if g, w := len(nmaMetrics), n; g != w {
t.Fatalf("Connected collector: metrics: got %d want %d", g, w)
}
dMetrics := mc.getMetrics()
// Expecting 0 metrics to have been received by the original but now dead collector
if g, w := len(dMetrics), 0; g != w {
t.Fatalf("Disconnected collector: spans: got %d want %d", g, w)
}
require.NoError(t, nmc.Stop())
}
func TestExporterExportFailureAndRecoveryModes(t *testing.T) {
tts := []struct {
name string
errors []error
rs otlpmetricgrpc.RetrySettings
fn func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector)
opts []otlpmetricgrpc.Option
}{
{
name: "Do not retry if succeeded",
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
require.NoError(t, exp.Export(ctx, oneRecord))
metrics := mc.getMetrics()
require.Len(t, metrics, 1)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 success request.")
},
},
{
name: "Do not retry if 'error' is ok",
errors: []error{
status.Error(codes.OK, ""),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
require.NoError(t, exp.Export(ctx, oneRecord))
metrics := mc.getMetrics()
require.Len(t, metrics, 0)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 error OK request.")
},
},
{
name: "Fail three times and succeed",
rs: otlpmetricgrpc.RetrySettings{
Enabled: true,
MaxElapsedTime: 300 * time.Millisecond,
InitialInterval: 2 * time.Millisecond,
MaxInterval: 10 * time.Millisecond,
},
errors: []error{
status.Error(codes.Unavailable, "backend under pressure"),
status.Error(codes.Unavailable, "backend under pressure"),
status.Error(codes.Unavailable, "backend under pressure"),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
require.NoError(t, exp.Export(ctx, oneRecord))
metrics := mc.getMetrics()
require.Len(t, metrics, 1)
require.Equal(t, 4, mc.metricSvc.requests, "metric service must receive 3 failure requests and 1 success request.")
},
},
{
name: "Permanent error should not be retried",
rs: otlpmetricgrpc.RetrySettings{
Enabled: true,
MaxElapsedTime: 300 * time.Millisecond,
InitialInterval: 2 * time.Millisecond,
MaxInterval: 10 * time.Millisecond,
},
errors: []error{
status.Error(codes.InvalidArgument, "invalid arguments"),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
require.Error(t, exp.Export(ctx, oneRecord))
metric := mc.getMetrics()
require.Len(t, metric, 0)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 error requests.")
},
},
{
name: "Test all transient errors and succeed",
rs: otlpmetricgrpc.RetrySettings{
Enabled: true,
MaxElapsedTime: 500 * time.Millisecond,
InitialInterval: 1 * time.Millisecond,
MaxInterval: 2 * time.Millisecond,
},
errors: []error{
status.Error(codes.Canceled, ""),
status.Error(codes.DeadlineExceeded, ""),
status.Error(codes.ResourceExhausted, ""),
status.Error(codes.Aborted, ""),
status.Error(codes.OutOfRange, ""),
status.Error(codes.Unavailable, ""),
status.Error(codes.DataLoss, ""),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
require.NoError(t, exp.Export(ctx, oneRecord))
metrics := mc.getMetrics()
require.Len(t, metrics, 1)
require.Equal(t, 8, mc.metricSvc.requests, "metric service must receive 7 failure requests and 1 success request.")
},
},
{
name: "Retry should honor server throttling",
rs: otlpmetricgrpc.RetrySettings{
Enabled: true,
MaxElapsedTime: time.Minute,
InitialInterval: time.Nanosecond,
MaxInterval: time.Nanosecond,
},
opts: []otlpmetricgrpc.Option{
otlpmetricgrpc.WithTimeout(time.Millisecond * 100),
},
errors: []error{
newThrottlingError(codes.ResourceExhausted, time.Second*30),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
err := exp.Export(ctx, oneRecord)
require.Error(t, err)
require.Equal(t, "context deadline exceeded", err.Error())
metrics := mc.getMetrics()
require.Len(t, metrics, 0)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 failure request.")
},
},
{
name: "Retry should fail if server throttling is higher than the MaxElapsedTime",
rs: otlpmetricgrpc.RetrySettings{
Enabled: true,
MaxElapsedTime: time.Millisecond * 100,
InitialInterval: time.Nanosecond,
MaxInterval: time.Nanosecond,
},
errors: []error{
newThrottlingError(codes.ResourceExhausted, time.Minute),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
err := exp.Export(ctx, oneRecord)
require.Error(t, err)
require.Equal(t, "max elapsed time expired when respecting server throttle: rpc error: code = ResourceExhausted desc = ", err.Error())
metrics := mc.getMetrics()
require.Len(t, metrics, 0)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 failure request.")
},
},
{
name: "Retry stops if takes too long",
rs: otlpmetricgrpc.RetrySettings{
Enabled: true,
MaxElapsedTime: time.Millisecond * 100,
InitialInterval: time.Millisecond * 50,
MaxInterval: time.Millisecond * 50,
},
errors: []error{
status.Error(codes.Unavailable, "unavailable"),
status.Error(codes.Unavailable, "unavailable"),
status.Error(codes.Unavailable, "unavailable"),
status.Error(codes.Unavailable, "unavailable"),
status.Error(codes.Unavailable, "unavailable"),
status.Error(codes.Unavailable, "unavailable"),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
err := exp.Export(ctx, oneRecord)
require.Error(t, err)
require.Equal(t, "max elapsed time expired: rpc error: code = Unavailable desc = unavailable", err.Error())
metrics := mc.getMetrics()
require.Len(t, metrics, 0)
require.LessOrEqual(t, 1, mc.metricSvc.requests, "metric service must receive at least 1 failure request.")
},
},
{
name: "Disabled retry",
rs: otlpmetricgrpc.RetrySettings{
Enabled: false,
},
errors: []error{
status.Error(codes.Unavailable, "unavailable"),
},
fn: func(t *testing.T, ctx context.Context, exp *otlpmetric.Exporter, mc *mockCollector) {
err := exp.Export(ctx, oneRecord)
require.Error(t, err)
require.Equal(t, "rpc error: code = Unavailable desc = unavailable", err.Error())
metrics := mc.getMetrics()
require.Len(t, metrics, 0)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 failure request.")
},
},
}
for _, tt := range tts {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
mc := runMockCollectorWithConfig(t, &mockConfig{
errors: tt.errors,
})
opts := []otlpmetricgrpc.Option{
otlpmetricgrpc.WithRetry(tt.rs),
}
if len(tt.opts) != 0 {
opts = append(opts, tt.opts...)
}
exp := newGRPCExporter(t, ctx, mc.endpoint, opts...)
tt.fn(t, ctx, exp, mc)
require.NoError(t, mc.Stop())
require.NoError(t, exp.Shutdown(ctx))
})
}
}
func TestPermanentErrorsShouldNotBeRetried(t *testing.T) {
permanentErrors := []*status.Status{
status.New(codes.Unknown, "Unknown"),
status.New(codes.InvalidArgument, "InvalidArgument"),
status.New(codes.NotFound, "NotFound"),
status.New(codes.AlreadyExists, "AlreadyExists"),
status.New(codes.FailedPrecondition, "FailedPrecondition"),
status.New(codes.Unimplemented, "Unimplemented"),
status.New(codes.Internal, "Internal"),
status.New(codes.PermissionDenied, ""),
status.New(codes.Unauthenticated, ""),
}
for _, sts := range permanentErrors {
t.Run(sts.Code().String(), func(t *testing.T) {
ctx := context.Background()
mc := runMockCollectorWithConfig(t, &mockConfig{
errors: []error{sts.Err()},
})
exp := newGRPCExporter(t, ctx, mc.endpoint)
err := exp.Export(ctx, oneRecord)
require.Error(t, err)
require.Len(t, mc.getMetrics(), 0)
require.Equal(t, 1, mc.metricSvc.requests, "metric service must receive 1 permanent error request.")
require.NoError(t, mc.Stop())
require.NoError(t, exp.Shutdown(ctx))
})
}
}
func newThrottlingError(code codes.Code, duration time.Duration) error {
s := status.New(code, "")
s, _ = s.WithDetails(&errdetails.RetryInfo{RetryDelay: durationpb.New(duration)})
return s.Err()
}
func TestNewExporter_collectorConnectionDiesThenReconnects(t *testing.T) {
mc := runMockCollector(t)
reconnectionPeriod := 50 * time.Millisecond
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint,
otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetrySettings{Enabled: false}),
otlpmetricgrpc.WithReconnectionPeriod(reconnectionPeriod))
defer func() { require.NoError(t, exp.Shutdown(ctx)) }()
mc.ln.WaitForConn()
// We'll now stop the collector right away to simulate a connection
// dying in the midst of communication or even not existing before.
require.NoError(t, mc.stop())
// In the test below, we'll stop the collector many times,
// while exporting metrics and test to ensure that we can
// reconnect.
for j := 0; j < 3; j++ {
// No endpoint up.
require.Error(t, exp.Export(ctx, oneRecord))
// Now resurrect the collector by making a new one but reusing the
// old endpoint, and the collector should reconnect automatically.
nmc := runMockCollectorAtEndpoint(t, mc.endpoint)
// Give the exporter some time to reconnect.
nmc.ln.WaitForConn()
n := 10
for i := 0; i < n; i++ {
require.NoError(t, exp.Export(ctx, oneRecord))
}
nmaMetrics := nmc.getMetrics()
// Expecting n metrics to have been received by the new collector.
if g, w := len(nmaMetrics), n; g != w {
t.Fatalf("Round #%d: Connected collector: spans: got %d want %d", j, g, w)
}
dMetrics := mc.getMetrics()
// Expecting 0 metrics to have been received by the original but now dead collector
if g, w := len(dMetrics), 0; g != w {
t.Fatalf("Round #%d: Disconnected collector: spans: got %d want %d", j, g, w)
}
// Disconnect for the next try.
require.NoError(t, nmc.stop())
}
}
// This test takes a long time to run: to skip it, run tests using: -short
func TestNewExporter_collectorOnBadConnection(t *testing.T) {
if testing.Short() {
t.Skipf("Skipping this long running test")
}
ln, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("Failed to grab an available port: %v", err)
}
// First close the "collector's" listener: optimistically this endpoint won't get reused right away.
// However, our goal in closing it is to simulate an unavailable connection.
_ = ln.Close()
_, collectorPortStr, _ := net.SplitHostPort(ln.Addr().String())
endpoint := fmt.Sprintf("localhost:%s", collectorPortStr)
ctx := context.Background()
exp := newGRPCExporter(t, ctx, endpoint)
_ = exp.Shutdown(ctx)
}
func TestNewExporter_withEndpoint(t *testing.T) {
mc := runMockCollector(t)
defer func() {
_ = mc.stop()
}()
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint)
_ = exp.Shutdown(ctx)
}
func TestNewExporter_withHeaders(t *testing.T) {
mc := runMockCollector(t)
defer func() {
_ = mc.stop()
}()
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint,
otlpmetricgrpc.WithHeaders(map[string]string{"header1": "value1"}))
require.NoError(t, exp.Export(ctx, oneRecord))
defer func() {
_ = exp.Shutdown(ctx)
}()
headers := mc.getHeaders()
require.Len(t, headers.Get("header1"), 1)
assert.Equal(t, "value1", headers.Get("header1")[0])
}
func TestNewExporter_WithTimeout(t *testing.T) {
tts := []struct {
name string
fn func(exp *otlpmetric.Exporter) error
timeout time.Duration
metrics int
spans int
code codes.Code
delay bool
}{
{
name: "Timeout Metrics",
fn: func(exp *otlpmetric.Exporter) error {
return exp.Export(context.Background(), oneRecord)
},
timeout: time.Millisecond * 100,
code: codes.DeadlineExceeded,
delay: true,
},
{
name: "No Timeout Metrics",
fn: func(exp *otlpmetric.Exporter) error {
return exp.Export(context.Background(), oneRecord)
},
timeout: time.Minute,
metrics: 1,
code: codes.OK,
},
}
for _, tt := range tts {
t.Run(tt.name, func(t *testing.T) {
mc := runMockCollector(t)
if tt.delay {
mc.metricSvc.delay = time.Second * 10
}
defer func() {
_ = mc.stop()
}()
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint, otlpmetricgrpc.WithTimeout(tt.timeout), otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetrySettings{Enabled: false}))
defer func() {
_ = exp.Shutdown(ctx)
}()
err := tt.fn(exp)
if tt.code == codes.OK {
require.NoError(t, err)
} else {
require.Error(t, err)
}
s := status.Convert(err)
require.Equal(t, tt.code, s.Code())
require.Len(t, mc.getMetrics(), tt.metrics)
})
}
}
func TestNewExporter_withInvalidSecurityConfiguration(t *testing.T) {
mc := runMockCollector(t)
defer func() {
_ = mc.stop()
}()
ctx := context.Background()
client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithEndpoint(mc.endpoint))
exp, err := otlpmetric.New(ctx, client)
if err != nil {
t.Fatalf("failed to create a new collector exporter: %v", err)
}
err = exp.Export(ctx, oneRecord)
expectedErr := fmt.Sprintf("metrics exporter is disconnected from the server %s: grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)", mc.endpoint)
require.Error(t, err)
require.Equal(t, expectedErr, err.Error())
defer func() {
_ = exp.Shutdown(ctx)
}()
}
func TestDisconnected(t *testing.T) {
ctx := context.Background()
// The endpoint doesn't matter since we want to be disconnected. But we
// set a blocking connection, so dialing to the invalid
// endpoint actually fails.
exp := newGRPCExporter(t, ctx, "invalid",
otlpmetricgrpc.WithReconnectionPeriod(time.Hour),
otlpmetricgrpc.WithDialOption(
grpc.WithBlock(),
grpc.FailOnNonTempDialError(true),
),
)
defer func() {
assert.NoError(t, exp.Shutdown(ctx))
}()
assert.Error(t, exp.Export(ctx, oneRecord))
}
func TestEmptyData(t *testing.T) {
mc := runMockCollectorAtEndpoint(t, "localhost:56561")
defer func() {
_ = mc.stop()
}()
<-time.After(5 * time.Millisecond)
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint)
defer func() {
assert.NoError(t, exp.Shutdown(ctx))
}()
assert.NoError(t, exp.Export(ctx, otlpmetrictest.EmptyCheckpointSet{}))
}
func TestFailedMetricTransform(t *testing.T) {
mc := runMockCollectorAtEndpoint(t, "localhost:56561")
defer func() {
_ = mc.stop()
}()
<-time.After(5 * time.Millisecond)
ctx := context.Background()
exp := newGRPCExporter(t, ctx, mc.endpoint)
defer func() {
assert.NoError(t, exp.Shutdown(ctx))
}()
assert.Error(t, exp.Export(ctx, otlpmetrictest.FailCheckpointSet{}))
}

View File

@ -0,0 +1,203 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc_test
import (
"context"
"log"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
)
func Example_insecure() {
ctx := context.Background()
client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithInsecure())
exp, err := otlpmetric.New(ctx, client)
if err != nil {
log.Fatalf("Failed to create the collector exporter: %v", err)
}
defer func() {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
otel.Handle(err)
}
}()
pusher := controller.New(
processor.New(
simple.NewWithExactDistribution(),
exp,
),
controller.WithExporter(exp),
controller.WithCollectPeriod(2*time.Second),
)
global.SetMeterProvider(pusher.MeterProvider())
if err := pusher.Start(ctx); err != nil {
log.Fatalf("could not start metric controoler: %v", err)
}
defer func() {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
// pushes any last exports to the receiver
if err := pusher.Stop(ctx); err != nil {
otel.Handle(err)
}
}()
meter := global.Meter("test-meter")
// Recorder metric example
counter := metric.Must(meter).
NewFloat64Counter(
"an_important_metric",
metric.WithDescription("Measures the cumulative epicness of the app"),
)
for i := 0; i < 10; i++ {
log.Printf("Doing really hard work (%d / 10)\n", i+1)
counter.Add(ctx, 1.0)
}
}
func Example_withTLS() {
// Please take a look at https://pkg.go.dev/google.golang.org/grpc/credentials#TransportCredentials
// for ways to initialize gRPC TransportCredentials.
creds, err := credentials.NewClientTLSFromFile("my-cert.pem", "")
if err != nil {
log.Fatalf("failed to create gRPC client TLS credentials: %v", err)
}
ctx := context.Background()
client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithTLSCredentials(creds))
exp, err := otlpmetric.New(ctx, client)
if err != nil {
log.Fatalf("failed to create the collector exporter: %v", err)
}
defer func() {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
otel.Handle(err)
}
}()
pusher := controller.New(
processor.New(
simple.NewWithExactDistribution(),
exp,
),
controller.WithExporter(exp),
controller.WithCollectPeriod(2*time.Second),
)
global.SetMeterProvider(pusher.MeterProvider())
if err := pusher.Start(ctx); err != nil {
log.Fatalf("could not start metric controoler: %v", err)
}
defer func() {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
// pushes any last exports to the receiver
if err := pusher.Stop(ctx); err != nil {
otel.Handle(err)
}
}()
meter := global.Meter("test-meter")
// Recorder metric example
counter := metric.Must(meter).
NewFloat64Counter(
"an_important_metric",
metric.WithDescription("Measures the cumulative epicness of the app"),
)
for i := 0; i < 10; i++ {
log.Printf("Doing really hard work (%d / 10)\n", i+1)
counter.Add(ctx, 1.0)
}
}
func Example_withDifferentSignalCollectors() {
client := otlpmetricgrpc.NewClient(
otlpmetricgrpc.WithInsecure(),
otlpmetricgrpc.WithEndpoint("localhost:30080"),
)
ctx := context.Background()
exp, err := otlpmetric.New(ctx, client)
if err != nil {
log.Fatalf("failed to create the collector exporter: %v", err)
}
defer func() {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
if err := exp.Shutdown(ctx); err != nil {
otel.Handle(err)
}
}()
pusher := controller.New(
processor.New(
simple.NewWithExactDistribution(),
exp,
),
controller.WithExporter(exp),
controller.WithCollectPeriod(2*time.Second),
)
global.SetMeterProvider(pusher.MeterProvider())
if err := pusher.Start(ctx); err != nil {
log.Fatalf("could not start metric controoler: %v", err)
}
defer func() {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
// pushes any last exports to the receiver
if err := pusher.Stop(ctx); err != nil {
otel.Handle(err)
}
}()
meter := global.Meter("test-meter")
// Recorder metric example
counter := metric.Must(meter).
NewFloat64Counter(
"an_important_metric",
metric.WithDescription("Measures the cumulative epicness of the app"),
)
for i := 0; i < 10; i++ {
log.Printf("Doing really hard work (%d / 10)\n", i+1)
counter.Add(ctx, 1.0)
}
log.Printf("Done!")
}

View File

@ -0,0 +1,31 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
import (
"context"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
)
// New constructs a new Exporter and starts it.
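//
// For example, a minimal usage sketch (it mirrors the package examples and
// uses only options defined in this package; error handling is elided):
//
//    ctx := context.Background()
//    exp, err := New(ctx, WithInsecure(), WithEndpoint("localhost:30080"))
//    if err != nil {
//        log.Fatalf("failed to create the collector exporter: %v", err)
//    }
//    defer func() { _ = exp.Shutdown(ctx) }()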
func New(ctx context.Context, opts ...Option) (*otlpmetric.Exporter, error) {
return otlpmetric.New(ctx, NewClient(opts...))
}
// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(opts ...Option) *otlpmetric.Exporter {
return otlpmetric.NewUnstarted(NewClient(opts...))
}

View File

@ -0,0 +1,73 @@
module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
go 1.15
require (
github.com/stretchr/testify v1.7.0
go.opentelemetry.io/otel v0.20.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.0.0-00010101000000-000000000000
go.opentelemetry.io/otel/metric v0.20.0
go.opentelemetry.io/otel/sdk/metric v0.20.0
go.opentelemetry.io/proto/otlp v0.9.0
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
google.golang.org/grpc v1.38.0
google.golang.org/protobuf v1.26.0
)
replace go.opentelemetry.io/otel => ../../../..
replace go.opentelemetry.io/otel/sdk => ../../../../sdk
replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric
replace go.opentelemetry.io/otel/exporters/otlp => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../
replace go.opentelemetry.io/otel/metric => ../../../../metric
replace go.opentelemetry.io/otel/oteltest => ../../../../oteltest
replace go.opentelemetry.io/otel/trace => ../../../../trace
replace go.opentelemetry.io/otel/bridge/opencensus => ../../../../bridge/opencensus
replace go.opentelemetry.io/otel/bridge/opentracing => ../../../../bridge/opentracing
replace go.opentelemetry.io/otel/example/jaeger => ../../../../example/jaeger
replace go.opentelemetry.io/otel/example/namedtracer => ../../../../example/namedtracer
replace go.opentelemetry.io/otel/example/opencensus => ../../../../example/opencensus
replace go.opentelemetry.io/otel/example/otel-collector => ../../../../example/otel-collector
replace go.opentelemetry.io/otel/example/passthrough => ../../../../example/passthrough
replace go.opentelemetry.io/otel/example/prom-collector => ../../../../example/prom-collector
replace go.opentelemetry.io/otel/example/prometheus => ../../../../example/prometheus
replace go.opentelemetry.io/otel/example/zipkin => ../../../../example/zipkin
replace go.opentelemetry.io/otel/exporters/metric/prometheus => ../../../metric/prometheus
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ./
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../../otlptrace
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../../otlptrace/otlptracegrpc
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/exporters/stdout => ../../../stdout
replace go.opentelemetry.io/otel/exporters/trace/jaeger => ../../../trace/jaeger
replace go.opentelemetry.io/otel/exporters/trace/zipkin => ../../../trace/zipkin
replace go.opentelemetry.io/otel/internal/tools => ../../../../internal/tools
replace go.opentelemetry.io/otel/sdk/export/metric => ../../../../sdk/export/metric
replace go.opentelemetry.io/otel/internal/metric => ../../../../internal/metric

View File

@ -0,0 +1,125 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -0,0 +1,231 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc_test
import (
"context"
"fmt"
"net"
"runtime"
"strings"
"sync"
"testing"
"time"
"google.golang.org/grpc/metadata"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
"google.golang.org/grpc"
collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)
func makeMockCollector(t *testing.T, mockConfig *mockConfig) *mockCollector {
return &mockCollector{
t: t,
metricSvc: &mockMetricService{
storage: otlpmetrictest.NewMetricsStorage(),
errors: mockConfig.errors,
},
}
}
type mockMetricService struct {
collectormetricpb.UnimplementedMetricsServiceServer
requests int
errors []error
headers metadata.MD
mu sync.RWMutex
storage otlpmetrictest.MetricsStorage
delay time.Duration
}
func (mms *mockMetricService) getHeaders() metadata.MD {
mms.mu.RLock()
defer mms.mu.RUnlock()
return mms.headers
}
func (mms *mockMetricService) getMetrics() []*metricpb.Metric {
mms.mu.RLock()
defer mms.mu.RUnlock()
return mms.storage.GetMetrics()
}
func (mms *mockMetricService) Export(ctx context.Context, exp *collectormetricpb.ExportMetricsServiceRequest) (*collectormetricpb.ExportMetricsServiceResponse, error) {
if mms.delay > 0 {
time.Sleep(mms.delay)
}
mms.mu.Lock()
defer func() {
mms.requests++
mms.mu.Unlock()
}()
reply := &collectormetricpb.ExportMetricsServiceResponse{}
if mms.requests < len(mms.errors) {
idx := mms.requests
return reply, mms.errors[idx]
}
mms.headers, _ = metadata.FromIncomingContext(ctx)
mms.storage.AddMetrics(exp)
return reply, nil
}
type mockCollector struct {
t *testing.T
metricSvc *mockMetricService
endpoint string
ln *listener
stopFunc func()
stopOnce sync.Once
}
type mockConfig struct {
errors []error
endpoint string
}
var _ collectormetricpb.MetricsServiceServer = (*mockMetricService)(nil)
var errAlreadyStopped = fmt.Errorf("already stopped")
func (mc *mockCollector) stop() error {
var err = errAlreadyStopped
mc.stopOnce.Do(func() {
err = nil
if mc.stopFunc != nil {
mc.stopFunc()
}
})
// Give it some time to shut down.
<-time.After(160 * time.Millisecond)
// Wait for services to finish reading/writing.
// Getting the lock ensures the metricSvc is done flushing.
mc.metricSvc.mu.Lock()
defer mc.metricSvc.mu.Unlock()
return err
}
func (mc *mockCollector) Stop() error {
return mc.stop()
}
func (mc *mockCollector) getHeaders() metadata.MD {
return mc.metricSvc.getHeaders()
}
func (mc *mockCollector) getMetrics() []*metricpb.Metric {
return mc.metricSvc.getMetrics()
}
func (mc *mockCollector) GetMetrics() []*metricpb.Metric {
return mc.getMetrics()
}
// runMockCollector is a helper function to create a mock Collector
func runMockCollector(t *testing.T) *mockCollector {
return runMockCollectorAtEndpoint(t, "localhost:0")
}
func runMockCollectorAtEndpoint(t *testing.T, endpoint string) *mockCollector {
return runMockCollectorWithConfig(t, &mockConfig{endpoint: endpoint})
}
func runMockCollectorWithConfig(t *testing.T, mockConfig *mockConfig) *mockCollector {
ln, err := net.Listen("tcp", mockConfig.endpoint)
if err != nil {
t.Fatalf("Failed to get an endpoint: %v", err)
}
srv := grpc.NewServer()
mc := makeMockCollector(t, mockConfig)
collectormetricpb.RegisterMetricsServiceServer(srv, mc.metricSvc)
mc.ln = newListener(ln)
go func() {
_ = srv.Serve((net.Listener)(mc.ln))
}()
mc.endpoint = ln.Addr().String()
// srv.Stop calls Close on mc.ln.
mc.stopFunc = srv.Stop
return mc
}
type listener struct {
closeOnce sync.Once
wrapped net.Listener
C chan struct{}
}
func newListener(wrapped net.Listener) *listener {
return &listener{
wrapped: wrapped,
C: make(chan struct{}, 1),
}
}
func (l *listener) Close() error { return l.wrapped.Close() }
func (l *listener) Addr() net.Addr { return l.wrapped.Addr() }
// Accept waits for and returns the next connection to the listener. It will
// send a signal on l.C that a connection has been made before returning.
func (l *listener) Accept() (net.Conn, error) {
conn, err := l.wrapped.Accept()
if err != nil {
// Go 1.16 exported net.ErrClosed, which could clean up this check, but to
// remain backwards compatible with previous versions of Go that we
// support, the following string evaluation is used instead, in line
// with the previously recommended way to check this:
// https://github.com/golang/go/issues/4373#issuecomment-353076799
if strings.Contains(err.Error(), "use of closed network connection") {
// If the listener has been closed, do not allow callers of
// WaitForConn to wait for a connection that will never come.
l.closeOnce.Do(func() { close(l.C) })
}
return conn, err
}
select {
case l.C <- struct{}{}:
default:
// If C is full, assume nobody is listening and move on.
}
return conn, nil
}
// WaitForConn will wait indefinitely for a connection to be established with
// the listener before returning.
func (l *listener) WaitForConn() {
for {
select {
case <-l.C:
return
default:
runtime.Gosched()
}
}
}

View File

@ -0,0 +1,132 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlpmetricgrpc
import (
"fmt"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"go.opentelemetry.io/otel"
)
// Option applies an option to the gRPC client.
type Option interface {
applyGRPCOption(*otlpconfig.Config)
}
// RetrySettings defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type RetrySettings otlpconfig.RetrySettings
type wrappedOption struct {
otlpconfig.GRPCOption
}
func (w wrappedOption) applyGRPCOption(cfg *otlpconfig.Config) {
w.ApplyGRPCOption(cfg)
}
// WithInsecure disables client transport security for the exporter's gRPC connection
// just like grpc.WithInsecure() https://pkg.go.dev/google.golang.org/grpc#WithInsecure
// does. Note, by default, client security is required unless WithInsecure is used.
func WithInsecure() Option {
return wrappedOption{otlpconfig.WithInsecure()}
}
// WithEndpoint allows one to set the endpoint that the exporter will
// use to connect to the collector. If unset, it will instead try to
// connect to DefaultCollectorHost:DefaultCollectorPort.
func WithEndpoint(endpoint string) Option {
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
}
// WithReconnectionPeriod allows one to set the delay before the next connection attempt
// after failing to connect with the collector.
func WithReconnectionPeriod(rp time.Duration) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ReconnectionPeriod = rp
})}
}
func compressorToCompression(compressor string) otlpconfig.Compression {
switch compressor {
case "gzip":
return otlpconfig.GzipCompression
}
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
return otlpconfig.NoCompression
}
// WithCompressor will set the compressor for the gRPC client to use when sending requests.
// It is the responsibility of the caller to ensure that the compressor set has been registered
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
// compressors auto-register on import, such as gzip, which can be registered by calling
// `import _ "google.golang.org/grpc/encoding/gzip"`.
func WithCompressor(compressor string) Option {
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
}
// WithHeaders will send the provided headers with gRPC requests.
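//
// For example, mirroring the header used in this package's tests:
//
//    exp, err := New(ctx, WithHeaders(map[string]string{"header1": "value1"}))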
func WithHeaders(headers map[string]string) Option {
return wrappedOption{otlpconfig.WithHeaders(headers)}
}
// WithTLSCredentials allows the connection to use TLS credentials
// when talking to the server. It takes in grpc.TransportCredentials instead
// of, say, a certificate file or a tls.Certificate, because these credentials
// can be retrieved in many ways, e.g. from a plain file, an in-code tls.Config,
// or by certificate rotation, so it is up to the caller to decide what to use.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.Metrics.GRPCCredentials = creds
})}
}
// WithServiceConfig defines the default gRPC service config used.
func WithServiceConfig(serviceConfig string) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.ServiceConfig = serviceConfig
})}
}
// WithDialOption opens support for any grpc.DialOption to be used. If it conflicts
// with some other configuration, the dial options specified here will
// take precedence since they are applied last.
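//
// For example, a sketch of the blocking-dial options this package's tests use:
//
//    exp, err := New(ctx,
//        WithDialOption(grpc.WithBlock(), grpc.FailOnNonTempDialError(true)),
//    )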
func WithDialOption(opts ...grpc.DialOption) Option {
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg *otlpconfig.Config) {
cfg.DialOptions = opts
})}
}
// WithTimeout tells the client the max waiting time for the backend to process
// each metrics batch. If unset, the default will be 10 seconds.
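//
// For example, to allow the backend a full minute per batch, as the tests do:
//
//    exp, err := New(ctx, WithTimeout(time.Minute))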
func WithTimeout(duration time.Duration) Option {
return wrappedOption{otlpconfig.WithTimeout(duration)}
}
// WithRetry configures the retry policy for transient errors that may occur when
// exporting metrics. An exponential back-off algorithm is used to
// ensure endpoints are not overwhelmed with retries. If unset, the default
// retry policy will retry after 5 seconds and increase exponentially after each
// error for a total of 1 minute.
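//
// For example, a minimal sketch that disables retries entirely, as the
// timeout tests in this package do:
//
//    exp, err := New(ctx, WithRetry(RetrySettings{Enabled: false}))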
func WithRetry(settings RetrySettings) Option {
return wrappedOption{otlpconfig.WithRetry(otlpconfig.RetrySettings(settings))}
}

View File

@ -67,3 +67,7 @@ replace go.opentelemetry.io/otel/example/passthrough => ../../../example/passthr
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ./otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlpmetric/otlpmetricgrpc

View File

@ -64,3 +64,7 @@ replace go.opentelemetry.io/otel/example/passthrough => ../../../../example/pass
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlpmetric/otlpmetricgrpc

View File

@ -61,3 +61,7 @@ replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric
replace go.opentelemetry.io/otel/trace => ../../../../trace
replace go.opentelemetry.io/otel/internal/metric => ../../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlpmetric/otlpmetricgrpc

View File

@ -65,3 +65,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../ot
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../otlp/otlpmetric/otlpmetricgrpc

View File

@ -62,3 +62,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlp/otlpmetric/otlpmetricgrpc

View File

@ -63,3 +63,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../otlp/otlpmetric/otlpmetricgrpc

4
go.mod
View File

@ -60,3 +60,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ./exporters/otlp/ot
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ./exporters/otlp/otlptrace/otlptracegrpc
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ./exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ./exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ./exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -59,3 +59,7 @@ replace go.opentelemetry.io/otel/sdk/export/metric => ../../sdk/export/metric
replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
replace go.opentelemetry.io/otel/trace => ../../trace
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -63,3 +63,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -60,3 +60,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../ex
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -59,3 +59,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../exporters/otlp/o
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../exporters/otlp/otlptrace/otlptracegrpc
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -60,3 +60,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -61,3 +61,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../ex
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -63,3 +63,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../..
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../../exporters/otlp/otlpmetric/otlpmetricgrpc

View File

@ -59,3 +59,7 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../ex
replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp => ../exporters/otlp/otlptrace/otlptracehttp
replace go.opentelemetry.io/otel/internal/metric => ../internal/metric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../exporters/otlp/otlpmetric
replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => ../exporters/otlp/otlpmetric/otlpmetricgrpc