// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlp

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	commonpb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/common/v1"
	metricpb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/metrics/v1"
	resourcepb "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen/resource/v1"
	"go.opentelemetry.io/otel/label"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/export/metric/metrictest"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
	"go.opentelemetry.io/otel/sdk/resource"
)

var (
	// Timestamps used in this test:

	intervalStart = time.Now()
	intervalEnd   = intervalStart.Add(time.Hour)
)

func startTime() uint64 {
	return uint64(intervalStart.UnixNano())
}

func pointTime() uint64 {
	return uint64(intervalEnd.UnixNano())
}

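// checkpointSet is a fixed set of metric records that the tests feed to the
// exporter in place of a live metric processor.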
type checkpointSet struct {
	sync.RWMutex
	records []metricsdk.Record
}

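// ForEach calls fn for every stored record, ignoring records whose
// aggregation reports no data.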
func (m *checkpointSet) ForEach(_ metricsdk.ExportKindSelector, fn func(metricsdk.Record) error) error {
	for _, r := range m.records {
		if err := fn(r); err != nil && err != aggregation.ErrNoData {
			return err
		}
	}
	return nil
}

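// record describes one metric record to synthesize for a test: the instrument
// name and kind, the number kind, the originating resource, instrument
// options, and labels.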
type record struct {
	name     string
	iKind    metric.InstrumentKind
	nKind    number.Kind
	resource *resource.Resource
	opts     []metric.InstrumentOption
	labels   []label.KeyValue
}

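// Shared label, resource, and protobuf fixtures used by the tests below.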
var (
	baseKeyValues = []label.KeyValue{label.String("host", "test.com")}
	cpuKey        = label.Key("CPU")

	testInstA = resource.NewWithAttributes(label.String("instance", "tester-a"))
	testInstB = resource.NewWithAttributes(label.String("instance", "tester-b"))

	testHistogramBoundaries = []float64{2.0, 4.0, 8.0}

	cpu1Labels = []*commonpb.StringKeyValue{
		{
			Key:   "CPU",
			Value: "1",
		},
		{
			Key:   "host",
			Value: "test.com",
		},
	}
	cpu2Labels = []*commonpb.StringKeyValue{
		{
			Key:   "CPU",
			Value: "2",
		},
		{
			Key:   "host",
			Value: "test.com",
		},
	}

	testerAResource = &resourcepb.Resource{
		Attributes: []*commonpb.KeyValue{
			{
				Key: "instance",
				Value: &commonpb.AnyValue{
					Value: &commonpb.AnyValue_StringValue{
						StringValue: "tester-a",
					},
				},
			},
		},
	}
	testerBResource = &resourcepb.Resource{
		Attributes: []*commonpb.KeyValue{
			{
				Key: "instance",
				Value: &commonpb.AnyValue{
					Value: &commonpb.AnyValue_StringValue{
						StringValue: "tester-b",
					},
				},
			},
		},
	}
)

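// TestNoGroupingExport checks that records with no resource and no
// instrumentation library are exported together under a single nil resource.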
func TestNoGroupingExport(t *testing.T) {
	runMetricExportTests(
		t,
		nil,
		[]record{
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				nil,
				nil,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				nil,
				nil,
				append(baseKeyValues, cpuKey.Int(2)),
			},
		},
		[]metricpb.ResourceMetrics{
			{
				Resource: nil,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value:             11,
												Labels:            cpu2Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	)
}

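// TestValuerecorderMetricGroupingExport checks that duplicate ValueRecorder
// records are exported as separate histogram data points of one metric.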
func TestValuerecorderMetricGroupingExport(t *testing.T) {
	r := record{
		"valuerecorder",
		metric.ValueRecorderInstrumentKind,
		number.Int64Kind,
		nil,
		nil,
		append(baseKeyValues, cpuKey.Int(1)),
	}
	expected := []metricpb.ResourceMetrics{
		{
			Resource: nil,
			InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
				{
					Metrics: []*metricpb.Metric{
						{
							Name: "valuerecorder",
							Data: &metricpb.Metric_IntHistogram{
								IntHistogram: &metricpb.IntHistogram{
									AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
									DataPoints: []*metricpb.IntHistogramDataPoint{
										{
											Labels: []*commonpb.StringKeyValue{
												{
													Key:   "CPU",
													Value: "1",
												},
												{
													Key:   "host",
													Value: "test.com",
												},
											},
											StartTimeUnixNano: startTime(),
											TimeUnixNano:      pointTime(),
											Count:             2,
											Sum:               11,
											ExplicitBounds:    testHistogramBoundaries,
											BucketCounts:      []uint64{1, 0, 0, 1},
										},
										{
											Labels: []*commonpb.StringKeyValue{
												{
													Key:   "CPU",
													Value: "1",
												},
												{
													Key:   "host",
													Value: "test.com",
												},
											},
											Count:             2,
											Sum:               11,
											ExplicitBounds:    testHistogramBoundaries,
											BucketCounts:      []uint64{1, 0, 0, 1},
											StartTimeUnixNano: startTime(),
											TimeUnixNano:      pointTime(),
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	runMetricExportTests(t, nil, []record{r, r}, expected)
}

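// TestCountInt64MetricGroupingExport checks that duplicate int64 counter
// records are exported as separate data points of one metric.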
func TestCountInt64MetricGroupingExport(t *testing.T) {
	r := record{
		"int64-count",
		metric.CounterInstrumentKind,
		number.Int64Kind,
		nil,
		nil,
		append(baseKeyValues, cpuKey.Int(1)),
	}
	runMetricExportTests(
		t,
		nil,
		[]record{r, r},
		[]metricpb.ResourceMetrics{
			{
				Resource: nil,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	)
}

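// TestCountFloat64MetricGroupingExport checks that duplicate float64 counter
// records are exported as separate double data points of one metric.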
func TestCountFloat64MetricGroupingExport(t *testing.T) {
	r := record{
		"float64-count",
		metric.CounterInstrumentKind,
		number.Float64Kind,
		nil,
		nil,
		append(baseKeyValues, cpuKey.Int(1)),
	}
	runMetricExportTests(
		t,
		nil,
		[]record{r, r},
		[]metricpb.ResourceMetrics{
			{
				Resource: nil,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						Metrics: []*metricpb.Metric{
							{
								Name: "float64-count",
								Data: &metricpb.Metric_DoubleSum{
									DoubleSum: &metricpb.DoubleSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.DoubleDataPoint{
											{
												Value: 11,
												Labels: []*commonpb.StringKeyValue{
													{
														Key:   "CPU",
														Value: "1",
													},
													{
														Key:   "host",
														Value: "test.com",
													},
												},
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value: 11,
												Labels: []*commonpb.StringKeyValue{
													{
														Key:   "CPU",
														Value: "1",
													},
													{
														Key:   "host",
														Value: "test.com",
													},
												},
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	)
}

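// TestResourceMetricGroupingExport checks that records are grouped into one
// ResourceMetrics per resource.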
func TestResourceMetricGroupingExport(t *testing.T) {
	runMetricExportTests(
		t,
		nil,
		[]record{
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				nil,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				nil,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				nil,
				append(baseKeyValues, cpuKey.Int(2)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstB,
				nil,
				append(baseKeyValues, cpuKey.Int(1)),
			},
		},
		[]metricpb.ResourceMetrics{
			{
				Resource: testerAResource,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value:             11,
												Labels:            cpu2Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
			{
				Resource: testerBResource,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	)
}

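// TestResourceInstLibMetricGroupingExport checks that records are grouped
// first by resource and then by instrumentation library name and version.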
func TestResourceInstLibMetricGroupingExport(t *testing.T) {
	countingLib1 := []metric.InstrumentOption{
		metric.WithInstrumentationName("counting-lib"),
		metric.WithInstrumentationVersion("v1"),
	}
	countingLib2 := []metric.InstrumentOption{
		metric.WithInstrumentationName("counting-lib"),
		metric.WithInstrumentationVersion("v2"),
	}
	summingLib := []metric.InstrumentOption{
		metric.WithInstrumentationName("summing-lib"),
	}
	runMetricExportTests(
		t,
		nil,
		[]record{
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				countingLib1,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				countingLib2,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				countingLib1,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				countingLib1,
				append(baseKeyValues, cpuKey.Int(2)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstA,
				summingLib,
				append(baseKeyValues, cpuKey.Int(1)),
			},
			{
				"int64-count",
				metric.CounterInstrumentKind,
				number.Int64Kind,
				testInstB,
				countingLib1,
				append(baseKeyValues, cpuKey.Int(1)),
			},
		},
		[]metricpb.ResourceMetrics{
			{
				Resource: testerAResource,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						InstrumentationLibrary: &commonpb.InstrumentationLibrary{
							Name:    "counting-lib",
							Version: "v1",
						},
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
											{
												Value:             11,
												Labels:            cpu2Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
					{
						InstrumentationLibrary: &commonpb.InstrumentationLibrary{
							Name:    "counting-lib",
							Version: "v2",
						},
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
					{
						InstrumentationLibrary: &commonpb.InstrumentationLibrary{
							Name: "summing-lib",
						},
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
			{
				Resource: testerBResource,
				InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
					{
						InstrumentationLibrary: &commonpb.InstrumentationLibrary{
							Name:    "counting-lib",
							Version: "v1",
						},
						Metrics: []*metricpb.Metric{
							{
								Name: "int64-count",
								Data: &metricpb.Metric_IntSum{
									IntSum: &metricpb.IntSum{
										IsMonotonic:            true,
										AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
										DataPoints: []*metricpb.IntDataPoint{
											{
												Value:             11,
												Labels:            cpu1Labels,
												StartTimeUnixNano: startTime(),
												TimeUnixNano:      pointTime(),
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	)
}

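// TestStatelessExportKind checks that, with the stateless export kind
// selector, the exported aggregation temporality and monotonicity follow the
// instrument kind.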
func TestStatelessExportKind(t *testing.T) {
	type testcase struct {
		name           string
		instrumentKind metric.InstrumentKind
		aggTemporality metricpb.AggregationTemporality
		monotonic      bool
	}

	for _, k := range []testcase{
		{"counter", metric.CounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, true},
		{"updowncounter", metric.UpDownCounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, false},
		{"sumobserver", metric.SumObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, true},
		{"updownsumobserver", metric.UpDownSumObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, false},
	} {
		t.Run(k.name, func(t *testing.T) {
			runMetricExportTests(
				t,
				[]ExporterOption{
					WithMetricExportKindSelector(
						metricsdk.StatelessExportKindSelector(),
					),
				},
				[]record{
					{
						"instrument",
						k.instrumentKind,
						number.Int64Kind,
						testInstA,
						nil,
						append(baseKeyValues, cpuKey.Int(1)),
					},
				},
				[]metricpb.ResourceMetrics{
					{
						Resource: testerAResource,
						InstrumentationLibraryMetrics: []*metricpb.InstrumentationLibraryMetrics{
							{
								Metrics: []*metricpb.Metric{
									{
										Name: "instrument",
										Data: &metricpb.Metric_IntSum{
											IntSum: &metricpb.IntSum{
												IsMonotonic:            k.monotonic,
												AggregationTemporality: k.aggTemporality,
												DataPoints: []*metricpb.IntDataPoint{
													{
														Value:             11,
														Labels:            cpu1Labels,
														StartTimeUnixNano: startTime(),
														TimeUnixNano:      pointTime(),
													},
												},
											},
										},
									},
								},
							},
						},
					},
				},
			)
		})
	}
}

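// runMetricExportTests builds a checkpoint set per resource from rs, exports
// each through an exporter constructed with opts, and asserts that the
// metrics received by the test driver match expected, compared pairwise by
// resource and instrumentation library.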
func runMetricExportTests(t *testing.T, opts []ExporterOption, rs []record, expected []metricpb.ResourceMetrics) {
	exp, driver := newExporter(t, opts...)

	recs := map[label.Distinct][]metricsdk.Record{}
	resources := map[label.Distinct]*resource.Resource{}
	for _, r := range rs {
		lcopy := make([]label.KeyValue, len(r.labels))
		copy(lcopy, r.labels)
		desc := metric.NewDescriptor(r.name, r.iKind, r.nKind, r.opts...)
		labs := label.NewSet(lcopy...)

		var agg, ckpt metricsdk.Aggregator
		if r.iKind.Adding() {
			agg, ckpt = metrictest.Unslice2(sum.New(2))
		} else {
			agg, ckpt = metrictest.Unslice2(histogram.New(2, &desc, testHistogramBoundaries))
		}

		ctx := context.Background()
		if r.iKind.Synchronous() {
			// For synchronous instruments, perform two updates: 1 and 10
			switch r.nKind {
			case number.Int64Kind:
				require.NoError(t, agg.Update(ctx, number.NewInt64Number(1), &desc))
				require.NoError(t, agg.Update(ctx, number.NewInt64Number(10), &desc))
			case number.Float64Kind:
				require.NoError(t, agg.Update(ctx, number.NewFloat64Number(1), &desc))
				require.NoError(t, agg.Update(ctx, number.NewFloat64Number(10), &desc))
			default:
				t.Fatalf("invalid number kind: %v", r.nKind)
			}
		} else {
			// For asynchronous instruments, perform a single update: 11
			switch r.nKind {
			case number.Int64Kind:
				require.NoError(t, agg.Update(ctx, number.NewInt64Number(11), &desc))
			case number.Float64Kind:
				require.NoError(t, agg.Update(ctx, number.NewFloat64Number(11), &desc))
			default:
				t.Fatalf("invalid number kind: %v", r.nKind)
			}
		}
		require.NoError(t, agg.SynchronizedMove(ckpt, &desc))

		equiv := r.resource.Equivalent()
		resources[equiv] = r.resource
		recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, r.resource, ckpt.Aggregation(), intervalStart, intervalEnd))
	}
	for _, records := range recs {
		assert.NoError(t, exp.Export(context.Background(), &checkpointSet{records: records}))
	}

	// assert.ElementsMatch does not equate nested slices of different order,
	// therefore this requires the top level slice to be broken down.
	// Build a map of Resource/InstrumentationLibrary pairs to Metrics; from
	// that, validate that the metric elements match for all expected pairs.
	// Finally, make sure we saw all expected pairs.
	type key struct {
		resource, instrumentationLibrary string
	}
	got := map[key][]*metricpb.Metric{}
	for _, rm := range driver.rm {
		for _, ilm := range rm.InstrumentationLibraryMetrics {
			k := key{
				resource:               rm.GetResource().String(),
				instrumentationLibrary: ilm.GetInstrumentationLibrary().String(),
			}
			got[k] = ilm.GetMetrics()
		}
	}
	seen := map[key]struct{}{}
	for _, rm := range expected {
		for _, ilm := range rm.InstrumentationLibraryMetrics {
			k := key{
				resource:               rm.GetResource().String(),
				instrumentationLibrary: ilm.GetInstrumentationLibrary().String(),
			}
			seen[k] = struct{}{}
			g, ok := got[k]
			if !ok {
				t.Errorf("missing metrics for:\n\tResource: %s\n\tInstrumentationLibrary: %s\n", k.resource, k.instrumentationLibrary)
				continue
			}
			if !assert.Len(t, g, len(ilm.GetMetrics())) {
				continue
			}
			for i, expected := range ilm.GetMetrics() {
				assert.Equal(t, expected.Name, g[i].Name)
				assert.Equal(t, expected.Unit, g[i].Unit)
				assert.Equal(t, expected.Description, g[i].Description)
				switch g[i].Data.(type) {
				case *metricpb.Metric_IntGauge:
					assert.ElementsMatch(t, expected.GetIntGauge().DataPoints, g[i].GetIntGauge().DataPoints)
				case *metricpb.Metric_IntHistogram:
					assert.Equal(t,
						expected.GetIntHistogram().GetAggregationTemporality(),
						g[i].GetIntHistogram().GetAggregationTemporality(),
					)
					assert.ElementsMatch(t, expected.GetIntHistogram().DataPoints, g[i].GetIntHistogram().DataPoints)
				case *metricpb.Metric_IntSum:
					assert.Equal(t,
						expected.GetIntSum().GetAggregationTemporality(),
						g[i].GetIntSum().GetAggregationTemporality(),
					)
					assert.Equal(t,
						expected.GetIntSum().GetIsMonotonic(),
						g[i].GetIntSum().GetIsMonotonic(),
					)
					assert.ElementsMatch(t, expected.GetIntSum().DataPoints, g[i].GetIntSum().DataPoints)
				case *metricpb.Metric_DoubleGauge:
					assert.ElementsMatch(t, expected.GetDoubleGauge().DataPoints, g[i].GetDoubleGauge().DataPoints)
				case *metricpb.Metric_DoubleHistogram:
					assert.Equal(t,
						expected.GetDoubleHistogram().GetAggregationTemporality(),
						g[i].GetDoubleHistogram().GetAggregationTemporality(),
					)
					assert.ElementsMatch(t, expected.GetDoubleHistogram().DataPoints, g[i].GetDoubleHistogram().DataPoints)
				case *metricpb.Metric_DoubleSum:
					assert.Equal(t,
						expected.GetDoubleSum().GetAggregationTemporality(),
						g[i].GetDoubleSum().GetAggregationTemporality(),
					)
					assert.Equal(t,
						expected.GetDoubleSum().GetIsMonotonic(),
						g[i].GetDoubleSum().GetIsMonotonic(),
					)
					assert.ElementsMatch(t, expected.GetDoubleSum().DataPoints, g[i].GetDoubleSum().DataPoints)
				default:
					assert.Failf(t, "unknown data type", g[i].Name)
				}
			}
		}
	}
	for k := range got {
		if _, ok := seen[k]; !ok {
			t.Errorf("did not expect metrics for:\n\tResource: %s\n\tInstrumentationLibrary: %s\n", k.resource, k.instrumentationLibrary)
		}
	}
}

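// TestEmptyMetricExport checks that exporting an empty or nil set of records
// sends nothing to the driver.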
func TestEmptyMetricExport(t *testing.T) {
	exp, driver := newExporter(t)

	for _, test := range []struct {
		records []metricsdk.Record
		want    []metricpb.ResourceMetrics
	}{
		{
			[]metricsdk.Record(nil),
			[]metricpb.ResourceMetrics(nil),
		},
		{
			[]metricsdk.Record{},
			[]metricpb.ResourceMetrics(nil),
		},
	} {
		driver.Reset()
		require.NoError(t, exp.Export(context.Background(), &checkpointSet{records: test.records}))
		assert.Equal(t, test.want, driver.rm)
	}
}