Mirror of https://github.com/open-telemetry/opentelemetry-go.git
	Move Resource into the metric export Record (#739)
* Checkpoint
* Tests pass
@@ -66,7 +66,7 @@ func ExampleNewExportPipeline() {
 
 	// Simulate a push
 	meterImpl.Collect(ctx)
-	err = exporter.Export(ctx, nil, integrator.CheckpointSet())
+	err = exporter.Export(ctx, integrator.CheckpointSet())
 	if err != nil {
 		panic(err)
 	}

@@ -32,7 +32,6 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/controller/push"
 	integrator "go.opentelemetry.io/otel/sdk/metric/integrator/simple"
 	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
-	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 // Exporter is an implementation of metric.Exporter that sends metrics to
@@ -169,8 +168,7 @@ func NewExportPipeline(config Config, period time.Duration) (*push.Controller, h
 }
 
 // Export exports the provide metric record to prometheus.
-func (e *Exporter) Export(_ context.Context, _ *resource.Resource, checkpointSet export.CheckpointSet) error {
-	// TODO: Use the resource value in this exporter.
+func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
 	e.snapshot = checkpointSet
 	return nil
 }
@@ -211,6 +209,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 	err := c.exp.snapshot.ForEach(func(record export.Record) error {
 		agg := record.Aggregator()
 		numberKind := record.Descriptor().NumberKind()
+		// TODO: Use the resource value in this record.
 		labels := labelValues(record.Labels())
 		desc := c.toDesc(&record)
 

@@ -39,7 +39,7 @@ func TestPrometheusExporter(t *testing.T) {
 	}
 
 	var expected []string
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(nil)
 
 	counter := metric.NewDescriptor(
 		"counter", metric.CounterKind, metric.Float64NumberKind)
@@ -117,7 +117,7 @@ func TestPrometheusExporter(t *testing.T) {
 }
 
 func compareExport(t *testing.T, exporter *prometheus.Exporter, checkpointSet *test.CheckpointSet, expected []string) {
-	err := exporter.Export(context.Background(), nil, checkpointSet)
+	err := exporter.Export(context.Background(), checkpointSet)
 	require.Nil(t, err)
 
 	rec := httptest.NewRecorder()

@@ -25,7 +25,6 @@ import (
 
 	"go.opentelemetry.io/otel/api/global"
 	"go.opentelemetry.io/otel/api/label"
-	"go.opentelemetry.io/otel/sdk/resource"
 
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
@@ -145,18 +144,18 @@ func NewExportPipeline(config Config, period time.Duration, opts ...push.Option)
 	return pusher, nil
 }
 
-func (e *Exporter) Export(_ context.Context, resource *resource.Resource, checkpointSet export.CheckpointSet) error {
+func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
 	var aggError error
 	var batch expoBatch
 	if !e.config.DoNotPrintTime {
 		ts := time.Now()
 		batch.Timestamp = &ts
 	}
-	encodedResource := resource.Encoded(e.config.LabelEncoder)
 	aggError = checkpointSet.ForEach(func(record export.Record) error {
 		desc := record.Descriptor()
 		agg := record.Aggregator()
 		kind := desc.NumberKind()
+		encodedResource := record.Resource().Encoded(e.config.LabelEncoder)
 
 		var expose expoLine
 

@@ -44,10 +44,11 @@ type testFixture struct {
 	ctx      context.Context
 	exporter *stdout.Exporter
 	output   *bytes.Buffer
-	resource *resource.Resource
 }
 
-func newFixture(t *testing.T, resource *resource.Resource, config stdout.Config) testFixture {
+var testResource = resource.New(kv.String("R", "V"))
+
+func newFixture(t *testing.T, config stdout.Config) testFixture {
 	buf := &bytes.Buffer{}
 	config.Writer = buf
 	config.DoNotPrintTime = true
@@ -60,7 +61,6 @@ func newFixture(t *testing.T, resource *resource.Resource, config stdout.Config)
 		ctx:      context.Background(),
 		exporter: exp,
 		output:   buf,
-		resource: resource,
 	}
 }
 
@@ -69,7 +69,7 @@ func (fix testFixture) Output() string {
 }
 
 func (fix testFixture) Export(checkpointSet export.CheckpointSet) {
-	err := fix.exporter.Export(fix.ctx, fix.resource, checkpointSet)
+	err := fix.exporter.Export(fix.ctx, checkpointSet)
 	if err != nil {
 		fix.t.Error("export failed: ", err)
 	}
@@ -95,7 +95,7 @@ func TestStdoutTimestamp(t *testing.T) {
 
 	before := time.Now()
 
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	ctx := context.Background()
 	desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Int64NumberKind)
@@ -105,7 +105,7 @@ func TestStdoutTimestamp(t *testing.T) {
 
 	checkpointSet.Add(&desc, lvagg)
 
-	if err := exporter.Export(ctx, nil, checkpointSet); err != nil {
+	if err := exporter.Export(ctx, checkpointSet); err != nil {
 		t.Fatal("Unexpected export error: ", err)
 	}
 
@@ -139,9 +139,9 @@ func TestStdoutTimestamp(t *testing.T) {
 }
 
 func TestStdoutCounterFormat(t *testing.T) {
-	fix := newFixture(t, nil, stdout.Config{})
+	fix := newFixture(t, stdout.Config{})
 
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	desc := metric.NewDescriptor("test.name", metric.CounterKind, metric.Int64NumberKind)
 	cagg := sum.New()
@@ -152,13 +152,13 @@ func TestStdoutCounterFormat(t *testing.T) {
 
 	fix.Export(checkpointSet)
 
-	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":123}]}`, fix.Output())
+	require.Equal(t, `{"updates":[{"name":"test.name{R=V,A=B,C=D}","sum":123}]}`, fix.Output())
 }
 
 func TestStdoutLastValueFormat(t *testing.T) {
-	fix := newFixture(t, nil, stdout.Config{})
+	fix := newFixture(t, stdout.Config{})
 
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind)
 	lvagg := lastvalue.New()
@@ -169,13 +169,13 @@ func TestStdoutLastValueFormat(t *testing.T) {
 
 	fix.Export(checkpointSet)
 
-	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","last":123.456}]}`, fix.Output())
+	require.Equal(t, `{"updates":[{"name":"test.name{R=V,A=B,C=D}","last":123.456}]}`, fix.Output())
 }
 
 func TestStdoutMinMaxSumCount(t *testing.T) {
-	fix := newFixture(t, nil, stdout.Config{})
+	fix := newFixture(t, stdout.Config{})
 
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
 	magg := minmaxsumcount.New(&desc)
@@ -187,15 +187,15 @@ func TestStdoutMinMaxSumCount(t *testing.T) {
 
 	fix.Export(checkpointSet)
 
-	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","min":123.456,"max":876.543,"sum":999.999,"count":2}]}`, fix.Output())
+	require.Equal(t, `{"updates":[{"name":"test.name{R=V,A=B,C=D}","min":123.456,"max":876.543,"sum":999.999,"count":2}]}`, fix.Output())
 }
 
 func TestStdoutValueRecorderFormat(t *testing.T) {
-	fix := newFixture(t, nil, stdout.Config{
+	fix := newFixture(t, stdout.Config{
 		PrettyPrint: true,
 	})
 
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
 	magg := array.New()
@@ -213,7 +213,7 @@ func TestStdoutValueRecorderFormat(t *testing.T) {
 	require.Equal(t, `{
 	"updates": [
 		{
-			"name": "test.name{A=B,C=D}",
+			"name": "test.name{R=V,A=B,C=D}",
 			"min": 0.5,
 			"max": 999.5,
 			"sum": 500000,
@@ -247,9 +247,9 @@ func TestStdoutNoData(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			t.Parallel()
 
-			fix := newFixture(t, nil, stdout.Config{})
+			fix := newFixture(t, stdout.Config{})
 
-			checkpointSet := test.NewCheckpointSet()
+			checkpointSet := test.NewCheckpointSet(testResource)
 
 			magg := tc
 			magg.Checkpoint(fix.ctx, &desc)
@@ -264,9 +264,9 @@ func TestStdoutNoData(t *testing.T) {
 }
 
 func TestStdoutLastValueNotSet(t *testing.T) {
-	fix := newFixture(t, nil, stdout.Config{})
+	fix := newFixture(t, stdout.Config{})
 
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind)
 	lvagg := lastvalue.New()
@@ -314,9 +314,9 @@ func TestStdoutResource(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-		fix := newFixture(t, tc.res, stdout.Config{})
+		fix := newFixture(t, stdout.Config{})
 
-		checkpointSet := test.NewCheckpointSet()
+		checkpointSet := test.NewCheckpointSet(tc.res)
 
 		desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind)
 		lvagg := lastvalue.New()

@@ -27,6 +27,7 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
+	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 type mapkey struct {
@@ -35,15 +36,17 @@ type mapkey struct {
 }
 
 type CheckpointSet struct {
-	records map[mapkey]export.Record
-	updates []export.Record
+	records  map[mapkey]export.Record
+	resource *resource.Resource
+	updates  []export.Record
 }
 
 // NewCheckpointSet returns a test CheckpointSet that new records could be added.
 // Records are grouped by their encoded labels.
-func NewCheckpointSet() *CheckpointSet {
+func NewCheckpointSet(resource *resource.Resource) *CheckpointSet {
 	return &CheckpointSet{
-		records: make(map[mapkey]export.Record),
+		records:  make(map[mapkey]export.Record),
+		resource: resource,
 	}
 }
 
@@ -67,7 +70,7 @@ func (p *CheckpointSet) Add(desc *metric.Descriptor, newAgg export.Aggregator, l
 		return record.Aggregator(), false
 	}
 
-	rec := export.NewRecord(desc, &elabels, newAgg)
+	rec := export.NewRecord(desc, &elabels, p.resource, newAgg)
 	p.updates = append(p.updates, rec)
 	p.records[key] = rec
 	return newAgg, true

@@ -61,7 +61,7 @@ type result struct {
 
 // CheckpointSet transforms all records contained in a checkpoint into
 // batched OTLP ResourceMetrics.
-func CheckpointSet(ctx context.Context, resource *resource.Resource, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
+func CheckpointSet(ctx context.Context, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
 	records, errc := source(ctx, cps)
 
 	// Start a fixed number of goroutines to transform records.
@@ -71,7 +71,7 @@ func CheckpointSet(ctx context.Context, resource *resource.Resource, cps export.
 	for i := uint(0); i < numWorkers; i++ {
 		go func() {
 			defer wg.Done()
-			transformer(ctx, resource, records, transformed)
+			transformer(ctx, records, transformed)
 		}()
 	}
 	go func() {
@@ -116,7 +116,7 @@ func source(ctx context.Context, cps export.CheckpointSet) (<-chan export.Record
 
 // transformer transforms records read from the passed in chan into
 // OTLP Metrics which are sent on the out chan.
-func transformer(ctx context.Context, resource *resource.Resource, in <-chan export.Record, out chan<- result) {
+func transformer(ctx context.Context, in <-chan export.Record, out chan<- result) {
 	for r := range in {
 		m, err := Record(r)
 		// Propagate errors, but do not send empty results.
@@ -124,7 +124,7 @@ func transformer(ctx context.Context, resource *resource.Resource, in <-chan exp
 			continue
 		}
 		res := result{
-			Resource: resource,
+			Resource: r.Resource(),
 			Library:  r.Descriptor().LibraryName(),
 			Metric:   m,
 			Err:      err,

@@ -31,7 +31,6 @@ import (
 	"go.opentelemetry.io/otel/exporters/otlp/internal/transform"
 	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
 	tracesdk "go.opentelemetry.io/otel/sdk/export/trace"
-	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 type Exporter struct {
@@ -212,7 +211,7 @@ func (e *Exporter) Stop() error {
 // Export implements the "go.opentelemetry.io/otel/sdk/export/metric".Exporter
 // interface. It transforms and batches metric Records into OTLP Metrics and
 // transmits them to the configured collector.
-func (e *Exporter) Export(parent context.Context, resource *resource.Resource, cps metricsdk.CheckpointSet) error {
+func (e *Exporter) Export(parent context.Context, cps metricsdk.CheckpointSet) error {
 	// Unify the parent context Done signal with the exporter stopCh.
 	ctx, cancel := context.WithCancel(parent)
 	defer cancel()
@@ -224,7 +223,7 @@ func (e *Exporter) Export(parent context.Context, resource *resource.Resource, c
 		}
 	}(ctx, cancel)
 
-	rms, err := transform.CheckpointSet(ctx, resource, cps, e.c.numWorkers)
+	rms, err := transform.CheckpointSet(ctx, cps, e.c.numWorkers)
 	if err != nil {
 		return err
 	}

@@ -659,11 +659,10 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me
 
 		equiv := r.resource.Equivalent()
 		resources[equiv] = r.resource
-		recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, agg))
+		recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, r.resource, agg))
 	}
-	for equiv, records := range recs {
-		resource := resources[equiv]
-		assert.NoError(t, exp.Export(context.Background(), resource, checkpointSet{records: records}))
+	for _, records := range recs {
+		assert.NoError(t, exp.Export(context.Background(), checkpointSet{records: records}))
 	}
 
 	// assert.ElementsMatch does not equate nested slices of different order,
@@ -713,8 +712,6 @@ func TestEmptyMetricExport(t *testing.T) {
 	exp.metricExporter = msc
 	exp.started = true
 
-	resource := resource.New(kv.String("R", "S"))
-
 	for _, test := range []struct {
 		records []metricsdk.Record
 		want    []metricpb.ResourceMetrics
@@ -729,7 +726,7 @@ func TestEmptyMetricExport(t *testing.T) {
 		},
 	} {
 		msc.Reset()
-		require.NoError(t, exp.Export(context.Background(), resource, checkpointSet{records: test.records}))
+		require.NoError(t, exp.Export(context.Background(), checkpointSet{records: test.records}))
 		assert.Equal(t, test.want, msc.ResourceMetrics())
 	}
 }

@@ -154,12 +154,9 @@ type Exporter interface {
 	// The Context comes from the controller that initiated
 	// collection.
 	//
-	// The Resource contains common attributes that apply to all
-	// metric events in the SDK.
-	//
 	// The CheckpointSet interface refers to the Integrator that just
 	// completed collection.
-	Export(context.Context, *resource.Resource, CheckpointSet) error
+	Export(context.Context, CheckpointSet) error
 }
 
 // CheckpointSet allows a controller to access a complete checkpoint of
@@ -183,16 +180,18 @@ type CheckpointSet interface {
 type Record struct {
 	descriptor *metric.Descriptor
 	labels     *label.Set
+	resource   *resource.Resource
 	aggregator Aggregator
 }
 
 // NewRecord allows Integrator implementations to construct export
 // records.  The Descriptor, Labels, and Aggregator represent
 // aggregate metric events received over a single collection period.
-func NewRecord(descriptor *metric.Descriptor, labels *label.Set, aggregator Aggregator) Record {
+func NewRecord(descriptor *metric.Descriptor, labels *label.Set, resource *resource.Resource, aggregator Aggregator) Record {
 	return Record{
 		descriptor: descriptor,
 		labels:     labels,
+		resource:   resource,
 		aggregator: aggregator,
 	}
 }
@@ -213,3 +212,8 @@ func (r Record) Descriptor() *metric.Descriptor {
 func (r Record) Labels() *label.Set {
 	return r.labels
 }
+
+// Resource contains common attributes that apply to this metric event.
+func (r Record) Resource() *resource.Resource {
+	return r.resource
+}

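With this interface change an exporter no longer receives one Resource per Export call; it reads the Resource carried by each Record instead. Below is a minimal sketch of an exporter written against the new signature, using only calls that appear in the hunks in this commit; the logExporter type itself is hypothetical and not part of this change.

package sketch

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/api/label"
	export "go.opentelemetry.io/otel/sdk/export/metric"
)

// logExporter is a hypothetical exporter implementing the reduced interface.
type logExporter struct{}

var _ export.Exporter = logExporter{}

// Export receives only the context and the CheckpointSet; the resource is
// now read per record through record.Resource().
func (logExporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
	return checkpointSet.ForEach(func(record export.Record) error {
		fmt.Printf("%s{%s} resource={%s}\n",
			record.Descriptor().Name(),
			record.Labels().Encoded(label.DefaultEncoder()),
			record.Resource().Encoded(label.DefaultEncoder()),
		)
		return nil
	})
}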
@@ -14,6 +14,8 @@
 
 package metric
 
+import "go.opentelemetry.io/otel/sdk/resource"
+
 // Config contains configuration for an SDK.
 type Config struct {
 	// ErrorHandler is the function called when the SDK encounters an error.
@@ -21,6 +23,10 @@ type Config struct {
 	// This option can be overridden after instantiation of the SDK
 	// with the `SetErrorHandler` method.
 	ErrorHandler ErrorHandler
+
+	// Resource describes all the metric records processed by the
+	// Accumulator.
+	Resource *resource.Resource
 }
 
 // Option is the interface that applies the value to a configuration option.
@@ -39,3 +45,16 @@ type errorHandlerOption ErrorHandler
 func (o errorHandlerOption) Apply(config *Config) {
 	config.ErrorHandler = ErrorHandler(o)
 }
+
+// WithResource sets the Resource configuration option of a Config.
+func WithResource(res *resource.Resource) Option {
+	return resourceOption{res}
+}
+
+type resourceOption struct {
+	*resource.Resource
+}
+
+func (o resourceOption) Apply(config *Config) {
+	config.Resource = o.Resource
+}

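The new WithResource option is how a Resource reaches the Accumulator, which then attaches it to every Record it produces (see the sdk and controller hunks below). A minimal sketch of the wiring, mirroring the newSDK helper added in the correctness test hunks further down; the integrator argument is assumed to be any export.Integrator implementation, and the attribute value is an arbitrary example.

package sketch

import (
	"go.opentelemetry.io/otel/api/kv"
	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	metricsdk "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/resource"
)

// newMeter wires a Resource into the Accumulator via the new option; the
// integrator may be any export.Integrator (e.g. the simple integrator).
func newMeter(integrator export.Integrator) (metric.Meter, *metricsdk.Accumulator) {
	res := resource.New(kv.String("R", "V"))
	accum := metricsdk.NewAccumulator(integrator, metricsdk.WithResource(res))
	meter := metric.WrapMeterImpl(accum, "example")
	return meter, accum
}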
@@ -31,7 +31,7 @@ type Controller struct {
 	lock         sync.Mutex
 	collectLock  sync.Mutex
 	accumulator  *sdk.Accumulator
-	resource     *resource.Resource
+	provider     *registry.Provider
 	errorHandler sdk.ErrorHandler
 	integrator   export.Integrator
 	exporter     export.Exporter
@@ -40,7 +40,6 @@ type Controller struct {
 	period       time.Duration
 	ticker       Ticker
 	clock        Clock
-	provider     *registry.Provider
 }
 
 // Several types below are created to match "github.com/benbjohnson/clock"
@@ -71,15 +70,17 @@ var _ Ticker = realTicker{}
 // configuration options to configure an SDK with periodic collection.
 // The integrator itself is configured with the aggregation selector policy.
 func New(integrator export.Integrator, exporter export.Exporter, period time.Duration, opts ...Option) *Controller {
-	c := &Config{ErrorHandler: sdk.DefaultErrorHandler}
+	c := &Config{
+		ErrorHandler: sdk.DefaultErrorHandler,
+		Resource:     resource.Empty(),
+	}
 	for _, opt := range opts {
 		opt.Apply(c)
 	}
 
-	impl := sdk.NewAccumulator(integrator, sdk.WithErrorHandler(c.ErrorHandler))
+	impl := sdk.NewAccumulator(integrator, sdk.WithErrorHandler(c.ErrorHandler), sdk.WithResource(c.Resource))
 	return &Controller{
 		accumulator:  impl,
-		resource:     c.Resource,
 		provider:     registry.NewProvider(impl),
 		errorHandler: c.ErrorHandler,
 		integrator:   integrator,
@@ -166,7 +167,7 @@ func (c *Controller) tick() {
 		mtx:      &c.collectLock,
 		delegate: c.integrator.CheckpointSet(),
 	}
-	err := c.exporter.Export(ctx, c.resource, checkpointSet)
+	err := c.exporter.Export(ctx, checkpointSet)
 	c.integrator.FinishedCollection()
 
 	if err != nil {

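For the push controller the Resource now defaults to resource.Empty() and is forwarded to the Accumulator through sdk.WithResource, as the hunk above shows. A sketch of configuring it from the caller's side; this assumes the push package exposes a WithResource Option that populates Config.Resource (the constructor reads c.Resource, but the option name here is an assumption, not confirmed by this diff).

package sketch

import (
	"time"

	"go.opentelemetry.io/otel/api/kv"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/metric/controller/push"
	"go.opentelemetry.io/otel/sdk/resource"
)

// startPush builds a push Controller whose exported records will all carry
// the given resource. push.WithResource is assumed to be the Option that
// fills Config.Resource.
func startPush(integrator export.Integrator, exporter export.Exporter) *push.Controller {
	res := resource.New(kv.String("R", "V"))
	pusher := push.New(integrator, exporter, time.Minute, push.WithResource(res))
	pusher.Start()
	return pusher
}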
@@ -25,6 +25,8 @@ import (
 	"github.com/benbjohnson/clock"
 	"github.com/stretchr/testify/require"
 
+	"go.opentelemetry.io/otel/api/kv"
+	"go.opentelemetry.io/otel/api/label"
 	"go.opentelemetry.io/otel/api/metric"
 	"go.opentelemetry.io/otel/exporters/metric/test"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
@@ -42,6 +44,8 @@ type testIntegrator struct {
 	finishes      int
 }
 
+var testResource = resource.New(kv.String("R", "V"))
+
 type testExporter struct {
 	t         *testing.T
 	lock      sync.Mutex
@@ -68,7 +72,7 @@ var _ push.Clock = mockClock{}
 var _ push.Ticker = mockTicker{}
 
 func newFixture(t *testing.T) testFixture {
-	checkpointSet := test.NewCheckpointSet()
+	checkpointSet := test.NewCheckpointSet(testResource)
 
 	integrator := &testIntegrator{
 		t:             t,
@@ -115,7 +119,7 @@ func (b *testIntegrator) getCounts() (checkpoints, finishes int) {
 	return b.checkpoints, b.finishes
 }
 
-func (e *testExporter) Export(_ context.Context, _ *resource.Resource, checkpointSet export.CheckpointSet) error {
+func (e *testExporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
 	e.lock.Lock()
 	defer e.lock.Unlock()
 	e.exports++
@@ -213,6 +217,7 @@ func TestPushTicker(t *testing.T) {
 	require.Equal(t, 1, exports)
 	require.Equal(t, 1, len(records))
 	require.Equal(t, "counter", records[0].Descriptor().Name())
+	require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder()))
 
 	sum, err := records[0].Aggregator().(aggregator.Sum).Sum()
 	require.Equal(t, int64(3), sum.AsInt64())
@@ -232,6 +237,7 @@ func TestPushTicker(t *testing.T) {
 	require.Equal(t, 2, exports)
 	require.Equal(t, 1, len(records))
 	require.Equal(t, "counter", records[0].Descriptor().Name())
+	require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder()))
 
 	sum, err = records[0].Aggregator().(aggregator.Sum).Sum()
 	require.Equal(t, int64(7), sum.AsInt64())
@@ -256,8 +262,8 @@ func TestPushExportError(t *testing.T) {
 		expectedDescriptors []string
 		expectedError       error
 	}{
-		{"errNone", nil, []string{"counter1", "counter2"}, nil},
-		{"errNoData", aggregator.ErrNoData, []string{"counter2"}, nil},
+		{"errNone", nil, []string{"counter1{R=V,X=Y}", "counter2{R=V,}"}, nil},
+		{"errNoData", aggregator.ErrNoData, []string{"counter2{R=V,}"}, nil},
 		{"errUnexpected", errAggregator, []string{}, errAggregator},
 	}
 	for _, tt := range tests {
@@ -287,7 +293,7 @@ func TestPushExportError(t *testing.T) {
 			p.Start()
 			runtime.Gosched()
 
-			counter1.Add(ctx, 3)
+			counter1.Add(ctx, 3, kv.String("X", "Y"))
 			counter2.Add(ctx, 5)
 
 			require.Equal(t, 0, fix.exporter.exports)
@@ -311,11 +317,16 @@ func TestPushExportError(t *testing.T) {
 			lock.Unlock()
 			require.Equal(t, len(tt.expectedDescriptors), len(records))
 			for _, r := range records {
-				require.Contains(t, tt.expectedDescriptors, r.Descriptor().Name())
+				require.Contains(t, tt.expectedDescriptors,
+					fmt.Sprintf("%s{%s,%s}",
+						r.Descriptor().Name(),
+						r.Resource().Encoded(label.DefaultEncoder()),
+						r.Labels().Encoded(label.DefaultEncoder()),
+					),
+				)
 			}
 
 			p.Stop()
-
 		})
 	}
 }

@@ -33,9 +33,11 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
 	batchTest "go.opentelemetry.io/otel/sdk/metric/integrator/test"
+	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 var Must = metric.Must
+var testResource = resource.New(kv.String("R", "V"))
 
 type correctnessIntegrator struct {
 	newAggCount int64
@@ -45,6 +47,15 @@ type correctnessIntegrator struct {
 	records []export.Record
 }
 
+func newSDK(t *testing.T) (metric.Meter, *metricsdk.Accumulator, *correctnessIntegrator) {
+	integrator := &correctnessIntegrator{
+		t: t,
+	}
+	accum := metricsdk.NewAccumulator(integrator, metricsdk.WithResource(testResource))
+	meter := metric.WrapMeterImpl(accum, "test")
+	return meter, accum, integrator
+}
+
 func (cb *correctnessIntegrator) AggregatorFor(descriptor *metric.Descriptor) (agg export.Aggregator) {
 	name := descriptor.Name()
 
@@ -77,11 +88,7 @@ func (cb *correctnessIntegrator) Process(_ context.Context, record export.Record
 
 func TestInputRangeTestCounter(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	var sdkErr error
 	sdk.SetErrorHandler(func(handleErr error) {
@@ -109,11 +116,7 @@ func TestInputRangeTestCounter(t *testing.T) {
 
 func TestInputRangeTestValueRecorder(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	var sdkErr error
 	sdk.SetErrorHandler(func(handleErr error) {
@@ -144,11 +147,7 @@ func TestInputRangeTestValueRecorder(t *testing.T) {
 
 func TestDisabledInstrument(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	valuerecorder := Must(meter).NewFloat64ValueRecorder("name.disabled")
 
@@ -161,12 +160,7 @@ func TestDisabledInstrument(t *testing.T) {
 
 func TestRecordNaN(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, _ := newSDK(t)
 
 	var sdkErr error
 	sdk.SetErrorHandler(func(handleErr error) {
@@ -181,11 +175,7 @@ func TestRecordNaN(t *testing.T) {
 
 func TestSDKLabelsDeduplication(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	counter := Must(meter).NewInt64Counter("counter")
 
@@ -284,12 +274,7 @@ func TestDefaultLabelEncoder(t *testing.T) {
 
 func TestObserverCollection(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	_ = Must(meter).RegisterFloat64ValueObserver("float.valueobserver", func(result metric.Float64ObserverResult) {
 		result.Observe(1, kv.String("A", "B"))
@@ -317,21 +302,16 @@ func TestObserverCollection(t *testing.T) {
 		_ = out.AddTo(rec)
 	}
 	require.EqualValues(t, map[string]float64{
-		"float.valueobserver/A=B": -1,
-		"float.valueobserver/C=D": -1,
-		"int.valueobserver/":      1,
-		"int.valueobserver/A=B":   1,
+		"float.valueobserver/A=B/R=V": -1,
+		"float.valueobserver/C=D/R=V": -1,
+		"int.valueobserver//R=V":      1,
+		"int.valueobserver/A=B/R=V":   1,
 	}, out.Map)
 }
 
 func TestObserverBatch(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	var floatObs metric.Float64ValueObserver
 	var intObs metric.Int64ValueObserver
@@ -371,21 +351,16 @@ func TestObserverBatch(t *testing.T) {
 		_ = out.AddTo(rec)
 	}
 	require.EqualValues(t, map[string]float64{
-		"float.valueobserver/A=B": -1,
-		"float.valueobserver/C=D": -1,
-		"int.valueobserver/":      1,
-		"int.valueobserver/A=B":   1,
+		"float.valueobserver/A=B/R=V": -1,
+		"float.valueobserver/C=D/R=V": -1,
+		"int.valueobserver//R=V":      1,
+		"int.valueobserver/A=B/R=V":   1,
 	}, out.Map)
 }
 
 func TestRecordBatch(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	counter1 := Must(meter).NewInt64Counter("int64.counter")
 	counter2 := Must(meter).NewFloat64Counter("float64.counter")
@@ -411,10 +386,10 @@ func TestRecordBatch(t *testing.T) {
 		_ = out.AddTo(rec)
 	}
 	require.EqualValues(t, map[string]float64{
-		"int64.counter/A=B,C=D":         1,
-		"float64.counter/A=B,C=D":       2,
-		"int64.valuerecorder/A=B,C=D":   3,
-		"float64.valuerecorder/A=B,C=D": 4,
+		"int64.counter/A=B,C=D/R=V":         1,
+		"float64.counter/A=B,C=D/R=V":       2,
+		"int64.valuerecorder/A=B,C=D/R=V":   3,
+		"float64.valuerecorder/A=B,C=D/R=V": 4,
	}, out.Map)
 }
 
@@ -423,12 +398,7 @@ func TestRecordBatch(t *testing.T) {
 // that its encoded labels will be cached across collection intervals.
 func TestRecordPersistence(t *testing.T) {
 	ctx := context.Background()
-	integrator := &correctnessIntegrator{
-		t: t,
-	}
-
-	sdk := metricsdk.NewAccumulator(integrator)
-	meter := metric.WrapMeterImpl(sdk, "test")
+	meter, sdk, integrator := newSDK(t)
 
 	c := Must(meter).NewFloat64Counter("sum.name")
 	b := c.Bind(kv.String("bound", "true"))

@@ -22,6 +22,7 @@ import (
 	"go.opentelemetry.io/otel/api/metric"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
+	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 type (
@@ -34,11 +35,13 @@ type (
 	batchKey struct {
 		descriptor *metric.Descriptor
 		distinct   label.Distinct
+		resource   label.Distinct
 	}
 
 	batchValue struct {
 		aggregator export.Aggregator
 		labels     *label.Set
+		resource   *resource.Resource
 	}
 
 	batchMap map[batchKey]batchValue
@@ -64,6 +67,7 @@ func (b *Integrator) Process(_ context.Context, record export.Record) error {
 	key := batchKey{
 		descriptor: desc,
 		distinct:   record.Labels().Equivalent(),
+		resource:   record.Resource().Equivalent(),
 	}
 	agg := record.Aggregator()
 	value, ok := b.batchMap[key]
@@ -91,6 +95,7 @@ func (b *Integrator) Process(_ context.Context, record export.Record) error {
 	b.batchMap[key] = batchValue{
 		aggregator: agg,
 		labels:     record.Labels(),
+		resource:   record.Resource(),
 	}
 	return nil
 }
@@ -110,6 +115,7 @@ func (c batchMap) ForEach(f func(export.Record) error) error {
 		if err := f(export.NewRecord(
 			key.descriptor,
 			value.labels,
+			value.resource,
 			value.aggregator,
 		)); err != nil && !errors.Is(err, aggregator.ErrNoData) {
 			return err

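The batch key gaining a resource field matters because two records that agree on descriptor and labels but come from different Resources must not be merged into one aggregator. The key stores label.Distinct values, which are comparable; a small standalone sketch of that property (the attribute values are arbitrary examples, and the expected results are stated as assumptions rather than guarantees):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/kv"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	resA := resource.New(kv.String("R", "V"))
	resB := resource.New(kv.String("R", "W"))

	// Equivalent() returns a comparable label.Distinct; records whose
	// resources differ therefore land under different batch keys.
	fmt.Println(resA.Equivalent() == resB.Equivalent()) // expected: false
	// Equal attribute sets are expected to produce equal Distinct values.
	fmt.Println(resA.Equivalent() == resource.New(kv.String("R", "V")).Equivalent()) // expected: true
}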
@@ -68,18 +68,18 @@ func TestUngroupedStateless(t *testing.T) {
 	// Output lastvalue should have only the "G=H" and "G=" keys.
 	// Output counter should have only the "C=D" and "C=" keys.
 	require.EqualValues(t, map[string]float64{
-		"sum.a/C~D&G~H":       60, // labels1
-		"sum.a/C~D&E~F":       20, // labels2
-		"sum.a/":              40, // labels3
-		"sum.b/C~D&G~H":       60, // labels1
-		"sum.b/C~D&E~F":       20, // labels2
-		"sum.b/":              40, // labels3
-		"lastvalue.a/C~D&G~H": 50, // labels1
-		"lastvalue.a/C~D&E~F": 20, // labels2
-		"lastvalue.a/":        30, // labels3
-		"lastvalue.b/C~D&G~H": 50, // labels1
-		"lastvalue.b/C~D&E~F": 20, // labels2
-		"lastvalue.b/":        30, // labels3
+		"sum.a/C~D&G~H/R~V":       60, // labels1
+		"sum.a/C~D&E~F/R~V":       20, // labels2
+		"sum.a//R~V":              40, // labels3
+		"sum.b/C~D&G~H/R~V":       60, // labels1
+		"sum.b/C~D&E~F/R~V":       20, // labels2
+		"sum.b//R~V":              40, // labels3
+		"lastvalue.a/C~D&G~H/R~V": 50, // labels1
+		"lastvalue.a/C~D&E~F/R~V": 20, // labels2
+		"lastvalue.a//R~V":        30, // labels3
+		"lastvalue.b/C~D&G~H/R~V": 50, // labels1
+		"lastvalue.b/C~D&E~F/R~V": 20, // labels2
+		"lastvalue.b//R~V":        30, // labels3
 	}, records.Map)
 
 	// Verify that state was reset
@@ -110,8 +110,8 @@ func TestUngroupedStateful(t *testing.T) {
 	_ = checkpointSet.ForEach(records1.AddTo)
 
 	require.EqualValues(t, map[string]float64{
-		"sum.a/C~D&G~H": 10, // labels1
-		"sum.b/C~D&G~H": 10, // labels1
+		"sum.a/C~D&G~H/R~V": 10, // labels1
+		"sum.b/C~D&G~H/R~V": 10, // labels1
 	}, records1.Map)
 
 	// Test that state was NOT reset
@@ -140,8 +140,8 @@ func TestUngroupedStateful(t *testing.T) {
 	require.EqualValues(t, records1.Map, records3.Map)
 
 	// Now process the second update
-	_ = b.Process(ctx, export.NewRecord(&test.CounterADesc, test.Labels1, caggA))
-	_ = b.Process(ctx, export.NewRecord(&test.CounterBDesc, test.Labels1, caggB))
+	_ = b.Process(ctx, export.NewRecord(&test.CounterADesc, test.Labels1, test.Resource, caggA))
+	_ = b.Process(ctx, export.NewRecord(&test.CounterBDesc, test.Labels1, test.Resource, caggB))
 
 	checkpointSet = b.CheckpointSet()
 	b.FinishedCollection()
@@ -150,7 +150,7 @@ func TestUngroupedStateful(t *testing.T) {
 	_ = checkpointSet.ForEach(records4.AddTo)
 
 	require.EqualValues(t, map[string]float64{
-		"sum.a/C~D&G~H": 30,
-		"sum.b/C~D&G~H": 30,
+		"sum.a/C~D&G~H/R~V": 30,
+		"sum.b/C~D&G~H/R~V": 30,
 	}, records4.Map)
 }

@@ -26,6 +26,7 @@ import (
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
+	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 type (
@@ -45,6 +46,9 @@ type (
 )
 
 var (
+	// Resource is applied to all test records built in this package.
+	Resource = resource.New(kv.String("R", "V"))
+
 	// LastValueADesc and LastValueBDesc group by "G"
 	LastValueADesc = metric.NewDescriptor(
 		"lastvalue.a", metric.ValueObserverKind, metric.Int64NumberKind)
@@ -133,12 +137,12 @@ func LastValueAgg(desc *metric.Descriptor, v int64) export.Aggregator {
 
 // Convenience method for building a test exported lastValue record.
 func NewLastValueRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record {
-	return export.NewRecord(desc, labels, LastValueAgg(desc, value))
+	return export.NewRecord(desc, labels, Resource, LastValueAgg(desc, value))
 }
 
 // Convenience method for building a test exported counter record.
 func NewCounterRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record {
-	return export.NewRecord(desc, labels, CounterAgg(desc, value))
+	return export.NewRecord(desc, labels, Resource, CounterAgg(desc, value))
 }
 
 // CounterAgg returns a checkpointed counter aggregator w/ the specified descriptor and value.
@@ -154,7 +158,8 @@ func CounterAgg(desc *metric.Descriptor, v int64) export.Aggregator {
 // value to the output map.
 func (o Output) AddTo(rec export.Record) error {
 	encoded := rec.Labels().Encoded(o.labelEncoder)
-	key := fmt.Sprint(rec.Descriptor().Name(), "/", encoded)
+	rencoded := rec.Resource().Encoded(o.labelEncoder)
+	key := fmt.Sprint(rec.Descriptor().Name(), "/", encoded, "/", rencoded)
 	var value float64
 
 	if s, ok := rec.Aggregator().(aggregator.Sum); ok {

@@ -29,6 +29,7 @@ import (
 	internal "go.opentelemetry.io/otel/internal/metric"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
+	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 type (
@@ -68,6 +69,9 @@ type (
 		// place for sorting during labels creation to avoid
 		// allocation.  It is cleared after use.
 		asyncSortSlice label.Sortable
+
+		// resource is applied to all records in this Accumulator.
+		resource *resource.Resource
 	}
 
 	syncInstrument struct {
@@ -317,6 +321,7 @@ func NewAccumulator(integrator export.Integrator, opts ...Option) *Accumulator {
 		integrator:       integrator,
 		errorHandler:     c.ErrorHandler,
 		asyncInstruments: internal.NewAsyncInstrumentState(c.ErrorHandler),
+		resource:         c.Resource,
 	}
 }
 
@@ -472,7 +477,7 @@ func (m *Accumulator) checkpoint(ctx context.Context, descriptor *metric.Descrip
 	}
 	recorder.Checkpoint(ctx, descriptor)
 
-	exportRecord := export.NewRecord(descriptor, labels, recorder)
+	exportRecord := export.NewRecord(descriptor, labels, m.resource, recorder)
 	err := m.integrator.Process(ctx, exportRecord)
 	if err != nil {
 		m.errorHandler(err)