Mirror of https://github.com/open-telemetry/opentelemetry-go.git (synced 2025-02-09 13:37:12 +02:00)
Separate InstrumentationLibrary from metric.Descriptor (#2197)
* factor instrumentation library out of the instrument descriptor
* SDK tests pass
* checkpoint work
* otlp and opencensus tests passing
* prometheus
* tests pass, working on lint
* lint applied: MetricReader->Reader
* comments
* Changelog
* Apply suggestions from code review
  Co-authored-by: alrex <alrex.boten@gmail.com>
* remove an interdependency
* fix build
* re-indent one
* Apply suggestions from code review
  Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
* Lint&feedback
* update after rename
* comment fix
* style fix for meter options
* remove libraryReader, let Controller implement the reader API directly
* rename 'impl' field to 'provider'
* remove a type assertion
* move metric/registry into internal; move registry.MeterProvider into metric controller
* add test for controller registry function
* CheckpointSet->Reader everywhere
* lint
* remove two unnecessary accessor methods; Controller implements MeterProvider and InstrumentationLibraryReader directly, no need to get these
* use a sync.Map
* ensure the initOnce is always called; handle multiple errors
* Apply suggestions from code review
  Co-authored-by: Anthony Mirabella <a9@aneurysm9.com>
* cleanup locking in metrictest
* Revert "ensure the initOnce is always called; handle multiple errors"
  This reverts commit 3356eb5ed0c288ac3edcc2bc2e853aecda7f29b3.
* Revert "use a sync.Map"
  This reverts commit ea7bc599bd3a24c4acb4cd9facd13f08cd702237.
* restore the TODO about sync.Map

Co-authored-by: alrex <alrex.boten@gmail.com>
Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
Co-authored-by: Anthony Mirabella <a9@aneurysm9.com>
commit 3c8e1853f0 (parent 92551d3933)
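For orientation before the file-by-file diff, here is a condensed sketch of the new wiring described in the commit message. It is a minimal sketch only: the import paths for the processor, selector, and global packages are assumed from the packages referenced elsewhere in this change, and the exporter is omitted.

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/metric/global"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func main() {
	ctx := context.Background()

	// processor.NewFactory replaces the former processor.New.
	pusher := controller.New(
		processor.NewFactory(
			simple.NewWithInexpensiveDistribution(),
			export.StatelessExportKindSelector(),
		),
		controller.WithCollectPeriod(2*time.Second),
	)

	// The controller is a MeterProvider itself; no pusher.MeterProvider() hop.
	global.SetMeterProvider(pusher)

	if err := pusher.Start(ctx); err != nil {
		log.Fatalf("starting metric controller: %v", err)
	}
	defer func() { _ = pusher.Stop(ctx) }()

	// Meters also come straight from the controller now.
	meter := pusher.Meter("example")
	_ = meter
}
```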
@@ -61,6 +61,9 @@ This release includes an API and SDK for the tracing signal that will comply wit

- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.

### Deprecated
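To make the #2197 entries concrete, here is a minimal sketch of the two-level iteration from an exporter's point of view. The interfaces and their `ForEach` signatures come from the diff below; the `printExporter` type itself is hypothetical.

```go
package example

import (
	"context"
	"fmt"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/resource"
)

// printExporter is a toy exporter used only to show the iteration shape.
type printExporter struct {
	export.ExportKindSelector
}

func newPrintExporter() *printExporter {
	return &printExporter{ExportKindSelector: export.StatelessExportKindSelector()}
}

func (e *printExporter) Export(_ context.Context, _ *resource.Resource, ilr export.InstrumentationLibraryReader) error {
	// Outer level: one callback per instrumentation library.
	return ilr.ForEach(func(lib instrumentation.Library, reader export.Reader) error {
		// Inner level: the per-library Reader (formerly CheckpointSet) yields records.
		return reader.ForEach(e, func(rec export.Record) error {
			fmt.Printf("%s@%s: %s\n", lib.Name, lib.Version, rec.Descriptor().Name())
			return nil
		})
	})
}
```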
@@ -32,6 +32,7 @@ import (
	"go.opentelemetry.io/otel/metric/unit"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/resource"
)

@@ -55,18 +56,30 @@ func (e *exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metr
	if len(metrics) != 0 {
		res = convertResource(metrics[0].Resource)
	}
	return e.base.Export(ctx, res, &checkpointSet{metrics: metrics})
	return e.base.Export(ctx, res, &censusLibraryReader{metrics: metrics})
}

type checkpointSet struct {
	// RWMutex implements locking for the `CheckpointSet` interface.
type censusLibraryReader struct {
	metrics []*metricdata.Metric
}

func (r censusLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error {
	return readerFunc(instrumentation.Library{
		Name: "OpenCensus Bridge",
	}, &metricReader{metrics: r.metrics})
}

type metricReader struct {
	// RWMutex implements locking for the `Reader` interface.
	sync.RWMutex
	metrics []*metricdata.Metric
}

// ForEach iterates through the CheckpointSet, passing an
// export.Record with the appropriate aggregation to an exporter.
func (d *checkpointSet) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
var _ export.Reader = &metricReader{}

// ForEach iterates through the metrics data, synthesizing an
// export.Record with the appropriate aggregation for the exporter.
func (d *metricReader) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
	for _, m := range d.metrics {
		descriptor, err := convertDescriptor(m.Descriptor)
		if err != nil {
@@ -158,7 +171,6 @@ func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, e
	}
	opts := []metric.InstrumentOption{
		metric.WithDescription(ocDescriptor.Description),
		metric.WithInstrumentationName("OpenCensus Bridge"),
	}
	switch ocDescriptor.Unit {
	case metricdata.UnitDimensionless:
@@ -168,5 +180,6 @@ func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, e
	case metricdata.UnitMilliseconds:
		opts = append(opts, metric.WithUnit(unit.Milliseconds))
	}
	return metric.NewDescriptor(ocDescriptor.Name, ikind, nkind, opts...), nil
	cfg := metric.NewInstrumentConfig(opts...)
	return metric.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil
}
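The `convertDescriptor` change above is the descriptor-side half of the split: the instrumentation library no longer rides on the instrument descriptor, and `metric.NewDescriptor` now takes the resolved description and unit instead of the options slice. A small sketch with illustrative values:

```go
package example

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
	"go.opentelemetry.io/otel/metric/unit"
)

func newExampleDescriptor() metric.Descriptor {
	// Resolve description and unit from instrument options first...
	cfg := metric.NewInstrumentConfig(
		metric.WithDescription("bytes read"),
		metric.WithUnit(unit.Bytes),
	)
	// ...then build a descriptor that carries no instrumentation-library fields.
	return metric.NewDescriptor(
		"example.reads",
		sdkapi.CounterInstrumentKind,
		number.Int64Kind,
		cfg.Description(),
		cfg.Unit(),
	)
}
```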
@ -28,12 +28,15 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
@ -44,12 +47,13 @@ type fakeExporter struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, cps exportmetric.CheckpointSet) error {
|
||||
return cps.ForEach(f, func(record exportmetric.Record) error {
|
||||
f.resource = res
|
||||
f.records = append(f.records, record)
|
||||
return f.err
|
||||
})
|
||||
func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, ilr exportmetric.InstrumentationLibraryReader) error {
|
||||
return controllertest.ReadAll(ilr, export.StatelessExportKindSelector(),
|
||||
func(_ instrumentation.Library, record exportmetric.Record) error {
|
||||
f.resource = res
|
||||
f.records = append(f.records, record)
|
||||
return f.err
|
||||
})
|
||||
}
|
||||
|
||||
type fakeErrorHandler struct {
|
||||
@ -71,11 +75,10 @@ func (f *fakeErrorHandler) matches(err error) error {
|
||||
|
||||
func TestExportMetrics(t *testing.T) {
|
||||
now := time.Now()
|
||||
basicDesc := metric.NewDescriptor(
|
||||
basicDesc := metrictest.NewDescriptor(
|
||||
"",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
metric.WithInstrumentationName("OpenCensus Bridge"),
|
||||
)
|
||||
fakeErrorHandler := &fakeErrorHandler{}
|
||||
otel.SetErrorHandler(fakeErrorHandler)
|
||||
@ -393,11 +396,10 @@ func TestConvertDescriptor(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
desc: "empty descriptor",
|
||||
expected: metric.NewDescriptor(
|
||||
expected: metrictest.NewDescriptor(
|
||||
"",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
metric.WithInstrumentationName("OpenCensus Bridge"),
|
||||
),
|
||||
},
|
||||
{
|
||||
@ -408,11 +410,10 @@ func TestConvertDescriptor(t *testing.T) {
|
||||
Unit: metricdata.UnitBytes,
|
||||
Type: metricdata.TypeGaugeInt64,
|
||||
},
|
||||
expected: metric.NewDescriptor(
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
metric.WithInstrumentationName("OpenCensus Bridge"),
|
||||
metric.WithDescription("bar"),
|
||||
metric.WithUnit(unit.Bytes),
|
||||
),
|
||||
@ -425,11 +426,10 @@ func TestConvertDescriptor(t *testing.T) {
|
||||
Unit: metricdata.UnitMilliseconds,
|
||||
Type: metricdata.TypeGaugeFloat64,
|
||||
},
|
||||
expected: metric.NewDescriptor(
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Float64Kind,
|
||||
metric.WithInstrumentationName("OpenCensus Bridge"),
|
||||
metric.WithDescription("bar"),
|
||||
metric.WithUnit(unit.Milliseconds),
|
||||
),
|
||||
@ -442,11 +442,10 @@ func TestConvertDescriptor(t *testing.T) {
|
||||
Unit: metricdata.UnitDimensionless,
|
||||
Type: metricdata.TypeCumulativeInt64,
|
||||
},
|
||||
expected: metric.NewDescriptor(
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.CounterObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
metric.WithInstrumentationName("OpenCensus Bridge"),
|
||||
metric.WithDescription("bar"),
|
||||
metric.WithUnit(unit.Dimensionless),
|
||||
),
|
||||
@ -459,11 +458,10 @@ func TestConvertDescriptor(t *testing.T) {
|
||||
Unit: metricdata.UnitDimensionless,
|
||||
Type: metricdata.TypeCumulativeFloat64,
|
||||
},
|
||||
expected: metric.NewDescriptor(
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.CounterObserverInstrumentKind,
|
||||
number.Float64Kind,
|
||||
metric.WithInstrumentationName("OpenCensus Bridge"),
|
||||
metric.WithDescription("bar"),
|
||||
metric.WithUnit(unit.Dimensionless),
|
||||
),
|
||||
|
@ -8,6 +8,7 @@ require (
|
||||
go.opentelemetry.io/otel/metric v0.23.0
|
||||
go.opentelemetry.io/otel/sdk v1.0.0
|
||||
go.opentelemetry.io/otel/sdk/export/metric v0.23.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.23.0
|
||||
go.opentelemetry.io/otel/trace v1.0.0
|
||||
)
|
||||
|
||||
|
@ -1,5 +1,7 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -1,5 +1,7 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
|
@ -40,7 +40,7 @@ var (
|
||||
func initMeter() {
|
||||
config := prometheus.Config{}
|
||||
c := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
selector.NewWithHistogramDistribution(
|
||||
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
|
||||
),
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -43,16 +44,20 @@ type Exporter struct {
|
||||
}
|
||||
|
||||
// Export exports a batch of metrics.
|
||||
func (e *Exporter) Export(ctx context.Context, res *resource.Resource, checkpointSet metricsdk.CheckpointSet) error {
|
||||
rms, err := metrictransform.CheckpointSet(ctx, e, res, checkpointSet, 1)
|
||||
func (e *Exporter) Export(ctx context.Context, res *resource.Resource, ilr metricsdk.InstrumentationLibraryReader) error {
|
||||
rm, err := metrictransform.InstrumentationLibraryReader(ctx, e, res, ilr, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(rms) == 0 {
|
||||
if rm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e.client.UploadMetrics(ctx, rms)
|
||||
// TODO: There is never more than one resource emitted by this
|
||||
// call, as per the specification. We can change the
|
||||
// signature of UploadMetrics correspondingly. Here create a
|
||||
// singleton list to reduce the size of the current PR:
|
||||
return e.client.UploadMetrics(ctx, []*metricpb.ResourceMetrics{rm})
|
||||
}
|
||||
|
||||
// Start establishes a connection to the receiving endpoint.
|
||||
|
@ -17,7 +17,6 @@ package otlpmetric_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -30,12 +29,14 @@ import (
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
@ -85,26 +86,31 @@ func pointTime() uint64 {
|
||||
return uint64(intervalEnd.UnixNano())
|
||||
}
|
||||
|
||||
type checkpointSet struct {
|
||||
sync.RWMutex
|
||||
records []metricsdk.Record
|
||||
}
|
||||
|
||||
func (m *checkpointSet) ForEach(_ metricsdk.ExportKindSelector, fn func(metricsdk.Record) error) error {
|
||||
for _, r := range m.records {
|
||||
if err := fn(r); err != nil && err != aggregation.ErrNoData {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type record struct {
|
||||
type testRecord struct {
|
||||
name string
|
||||
iKind sdkapi.InstrumentKind
|
||||
nKind number.Kind
|
||||
opts []metric.InstrumentOption
|
||||
labels []attribute.KeyValue
|
||||
|
||||
meterName string
|
||||
meterOpts []metric.MeterOption
|
||||
}
|
||||
|
||||
func record(
|
||||
name string,
|
||||
iKind sdkapi.InstrumentKind,
|
||||
nKind number.Kind,
|
||||
labels []attribute.KeyValue,
|
||||
meterName string,
|
||||
meterOpts ...metric.MeterOption) testRecord {
|
||||
return testRecord{
|
||||
name: name,
|
||||
iKind: iKind,
|
||||
nKind: nKind,
|
||||
labels: labels,
|
||||
meterName: meterName,
|
||||
meterOpts: meterOpts,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
@ -154,26 +160,31 @@ var (
|
||||
testerAResourcePb = metrictransform.Resource(testerAResource)
|
||||
)
|
||||
|
||||
const (
|
||||
// Most of this test uses an empty instrumentation library name.
|
||||
testLibName = ""
|
||||
)
|
||||
|
||||
func TestNoGroupingExport(t *testing.T) {
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
nil,
|
||||
[]record{
|
||||
{
|
||||
resource.Empty(),
|
||||
[]testRecord{
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
{
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(2)),
|
||||
},
|
||||
testLibName,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
@ -213,13 +224,13 @@ func TestNoGroupingExport(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHistogramMetricGroupingExport(t *testing.T) {
|
||||
r := record{
|
||||
r := record(
|
||||
"histogram",
|
||||
sdkapi.HistogramInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
}
|
||||
testLibName,
|
||||
)
|
||||
expected := []*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
@ -259,22 +270,22 @@ func TestHistogramMetricGroupingExport(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
runMetricExportTests(t, nil, nil, []record{r, r}, expected)
|
||||
runMetricExportTests(t, nil, resource.Empty(), []testRecord{r, r}, expected)
|
||||
}
|
||||
|
||||
func TestCountInt64MetricGroupingExport(t *testing.T) {
|
||||
r := record{
|
||||
r := record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
}
|
||||
testLibName,
|
||||
)
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
nil,
|
||||
[]record{r, r},
|
||||
resource.Empty(),
|
||||
[]testRecord{r, r},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
@ -313,18 +324,18 @@ func TestCountInt64MetricGroupingExport(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCountFloat64MetricGroupingExport(t *testing.T) {
|
||||
r := record{
|
||||
r := record(
|
||||
"float64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Float64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
}
|
||||
testLibName,
|
||||
)
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
nil,
|
||||
[]record{r, r},
|
||||
resource.Empty(),
|
||||
[]testRecord{r, r},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
@ -367,35 +378,35 @@ func TestResourceMetricGroupingExport(t *testing.T) {
|
||||
t,
|
||||
nil,
|
||||
testerAResource,
|
||||
[]record{
|
||||
{
|
||||
[]testRecord{
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
{
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
{
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(2)),
|
||||
},
|
||||
{
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
testLibName,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
@ -447,57 +458,56 @@ func TestResourceMetricGroupingExport(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestResourceInstLibMetricGroupingExport(t *testing.T) {
|
||||
countingLib1 := []metric.InstrumentOption{
|
||||
metric.WithInstrumentationName("counting-lib"),
|
||||
metric.WithInstrumentationVersion("v1"),
|
||||
}
|
||||
countingLib2 := []metric.InstrumentOption{
|
||||
metric.WithInstrumentationName("counting-lib"),
|
||||
metric.WithInstrumentationVersion("v2"),
|
||||
}
|
||||
summingLib := []metric.InstrumentOption{
|
||||
metric.WithInstrumentationName("summing-lib"),
|
||||
}
|
||||
version1 := metric.WithInstrumentationVersion("v1")
|
||||
version2 := metric.WithInstrumentationVersion("v2")
|
||||
specialSchema := metric.WithSchemaURL("schurl")
|
||||
summingLib := "summing-lib"
|
||||
countingLib := "counting-lib"
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
testerAResource,
|
||||
[]record{
|
||||
{
|
||||
[]testRecord{
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
countingLib1,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
{
|
||||
countingLib,
|
||||
version1,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
countingLib2,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
{
|
||||
countingLib,
|
||||
version2,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
countingLib1,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
{
|
||||
countingLib,
|
||||
version1,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
countingLib1,
|
||||
append(baseKeyValues, cpuKey.Int(2)),
|
||||
},
|
||||
{
|
||||
countingLib,
|
||||
version1,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
summingLib,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
summingLib,
|
||||
specialSchema,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
@ -569,6 +579,7 @@ func TestResourceInstLibMetricGroupingExport(t *testing.T) {
|
||||
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
|
||||
Name: "summing-lib",
|
||||
},
|
||||
SchemaUrl: "schurl",
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
@ -618,14 +629,14 @@ func TestStatelessExportKind(t *testing.T) {
|
||||
),
|
||||
},
|
||||
testerAResource,
|
||||
[]record{
|
||||
{
|
||||
[]testRecord{
|
||||
record(
|
||||
"instrument",
|
||||
k.instrumentKind,
|
||||
number.Int64Kind,
|
||||
nil,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
},
|
||||
testLibName,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
@ -660,14 +671,14 @@ func TestStatelessExportKind(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.Resource, records []record, expected []*metricpb.ResourceMetrics) {
|
||||
func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.Resource, records []testRecord, expected []*metricpb.ResourceMetrics) {
|
||||
exp, driver := newExporter(t, opts...)
|
||||
|
||||
recs := []metricsdk.Record{}
|
||||
libraryRecs := map[instrumentation.Library][]metricsdk.Record{}
|
||||
for _, r := range records {
|
||||
lcopy := make([]attribute.KeyValue, len(r.labels))
|
||||
copy(lcopy, r.labels)
|
||||
desc := metric.NewDescriptor(r.name, r.iKind, r.nKind, r.opts...)
|
||||
desc := metrictest.NewDescriptor(r.name, r.iKind, r.nKind)
|
||||
labs := attribute.NewSet(lcopy...)
|
||||
|
||||
var agg, ckpt metricsdk.Aggregator
|
||||
@ -705,9 +716,15 @@ func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.
|
||||
}
|
||||
require.NoError(t, agg.SynchronizedMove(ckpt, &desc))
|
||||
|
||||
recs = append(recs, metricsdk.NewRecord(&desc, &labs, ckpt.Aggregation(), intervalStart, intervalEnd))
|
||||
meterCfg := metric.NewMeterConfig(r.meterOpts...)
|
||||
lib := instrumentation.Library{
|
||||
Name: r.meterName,
|
||||
Version: meterCfg.InstrumentationVersion(),
|
||||
SchemaURL: meterCfg.SchemaURL(),
|
||||
}
|
||||
libraryRecs[lib] = append(libraryRecs[lib], metricsdk.NewRecord(&desc, &labs, ckpt.Aggregation(), intervalStart, intervalEnd))
|
||||
}
|
||||
assert.NoError(t, exp.Export(context.Background(), res, &checkpointSet{records: recs}))
|
||||
assert.NoError(t, exp.Export(context.Background(), res, processortest.MultiInstrumentationLibraryReader(libraryRecs)))
|
||||
|
||||
// assert.ElementsMatch does not equate nested slices of different order,
|
||||
// therefore this requires the top level slice to be broken down.
|
||||
@ -715,7 +732,7 @@ func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.
|
||||
// that validate the metric elements match for all expected pairs. Finally,
|
||||
// make we saw all expected pairs.
|
||||
keyFor := func(ilm *metricpb.InstrumentationLibraryMetrics) string {
|
||||
return fmt.Sprintf("%s/%s", ilm.GetInstrumentationLibrary().GetName(), ilm.GetInstrumentationLibrary().GetVersion())
|
||||
return fmt.Sprintf("%s/%s/%s", ilm.GetInstrumentationLibrary().GetName(), ilm.GetInstrumentationLibrary().GetVersion(), ilm.GetSchemaUrl())
|
||||
}
|
||||
got := map[string][]*metricpb.Metric{}
|
||||
for _, rm := range driver.rm {
|
||||
@ -767,7 +784,11 @@ func TestEmptyMetricExport(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
driver.Reset()
|
||||
require.NoError(t, exp.Export(context.Background(), resource.Empty(), &checkpointSet{records: test.records}))
|
||||
require.NoError(t, exp.Export(context.Background(), resource.Empty(), processortest.MultiInstrumentationLibraryReader(map[instrumentation.Library][]metricsdk.Record{
|
||||
{
|
||||
Name: testLibName,
|
||||
}: test.records,
|
||||
})))
|
||||
assert.Equal(t, test.want, driver.rm)
|
||||
}
|
||||
}
|
||||
|
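The tests above feed the exporter through `processortest.MultiInstrumentationLibraryReader`, which builds an `InstrumentationLibraryReader` from a map of libraries to records. A minimal sketch of that helper's use (the library name and record slice are illustrative):

```go
package example

import (
	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

func readerFor(recs []metricsdk.Record) metricsdk.InstrumentationLibraryReader {
	return processortest.MultiInstrumentationLibraryReader(
		map[instrumentation.Library][]metricsdk.Record{
			{Name: "counting-lib", Version: "v1"}: recs,
		},
	)
}
```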
@ -52,15 +52,14 @@ var (
|
||||
// transformation.
|
||||
ErrContextCanceled = errors.New("context canceled")
|
||||
|
||||
// ErrTransforming is returned when an unexected error is encoutered transforming.
|
||||
// ErrTransforming is returned when an unexected error is encountered transforming.
|
||||
ErrTransforming = errors.New("transforming failed")
|
||||
)
|
||||
|
||||
// result is the product of transforming Records into OTLP Metrics.
|
||||
type result struct {
|
||||
InstrumentationLibrary instrumentation.Library
|
||||
Metric *metricpb.Metric
|
||||
Err error
|
||||
Metric *metricpb.Metric
|
||||
Err error
|
||||
}
|
||||
|
||||
// toNanos returns the number of nanoseconds since the UNIX epoch.
|
||||
@ -71,50 +70,78 @@ func toNanos(t time.Time) uint64 {
|
||||
return uint64(t.UnixNano())
|
||||
}
|
||||
|
||||
// CheckpointSet transforms all records contained in a checkpoint into
|
||||
// InstrumentationLibraryReader transforms all records contained in a checkpoint into
|
||||
// batched OTLP ResourceMetrics.
|
||||
func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, res *resource.Resource, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
|
||||
records, errc := source(ctx, exportSelector, cps)
|
||||
func InstrumentationLibraryReader(ctx context.Context, exportSelector export.ExportKindSelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) {
|
||||
var ilms []*metricpb.InstrumentationLibraryMetrics
|
||||
|
||||
// Start a fixed number of goroutines to transform records.
|
||||
transformed := make(chan result)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(int(numWorkers))
|
||||
for i := uint(0); i < numWorkers; i++ {
|
||||
err := ilmr.ForEach(func(lib instrumentation.Library, mr export.Reader) error {
|
||||
|
||||
records, errc := source(ctx, exportSelector, mr)
|
||||
|
||||
// Start a fixed number of goroutines to transform records.
|
||||
transformed := make(chan result)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(int(numWorkers))
|
||||
for i := uint(0); i < numWorkers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
transformer(ctx, exportSelector, records, transformed)
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
transformer(ctx, exportSelector, records, transformed)
|
||||
wg.Wait()
|
||||
close(transformed)
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(transformed)
|
||||
}()
|
||||
|
||||
// Synchronously collect the transformed records and transmit.
|
||||
rms, err := sink(ctx, res, transformed)
|
||||
if err != nil {
|
||||
// Synchronously collect the transformed records and transmit.
|
||||
ms, err := sink(ctx, transformed)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// source is complete, check for any errors.
|
||||
if err := <-errc; err != nil {
|
||||
return err
|
||||
}
|
||||
if len(ms) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ilms = append(ilms, &metricpb.InstrumentationLibraryMetrics{
|
||||
Metrics: ms,
|
||||
SchemaUrl: lib.SchemaURL,
|
||||
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
|
||||
Name: lib.Name,
|
||||
Version: lib.Version,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if len(ilms) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// source is complete, check for any errors.
|
||||
if err := <-errc; err != nil {
|
||||
return nil, err
|
||||
rms := &metricpb.ResourceMetrics{
|
||||
Resource: Resource(res),
|
||||
SchemaUrl: res.SchemaURL(),
|
||||
InstrumentationLibraryMetrics: ilms,
|
||||
}
|
||||
return rms, nil
|
||||
|
||||
return rms, err
|
||||
}
|
||||
|
||||
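The per-library body of `InstrumentationLibraryReader` above keeps the previous concurrency shape: a source goroutine seeds records, a fixed pool of transformer goroutines fans out, and a sink fans results back in once the pool drains. Reduced to a generic sketch (names here are illustrative, not part of the package):

```go
package example

import "sync"

func fanOutFanIn(inputs []int, numWorkers int, work func(int) int) []int {
	in := make(chan int)
	out := make(chan int)

	// Source: seed the inputs, then signal completion by closing the channel.
	go func() {
		defer close(in)
		for _, v := range inputs {
			in <- v
		}
	}()

	// Workers: a fixed number of goroutines drain the input channel.
	var wg sync.WaitGroup
	wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func() {
			defer wg.Done()
			for v := range in {
				out <- work(v)
			}
		}()
	}

	// Close the output once every worker is done, exactly as the
	// wg.Wait()/close(transformed) goroutine does above.
	go func() {
		wg.Wait()
		close(out)
	}()

	// Sink: collect the results synchronously.
	var results []int
	for v := range out {
		results = append(results, v)
	}
	return results
}
```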
// source starts a goroutine that sends each one of the Records yielded by
|
||||
// the CheckpointSet on the returned chan. Any error encoutered will be sent
|
||||
// the Reader on the returned chan. Any error encountered will be sent
|
||||
// on the returned error chan after seeding is complete.
|
||||
func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
|
||||
func source(ctx context.Context, exportSelector export.ExportKindSelector, mr export.Reader) (<-chan export.Record, <-chan error) {
|
||||
errc := make(chan error, 1)
|
||||
out := make(chan export.Record)
|
||||
// Seed records into process.
|
||||
go func() {
|
||||
defer close(out)
|
||||
// No select is needed since errc is buffered.
|
||||
errc <- cps.ForEach(exportSelector, func(r export.Record) error {
|
||||
errc <- mr.ForEach(exportSelector, func(r export.Record) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ErrContextCanceled
|
||||
@ -136,10 +163,6 @@ func transformer(ctx context.Context, exportSelector export.ExportKindSelector,
|
||||
continue
|
||||
}
|
||||
res := result{
|
||||
InstrumentationLibrary: instrumentation.Library{
|
||||
Name: r.Descriptor().InstrumentationName(),
|
||||
Version: r.Descriptor().InstrumentationVersion(),
|
||||
},
|
||||
Metric: m,
|
||||
Err: err,
|
||||
}
|
||||
@ -153,32 +176,34 @@ func transformer(ctx context.Context, exportSelector export.ExportKindSelector,
|
||||
|
||||
// sink collects transformed Records and batches them.
|
||||
//
|
||||
// Any errors encoutered transforming input will be reported with an
|
||||
// Any errors encountered transforming input will be reported with an
|
||||
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
|
||||
// caller to handle any incorrect data in these ResourceMetrics.
|
||||
func sink(ctx context.Context, res *resource.Resource, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
|
||||
// caller to handle any incorrect data in these ResourceMetric.
|
||||
func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) {
|
||||
var errStrings []string
|
||||
|
||||
// Group by instrumentation library name and then the MetricDescriptor.
|
||||
grouped := map[instrumentation.Library]map[string]*metricpb.Metric{}
|
||||
// Group by the MetricDescriptor.
|
||||
grouped := map[string]*metricpb.Metric{}
|
||||
for res := range in {
|
||||
if res.Err != nil {
|
||||
errStrings = append(errStrings, res.Err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
mb, ok := grouped[res.InstrumentationLibrary]
|
||||
if !ok {
|
||||
mb = make(map[string]*metricpb.Metric)
|
||||
grouped[res.InstrumentationLibrary] = mb
|
||||
}
|
||||
|
||||
mID := res.Metric.GetName()
|
||||
m, ok := mb[mID]
|
||||
m, ok := grouped[mID]
|
||||
if !ok {
|
||||
mb[mID] = res.Metric
|
||||
grouped[mID] = res.Metric
|
||||
continue
|
||||
|
||||
}
|
||||
// Note: There is extra work happening in this code
|
||||
// that can be improved when the work described in
|
||||
// #2119 is completed. The SDK has a guarantee that
|
||||
// no more than one point per period per label set is
|
||||
// produced, so this fallthrough should never happen.
|
||||
// The final step of #2119 is to remove all the
|
||||
// grouping logic here.
|
||||
switch res.Metric.Data.(type) {
|
||||
case *metricpb.Metric_Gauge:
|
||||
m.GetGauge().DataPoints = append(m.GetGauge().DataPoints, res.Metric.GetGauge().DataPoints...)
|
||||
@ -198,33 +223,16 @@ func sink(ctx context.Context, res *resource.Resource, in <-chan result) ([]*met
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rm := &metricpb.ResourceMetrics{
|
||||
Resource: Resource(res),
|
||||
}
|
||||
if res != nil {
|
||||
rm.SchemaUrl = res.SchemaURL()
|
||||
}
|
||||
|
||||
rms := []*metricpb.ResourceMetrics{rm}
|
||||
for il, mb := range grouped {
|
||||
ilm := &metricpb.InstrumentationLibraryMetrics{
|
||||
Metrics: make([]*metricpb.Metric, 0, len(mb)),
|
||||
InstrumentationLibrary: &commonpb.InstrumentationLibrary{
|
||||
Name: il.Name,
|
||||
Version: il.Version,
|
||||
},
|
||||
}
|
||||
for _, m := range mb {
|
||||
ilm.Metrics = append(ilm.Metrics, m)
|
||||
}
|
||||
rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
|
||||
ms := make([]*metricpb.Metric, 0, len(grouped))
|
||||
for _, m := range grouped {
|
||||
ms = append(ms, m)
|
||||
}
|
||||
|
||||
// Report any transform errors.
|
||||
if len(errStrings) > 0 {
|
||||
return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
|
||||
return ms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
|
||||
}
|
||||
return rms, nil
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
@ -122,7 +123,7 @@ func TestMinMaxSumCountValue(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMinMaxSumCountDatapoints(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
labels := attribute.NewSet(attribute.String("one", "1"))
|
||||
mmscs := minmaxsumcount.New(2, &metric.Descriptor{})
|
||||
mmsc, ckpt := &mmscs[0], &mmscs[1]
|
||||
@ -178,7 +179,7 @@ func TestMinMaxSumCountPropagatesErrors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSumIntDataPoints(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
labels := attribute.NewSet(attribute.String("one", "1"))
|
||||
sums := sumAgg.New(2)
|
||||
s, ckpt := &sums[0], &sums[1]
|
||||
@ -218,7 +219,7 @@ func TestSumIntDataPoints(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSumFloatDataPoints(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
|
||||
labels := attribute.NewSet(attribute.String("one", "1"))
|
||||
sums := sumAgg.New(2)
|
||||
s, ckpt := &sums[0], &sums[1]
|
||||
@ -256,7 +257,7 @@ func TestSumFloatDataPoints(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLastValueIntDataPoints(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
labels := attribute.NewSet(attribute.String("one", "1"))
|
||||
lvs := lvAgg.New(2)
|
||||
lv, ckpt := &lvs[0], &lvs[1]
|
||||
@ -291,7 +292,7 @@ func TestLastValueIntDataPoints(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExactIntDataPoints(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
labels := attribute.NewSet(attribute.String("one", "1"))
|
||||
arrs := arrAgg.New(2)
|
||||
e, ckpt := &arrs[0], &arrs[1]
|
||||
@ -326,7 +327,7 @@ func TestExactIntDataPoints(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExactFloatDataPoints(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
|
||||
labels := attribute.NewSet(attribute.String("one", "1"))
|
||||
arrs := arrAgg.New(2)
|
||||
e, ckpt := &arrs[0], &arrs[1]
|
||||
@ -360,7 +361,7 @@ func TestExactFloatDataPoints(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSumErrUnknownValueType(t *testing.T) {
|
||||
desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Kind(-1))
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Kind(-1))
|
||||
labels := attribute.NewSet()
|
||||
s := &sumAgg.New(1)[0]
|
||||
record := export.NewRecord(&desc, &labels, s, intervalStart, intervalEnd)
|
||||
@ -445,7 +446,7 @@ var _ aggregation.MinMaxSumCount = &testErrMinMaxSumCount{}
|
||||
|
||||
func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
|
||||
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
|
||||
desc := metric.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
labels := attribute.NewSet()
|
||||
test := &testAgg{
|
||||
kind: kind,
|
||||
@ -481,7 +482,7 @@ func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
|
||||
|
||||
func TestRecordAggregatorUnexpectedErrors(t *testing.T) {
|
||||
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
|
||||
desc := metric.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
labels := attribute.NewSet()
|
||||
return Record(export.CumulativeExportKindSelector(), export.NewRecord(&desc, &labels, agg, intervalStart, intervalEnd))
|
||||
}
|
||||
|
@ -20,81 +20,52 @@ import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
||||
)
|
||||
|
||||
// Used to avoid implementing locking functions for test
|
||||
// checkpointsets.
|
||||
type noopLocker struct{}
|
||||
|
||||
// Lock implements sync.Locker, which is needed for
|
||||
// exportmetric.CheckpointSet.
|
||||
func (noopLocker) Lock() {}
|
||||
|
||||
// Unlock implements sync.Locker, which is needed for
|
||||
// exportmetric.CheckpointSet.
|
||||
func (noopLocker) Unlock() {}
|
||||
|
||||
// RLock implements exportmetric.CheckpointSet.
|
||||
func (noopLocker) RLock() {}
|
||||
|
||||
// RUnlock implements exportmetric.CheckpointSet.
|
||||
func (noopLocker) RUnlock() {}
|
||||
|
||||
// OneRecordCheckpointSet is a CheckpointSet that returns just one
|
||||
// OneRecordReader is a Reader that returns just one
|
||||
// filled record. It may be useful for testing driver's metrics
|
||||
// export.
|
||||
type OneRecordCheckpointSet struct {
|
||||
noopLocker
|
||||
}
|
||||
|
||||
var _ exportmetric.CheckpointSet = OneRecordCheckpointSet{}
|
||||
|
||||
// ForEach implements exportmetric.CheckpointSet. It always invokes
|
||||
// the callback once with always the same record.
|
||||
func (OneRecordCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
|
||||
desc := metric.NewDescriptor(
|
||||
func OneRecordReader() exportmetric.InstrumentationLibraryReader {
|
||||
desc := metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
)
|
||||
agg := sum.New(1)
|
||||
if err := agg[0].Update(context.Background(), number.NewInt64Number(42), &desc); err != nil {
|
||||
return err
|
||||
panic(err)
|
||||
}
|
||||
start := time.Date(2020, time.December, 8, 19, 15, 0, 0, time.UTC)
|
||||
end := time.Date(2020, time.December, 8, 19, 16, 0, 0, time.UTC)
|
||||
labels := attribute.NewSet(attribute.String("abc", "def"), attribute.Int64("one", 1))
|
||||
rec := exportmetric.NewRecord(&desc, &labels, agg[0].Aggregation(), start, end)
|
||||
return recordFunc(rec)
|
||||
|
||||
return processortest.MultiInstrumentationLibraryReader(
|
||||
map[instrumentation.Library][]exportmetric.Record{
|
||||
{
|
||||
Name: "onelib",
|
||||
}: {rec},
|
||||
})
|
||||
}
|
||||
|
||||
// EmptyCheckpointSet is a checkpointer that has no records at all.
|
||||
type EmptyCheckpointSet struct {
|
||||
noopLocker
|
||||
func EmptyReader() exportmetric.InstrumentationLibraryReader {
|
||||
return processortest.MultiInstrumentationLibraryReader(nil)
|
||||
}
|
||||
|
||||
var _ exportmetric.CheckpointSet = EmptyCheckpointSet{}
|
||||
|
||||
// ForEach implements exportmetric.CheckpointSet. It never invokes the
|
||||
// callback.
|
||||
func (EmptyCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FailCheckpointSet is a checkpointer that returns an error during
|
||||
// FailReader is a checkpointer that returns an error during
|
||||
// ForEach.
|
||||
type FailCheckpointSet struct {
|
||||
noopLocker
|
||||
}
|
||||
type FailReader struct{}
|
||||
|
||||
var _ exportmetric.CheckpointSet = FailCheckpointSet{}
|
||||
var _ exportmetric.InstrumentationLibraryReader = FailReader{}
|
||||
|
||||
// ForEach implements exportmetric.CheckpointSet. It always fails.
|
||||
func (FailCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
|
||||
// ForEach implements exportmetric.Reader. It always fails.
|
||||
func (FailReader) ForEach(readerFunc func(instrumentation.Library, exportmetric.Reader) error) error {
|
||||
return fmt.Errorf("fail")
|
||||
}
|
||||
|
@ -40,11 +40,11 @@ import (
|
||||
// themselves.
|
||||
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
|
||||
selector := simple.NewWithInexpensiveDistribution()
|
||||
proc := processor.New(selector, exportmetric.StatelessExportKindSelector())
|
||||
proc := processor.NewFactory(selector, exportmetric.StatelessExportKindSelector())
|
||||
cont := controller.New(proc, controller.WithExporter(exp))
|
||||
require.NoError(t, cont.Start(ctx))
|
||||
|
||||
meter := cont.MeterProvider().Meter("test-meter")
|
||||
meter := cont.Meter("test-meter")
|
||||
labels := []attribute.KeyValue{attribute.Bool("test", true)}
|
||||
|
||||
type data struct {
|
||||
|
@ -40,7 +40,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
oneRecord = otlpmetrictest.OneRecordCheckpointSet{}
|
||||
oneRecord = otlpmetrictest.OneRecordReader()
|
||||
|
||||
testResource = resource.Empty()
|
||||
)
|
||||
@ -719,7 +719,7 @@ func TestEmptyData(t *testing.T) {
|
||||
assert.NoError(t, exp.Shutdown(ctx))
|
||||
}()
|
||||
|
||||
assert.NoError(t, exp.Export(ctx, testResource, otlpmetrictest.EmptyCheckpointSet{}))
|
||||
assert.NoError(t, exp.Export(ctx, testResource, otlpmetrictest.EmptyReader()))
|
||||
}
|
||||
|
||||
func TestFailedMetricTransform(t *testing.T) {
|
||||
@ -737,5 +737,5 @@ func TestFailedMetricTransform(t *testing.T) {
|
||||
assert.NoError(t, exp.Shutdown(ctx))
|
||||
}()
|
||||
|
||||
assert.Error(t, exp.Export(ctx, testResource, otlpmetrictest.FailCheckpointSet{}))
|
||||
assert.Error(t, exp.Export(ctx, testResource, otlpmetrictest.FailReader{}))
|
||||
}
|
||||
|
@ -48,14 +48,14 @@ func Example_insecure() {
|
||||
}()
|
||||
|
||||
pusher := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithExactDistribution(),
|
||||
exp,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(2*time.Second),
|
||||
)
|
||||
global.SetMeterProvider(pusher.MeterProvider())
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
if err := pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("could not start metric controoler: %v", err)
|
||||
@ -107,14 +107,14 @@ func Example_withTLS() {
|
||||
}()
|
||||
|
||||
pusher := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithExactDistribution(),
|
||||
exp,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(2*time.Second),
|
||||
)
|
||||
global.SetMeterProvider(pusher.MeterProvider())
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
if err := pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("could not start metric controoler: %v", err)
|
||||
@ -164,14 +164,14 @@ func Example_withDifferentSignalCollectors() {
|
||||
}()
|
||||
|
||||
pusher := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithExactDistribution(),
|
||||
exp,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(2*time.Second),
|
||||
)
|
||||
global.SetMeterProvider(pusher.MeterProvider())
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
if err := pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("could not start metric controoler: %v", err)
|
||||
|
@ -38,7 +38,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
oneRecord = otlpmetrictest.OneRecordCheckpointSet{}
|
||||
oneRecord = otlpmetrictest.OneRecordReader()
|
||||
|
||||
testResource = resource.Empty()
|
||||
)
|
||||
|
@ -32,6 +32,7 @@ import (
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
@ -120,7 +121,7 @@ func New(config Config, controller *controller.Controller) (*Exporter, error) {
|
||||
|
||||
// MeterProvider returns the MeterProvider of this exporter.
|
||||
func (e *Exporter) MeterProvider() metric.MeterProvider {
|
||||
return e.controller.MeterProvider()
|
||||
return e.controller
|
||||
}
|
||||
|
||||
// Controller returns the controller object that coordinates collection for the SDK.
|
||||
@ -152,15 +153,17 @@ func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
||||
c.exp.lock.RLock()
|
||||
defer c.exp.lock.RUnlock()
|
||||
|
||||
_ = c.exp.Controller().ForEach(c.exp, func(record export.Record) error {
|
||||
var labelKeys []string
|
||||
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, nil)
|
||||
ch <- c.toDesc(record, labelKeys)
|
||||
return nil
|
||||
_ = c.exp.Controller().ForEach(func(_ instrumentation.Library, reader export.Reader) error {
|
||||
return reader.ForEach(c.exp, func(record export.Record) error {
|
||||
var labelKeys []string
|
||||
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, nil)
|
||||
ch <- c.toDesc(record, labelKeys)
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Collect exports the last calculated CheckpointSet.
|
||||
// Collect exports the last calculated Reader state.
|
||||
//
|
||||
// Collect is invoked whenever prometheus.Gatherer is also invoked.
|
||||
// For example, when the HTTP endpoint is invoked by Prometheus.
|
||||
@ -173,36 +176,39 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
||||
otel.Handle(err)
|
||||
}
|
||||
|
||||
err := ctrl.ForEach(c.exp, func(record export.Record) error {
|
||||
agg := record.Aggregation()
|
||||
numberKind := record.Descriptor().NumberKind()
|
||||
instrumentKind := record.Descriptor().InstrumentKind()
|
||||
err := ctrl.ForEach(func(_ instrumentation.Library, reader export.Reader) error {
|
||||
return reader.ForEach(c.exp, func(record export.Record) error {
|
||||
|
||||
var labelKeys, labels []string
|
||||
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, &labels)
|
||||
agg := record.Aggregation()
|
||||
numberKind := record.Descriptor().NumberKind()
|
||||
instrumentKind := record.Descriptor().InstrumentKind()
|
||||
|
||||
desc := c.toDesc(record, labelKeys)
|
||||
var labelKeys, labels []string
|
||||
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, &labels)
|
||||
|
||||
if hist, ok := agg.(aggregation.Histogram); ok {
|
||||
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting histogram: %w", err)
|
||||
desc := c.toDesc(record, labelKeys)
|
||||
|
||||
if hist, ok := agg.(aggregation.Histogram); ok {
|
||||
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting histogram: %w", err)
|
||||
}
|
||||
} else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() {
|
||||
if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting monotonic counter: %w", err)
|
||||
}
|
||||
} else if sum, ok := agg.(aggregation.Sum); ok && !instrumentKind.Monotonic() {
|
||||
if err := c.exportNonMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting non monotonic counter: %w", err)
|
||||
}
|
||||
} else if lastValue, ok := agg.(aggregation.LastValue); ok {
|
||||
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting last value: %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
|
||||
}
|
||||
} else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() {
|
||||
if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting monotonic counter: %w", err)
|
||||
}
|
||||
} else if sum, ok := agg.(aggregation.Sum); ok && !instrumentKind.Monotonic() {
|
||||
if err := c.exportNonMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting non monotonic counter: %w", err)
|
||||
}
|
||||
} else if lastValue, ok := agg.(aggregation.LastValue); ok {
|
||||
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
|
||||
return fmt.Errorf("exporting last value: %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
|
||||
}
|
||||
return nil
|
||||
return nil
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
|
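On the pull side shown above, the basic Controller itself satisfies `InstrumentationLibraryReader`, so the Prometheus collector walks libraries and then records straight off the controller. A minimal sketch under that assumption (the `Collect` call triggering a collection before reading is assumed from the exporter's behaviour, and `countRecords` is illustrative):

```go
package example

import (
	"context"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
)

func countRecords(ctx context.Context, cont *controller.Controller, sel export.ExportKindSelector) (int, error) {
	// Assumed: the pull controller exposes Collect to gather a checkpoint
	// before its reader side is consulted.
	if err := cont.Collect(ctx); err != nil {
		return 0, err
	}
	n := 0
	err := cont.ForEach(func(_ instrumentation.Library, reader export.Reader) error {
		return reader.ForEach(sel, func(export.Record) error {
			n++
			return nil
		})
	})
	return n, err
}
```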
@ -84,7 +84,7 @@ func expectHistogram(name string, values ...string) expectedMetric {
|
||||
|
||||
func newPipeline(config prometheus.Config, options ...controller.Option) (*prometheus.Exporter, error) {
|
||||
c := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
selector.NewWithHistogramDistribution(
|
||||
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
|
||||
),
|
||||
|
@ -72,7 +72,7 @@ func InstallExportPipeline(ctx context.Context) func() {
|
||||
}
|
||||
|
||||
pusher := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithInexpensiveDistribution(),
|
||||
exporter,
|
||||
),
|
||||
@ -81,7 +81,7 @@ func InstallExportPipeline(ctx context.Context) func() {
|
||||
if err = pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("starting push controller: %v", err)
|
||||
}
|
||||
global.SetMeterProvider(pusher.MeterProvider())
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
return func() {
|
||||
if err := pusher.Stop(ctx); err != nil {
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
@ -50,93 +51,99 @@ func (e *metricExporter) ExportKindFor(desc *metric.Descriptor, kind aggregation
|
||||
return exportmetric.StatelessExportKindSelector().ExportKindFor(desc, kind)
|
||||
}
|
||||
|
||||
func (e *metricExporter) Export(_ context.Context, res *resource.Resource, checkpointSet exportmetric.CheckpointSet) error {
|
||||
func (e *metricExporter) Export(_ context.Context, res *resource.Resource, reader exportmetric.InstrumentationLibraryReader) error {
|
||||
var aggError error
|
||||
var batch []line
|
||||
aggError = checkpointSet.ForEach(e, func(record exportmetric.Record) error {
|
||||
desc := record.Descriptor()
|
||||
agg := record.Aggregation()
|
||||
kind := desc.NumberKind()
|
||||
encodedResource := res.Encoded(e.config.LabelEncoder)
|
||||
aggError = reader.ForEach(func(lib instrumentation.Library, mr exportmetric.Reader) error {
|
||||
|
||||
var instLabels []attribute.KeyValue
|
||||
if name := desc.InstrumentationName(); name != "" {
|
||||
if name := lib.Name; name != "" {
|
||||
instLabels = append(instLabels, attribute.String("instrumentation.name", name))
|
||||
if version := desc.InstrumentationVersion(); version != "" {
|
||||
if version := lib.Version; version != "" {
|
||||
instLabels = append(instLabels, attribute.String("instrumentation.version", version))
|
||||
}
|
||||
if schema := lib.SchemaURL; schema != "" {
|
||||
instLabels = append(instLabels, attribute.String("instrumentation.schema_url", schema))
|
||||
}
|
||||
}
|
||||
instSet := attribute.NewSet(instLabels...)
|
||||
encodedInstLabels := instSet.Encoded(e.config.LabelEncoder)
|
||||
|
||||
var expose line
|
||||
return mr.ForEach(e, func(record exportmetric.Record) error {
|
||||
desc := record.Descriptor()
|
||||
agg := record.Aggregation()
|
||||
kind := desc.NumberKind()
|
||||
encodedResource := res.Encoded(e.config.LabelEncoder)
|
||||
|
||||
if sum, ok := agg.(aggregation.Sum); ok {
|
||||
value, err := sum.Sum()
|
||||
if err != nil {
|
||||
return err
|
||||
var expose line
|
||||
|
||||
if sum, ok := agg.(aggregation.Sum); ok {
|
||||
value, err := sum.Sum()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.Sum = value.AsInterface(kind)
|
||||
}
|
||||
expose.Sum = value.AsInterface(kind)
|
||||
}
|
||||
|
||||
if mmsc, ok := agg.(aggregation.MinMaxSumCount); ok {
|
||||
count, err := mmsc.Count()
|
||||
if err != nil {
|
||||
return err
|
||||
if mmsc, ok := agg.(aggregation.MinMaxSumCount); ok {
|
||||
count, err := mmsc.Count()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.Count = count
|
||||
|
||||
max, err := mmsc.Max()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.Max = max.AsInterface(kind)
|
||||
|
||||
min, err := mmsc.Min()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.Min = min.AsInterface(kind)
|
||||
} else if lv, ok := agg.(aggregation.LastValue); ok {
|
||||
value, timestamp, err := lv.LastValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.LastValue = value.AsInterface(kind)
|
||||
|
||||
if e.config.Timestamps {
|
||||
expose.Timestamp = ×tamp
|
||||
}
|
||||
}
|
||||
expose.Count = count
|
||||
|
||||
max, err := mmsc.Max()
|
||||
if err != nil {
|
||||
return err
|
||||
var encodedLabels string
|
||||
iter := record.Labels().Iter()
|
||||
if iter.Len() > 0 {
|
||||
encodedLabels = record.Labels().Encoded(e.config.LabelEncoder)
|
||||
}
|
||||
expose.Max = max.AsInterface(kind)
|
||||
|
||||
min, err := mmsc.Min()
|
||||
if err != nil {
|
||||
return err
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString(desc.Name())
|
||||
|
||||
if len(encodedLabels) > 0 || len(encodedResource) > 0 || len(encodedInstLabels) > 0 {
|
||||
sb.WriteRune('{')
|
||||
sb.WriteString(encodedResource)
|
||||
if len(encodedInstLabels) > 0 && len(encodedResource) > 0 {
|
||||
sb.WriteRune(',')
|
||||
}
|
||||
sb.WriteString(encodedInstLabels)
|
||||
if len(encodedLabels) > 0 && (len(encodedInstLabels) > 0 || len(encodedResource) > 0) {
|
||||
sb.WriteRune(',')
|
||||
}
|
||||
sb.WriteString(encodedLabels)
|
||||
sb.WriteRune('}')
|
||||
}
|
||||
expose.Min = min.AsInterface(kind)
|
||||
} else if lv, ok := agg.(aggregation.LastValue); ok {
|
||||
value, timestamp, err := lv.LastValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.LastValue = value.AsInterface(kind)
|
||||
|
||||
if e.config.Timestamps {
|
||||
expose.Timestamp = &timestamp
|
||||
}
|
||||
}
|
||||
expose.Name = sb.String()
|
||||
|
||||
var encodedLabels string
|
||||
iter := record.Labels().Iter()
|
||||
if iter.Len() > 0 {
|
||||
encodedLabels = record.Labels().Encoded(e.config.LabelEncoder)
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
|
||||
sb.WriteString(desc.Name())
|
||||
|
||||
if len(encodedLabels) > 0 || len(encodedResource) > 0 || len(encodedInstLabels) > 0 {
|
||||
sb.WriteRune('{')
|
||||
sb.WriteString(encodedResource)
|
||||
if len(encodedInstLabels) > 0 && len(encodedResource) > 0 {
|
||||
sb.WriteRune(',')
|
||||
}
|
||||
sb.WriteString(encodedInstLabels)
|
||||
if len(encodedLabels) > 0 && (len(encodedInstLabels) > 0 || len(encodedResource) > 0) {
|
||||
sb.WriteRune(',')
|
||||
}
|
||||
sb.WriteString(encodedLabels)
|
||||
sb.WriteRune('}')
|
||||
}
|
||||
|
||||
expose.Name = sb.String()
|
||||
|
||||
batch = append(batch, expose)
|
||||
return nil
|
||||
batch = append(batch, expose)
|
||||
return nil
|
||||
})
|
||||
})
|
||||
if len(batch) == 0 {
|
||||
return aggError
|
||||
|
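A minimal sketch (not part of this commit) of the two-level iteration the renamed interfaces imply for an exporter: the outer InstrumentationLibraryReader yields one instrumentation.Library and a per-library Reader at a time, and the inner Reader yields individual records. The exporter type and method name below are illustrative.

func (e *metricExporter) exportSketch(reader exportmetric.InstrumentationLibraryReader) error {
	// Outer loop: one instrumentation library at a time.
	return reader.ForEach(func(lib instrumentation.Library, mr exportmetric.Reader) error {
		// Inner loop: per-library records; the exporter supplies the ExportKindSelector.
		return mr.ForEach(e, func(record exportmetric.Record) error {
			_ = lib.Name            // library identity now comes from the outer loop
			_ = record.Descriptor() // instrument-level metadata only
			return nil
		})
	})
}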
@ -61,14 +61,14 @@ func newFixtureWithResource(t *testing.T, res *resource.Resource, opts ...stdout
|
||||
t.Fatal("Error building fixture: ", err)
|
||||
}
|
||||
aggSel := processortest.AggregatorSelector()
|
||||
proc := processor.New(aggSel, export.StatelessExportKindSelector())
|
||||
proc := processor.NewFactory(aggSel, export.StatelessExportKindSelector())
|
||||
cont := controller.New(proc,
|
||||
controller.WithExporter(exp),
|
||||
controller.WithResource(res),
|
||||
)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, cont.Start(ctx))
|
||||
meter := cont.MeterProvider().Meter("test")
|
||||
meter := cont.Meter("test")
|
||||
|
||||
return testFixture{
|
||||
t: t,
|
||||
@ -87,7 +87,7 @@ func (fix testFixture) Output() string {
|
||||
func TestStdoutTimestamp(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
aggSel := processortest.AggregatorSelector()
|
||||
proc := processor.New(aggSel, export.CumulativeExportKindSelector())
|
||||
proc := processor.NewFactory(aggSel, export.CumulativeExportKindSelector())
|
||||
exporter, err := stdoutmetric.New(
|
||||
stdoutmetric.WithWriter(&buf),
|
||||
)
|
||||
@ -101,7 +101,7 @@ func TestStdoutTimestamp(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, cont.Start(ctx))
|
||||
meter := cont.MeterProvider().Meter("test")
|
||||
meter := cont.Meter("test")
|
||||
counter := metric.Must(meter).NewInt64Counter("name.lastvalue")
|
||||
|
||||
before := time.Now()
|
||||
|
@ -21,9 +21,9 @@ import (
|
||||
"unsafe"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/registry"
|
||||
)
|
||||
|
||||
// This file contains the forwarding implementation of MeterProvider used as
|
||||
@ -48,7 +48,9 @@ import (
|
||||
// methods of the api/metric/registry package.
|
||||
|
||||
type meterKey struct {
|
||||
Name, Version string
|
||||
InstrumentationName string
|
||||
InstrumentationVersion string
|
||||
SchemaURL string
|
||||
}
|
||||
|
||||
type meterProvider struct {
|
||||
@ -138,7 +140,7 @@ func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
|
||||
|
||||
p.delegate = provider
|
||||
for key, entry := range p.meters {
|
||||
entry.impl.setDelegate(key.Name, key.Version, provider)
|
||||
entry.impl.setDelegate(key, provider)
|
||||
}
|
||||
p.meters = nil
|
||||
}
|
||||
@ -151,28 +153,38 @@ func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOp
|
||||
return p.delegate.Meter(instrumentationName, opts...)
|
||||
}
|
||||
|
||||
cfg := metric.NewMeterConfig(opts...)
|
||||
key := meterKey{
|
||||
Name: instrumentationName,
|
||||
Version: metric.NewMeterConfig(opts...).InstrumentationVersion(),
|
||||
InstrumentationName: instrumentationName,
|
||||
InstrumentationVersion: cfg.InstrumentationVersion(),
|
||||
SchemaURL: cfg.SchemaURL(),
|
||||
}
|
||||
entry, ok := p.meters[key]
|
||||
if !ok {
|
||||
entry = &meterEntry{}
|
||||
// Note: This code implements its own MeterProvider
|
||||
// name-uniqueness logic because there is
|
||||
// synchronization required at the moment of
|
||||
// delegation. We use the same instrument-uniqueness
|
||||
// checking the real SDK uses here:
|
||||
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
|
||||
p.meters[key] = entry
|
||||
|
||||
}
|
||||
return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
|
||||
return metric.WrapMeterImpl(entry.unique)
|
||||
}
|
||||
|
||||
// Meter interface and delegation
|
||||
|
||||
func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
|
||||
func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
d := new(metric.MeterImpl)
|
||||
*d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
|
||||
*d = provider.Meter(
|
||||
key.InstrumentationName,
|
||||
metric.WithInstrumentationVersion(key.InstrumentationVersion),
|
||||
metric.WithSchemaURL(key.SchemaURL),
|
||||
).MeterImpl()
|
||||
m.delegate = unsafe.Pointer(d)
|
||||
|
||||
for _, inst := range m.syncInsts {
|
||||
|
@ -39,7 +39,17 @@ func TestDirect(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
meter1 := metricglobal.Meter("test1", metric.WithInstrumentationVersion("semver:v1.0.0"))
|
||||
meter2 := metricglobal.Meter("test2")
|
||||
meter2 := metricglobal.Meter("test2", metric.WithSchemaURL("hello"))
|
||||
|
||||
library1 := metrictest.Library{
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
}
|
||||
library2 := metrictest.Library{
|
||||
InstrumentationName: "test2",
|
||||
SchemaURL: "hello",
|
||||
}
|
||||
|
||||
labels1 := []attribute.KeyValue{attribute.String("A", "B")}
|
||||
labels2 := []attribute.KeyValue{attribute.String("C", "D")}
|
||||
labels3 := []attribute.KeyValue{attribute.String("E", "F")}
|
||||
@ -66,66 +76,60 @@ func TestDirect(t *testing.T) {
|
||||
second.Record(ctx, 1, labels3...)
|
||||
second.Record(ctx, 2, labels3...)
|
||||
|
||||
mock, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
metricglobal.SetMeterProvider(provider)
|
||||
|
||||
counter.Add(ctx, 1, labels1...)
|
||||
histogram.Record(ctx, 3, labels1...)
|
||||
second.Record(ctx, 3, labels3...)
|
||||
|
||||
mock.RunAsyncInstruments()
|
||||
provider.RunAsyncInstruments()
|
||||
|
||||
measurements := metrictest.AsStructs(mock.MeasurementBatches)
|
||||
measurements := metrictest.AsStructs(provider.MeasurementBatches)
|
||||
|
||||
require.EqualValues(t,
|
||||
[]metrictest.Measured{
|
||||
{
|
||||
Name: "test.counter",
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asInt(1),
|
||||
Name: "test.counter",
|
||||
Library: library1,
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asInt(1),
|
||||
},
|
||||
{
|
||||
Name: "test.histogram",
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asFloat(3),
|
||||
Name: "test.histogram",
|
||||
Library: library1,
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asFloat(3),
|
||||
},
|
||||
{
|
||||
Name: "test.second",
|
||||
InstrumentationName: "test2",
|
||||
Labels: metrictest.LabelsToMap(labels3...),
|
||||
Number: asFloat(3),
|
||||
Name: "test.second",
|
||||
Library: library2,
|
||||
Labels: metrictest.LabelsToMap(labels3...),
|
||||
Number: asFloat(3),
|
||||
},
|
||||
{
|
||||
Name: "test.gauge.float",
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asFloat(1),
|
||||
Name: "test.gauge.float",
|
||||
Library: library1,
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asFloat(1),
|
||||
},
|
||||
{
|
||||
Name: "test.gauge.float",
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
Labels: metrictest.LabelsToMap(labels2...),
|
||||
Number: asFloat(2),
|
||||
Name: "test.gauge.float",
|
||||
Library: library1,
|
||||
Labels: metrictest.LabelsToMap(labels2...),
|
||||
Number: asFloat(2),
|
||||
},
|
||||
{
|
||||
Name: "test.gauge.int",
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asInt(1),
|
||||
Name: "test.gauge.int",
|
||||
Library: library1,
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asInt(1),
|
||||
},
|
||||
{
|
||||
Name: "test.gauge.int",
|
||||
InstrumentationName: "test1",
|
||||
InstrumentationVersion: "semver:v1.0.0",
|
||||
Labels: metrictest.LabelsToMap(labels2...),
|
||||
Number: asInt(2),
|
||||
Name: "test.gauge.int",
|
||||
Library: library1,
|
||||
Labels: metrictest.LabelsToMap(labels2...),
|
||||
Number: asInt(2),
|
||||
},
|
||||
},
|
||||
measurements,
|
||||
@ -138,7 +142,11 @@ func TestBound(t *testing.T) {
|
||||
// Note: this test uses opposite Float64/Int64 number kinds
|
||||
// vs. the above, to cover all the instruments.
|
||||
ctx := context.Background()
|
||||
glob := metricglobal.Meter("test")
|
||||
glob := metricglobal.Meter(
|
||||
"test",
|
||||
metric.WithInstrumentationVersion("semver:test-1.0"),
|
||||
metric.WithSchemaURL("schema://url"),
|
||||
)
|
||||
labels1 := []attribute.KeyValue{attribute.String("A", "B")}
|
||||
|
||||
counter := Must(glob).NewFloat64Counter("test.counter")
|
||||
@ -151,28 +159,34 @@ func TestBound(t *testing.T) {
|
||||
boundM.Record(ctx, 1)
|
||||
boundM.Record(ctx, 2)
|
||||
|
||||
mock, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
metricglobal.SetMeterProvider(provider)
|
||||
|
||||
boundC.Add(ctx, 1)
|
||||
boundM.Record(ctx, 3)
|
||||
|
||||
library := metrictest.Library{
|
||||
InstrumentationName: "test",
|
||||
InstrumentationVersion: "semver:test-1.0",
|
||||
SchemaURL: "schema://url",
|
||||
}
|
||||
|
||||
require.EqualValues(t,
|
||||
[]metrictest.Measured{
|
||||
{
|
||||
Name: "test.counter",
|
||||
InstrumentationName: "test",
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asFloat(1),
|
||||
Name: "test.counter",
|
||||
Library: library,
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asFloat(1),
|
||||
},
|
||||
{
|
||||
Name: "test.histogram",
|
||||
InstrumentationName: "test",
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asInt(3),
|
||||
Name: "test.histogram",
|
||||
Library: library,
|
||||
Labels: metrictest.LabelsToMap(labels1...),
|
||||
Number: asInt(3),
|
||||
},
|
||||
},
|
||||
metrictest.AsStructs(mock.MeasurementBatches))
|
||||
metrictest.AsStructs(provider.MeasurementBatches))
|
||||
|
||||
boundC.Unbind()
|
||||
boundM.Unbind()
|
||||
@ -199,7 +213,7 @@ func TestUnbindThenRecordOne(t *testing.T) {
|
||||
global.ResetForTest()
|
||||
|
||||
ctx := context.Background()
|
||||
mock, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
|
||||
meter := metricglobal.Meter("test")
|
||||
counter := Must(meter).NewInt64Counter("test.counter")
|
||||
@ -210,7 +224,7 @@ func TestUnbindThenRecordOne(t *testing.T) {
|
||||
require.NotPanics(t, func() {
|
||||
boundC.Add(ctx, 1)
|
||||
})
|
||||
require.Equal(t, 0, len(mock.MeasurementBatches))
|
||||
require.Equal(t, 0, len(provider.MeasurementBatches))
|
||||
}
|
||||
|
||||
type meterProviderWithConstructorError struct {
|
||||
@ -222,7 +236,7 @@ type meterWithConstructorError struct {
|
||||
}
|
||||
|
||||
func (m *meterProviderWithConstructorError) Meter(iName string, opts ...metric.MeterOption) metric.Meter {
|
||||
return metric.WrapMeterImpl(&meterWithConstructorError{m.MeterProvider.Meter(iName, opts...).MeterImpl()}, iName, opts...)
|
||||
return metric.WrapMeterImpl(&meterWithConstructorError{m.MeterProvider.Meter(iName, opts...).MeterImpl()})
|
||||
}
|
||||
|
||||
func (m *meterWithConstructorError) NewSyncInstrument(_ metric.Descriptor) (metric.SyncImpl, error) {
|
||||
@ -238,7 +252,7 @@ func TestErrorInDeferredConstructor(t *testing.T) {
|
||||
c1 := Must(meter).NewInt64Counter("test")
|
||||
c2 := Must(meter).NewInt64Counter("test")
|
||||
|
||||
_, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
sdk := &meterProviderWithConstructorError{provider}
|
||||
|
||||
require.Panics(t, func() {
|
||||
@ -280,7 +294,7 @@ func TestImplementationIndirection(t *testing.T) {
|
||||
require.False(t, ok)
|
||||
|
||||
// Register the SDK
|
||||
_, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
metricglobal.SetMeterProvider(provider)
|
||||
|
||||
// Repeat the above tests
|
||||
@ -309,7 +323,7 @@ func TestRecordBatchMock(t *testing.T) {
|
||||
|
||||
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
|
||||
|
||||
mock, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
metricglobal.SetMeterProvider(provider)
|
||||
|
||||
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
|
||||
@ -317,11 +331,13 @@ func TestRecordBatchMock(t *testing.T) {
|
||||
require.EqualValues(t,
|
||||
[]metrictest.Measured{
|
||||
{
|
||||
Name: "test.counter",
|
||||
InstrumentationName: "builtin",
|
||||
Labels: metrictest.LabelsToMap(),
|
||||
Number: asInt(1),
|
||||
Name: "test.counter",
|
||||
Library: metrictest.Library{
|
||||
InstrumentationName: "builtin",
|
||||
},
|
||||
Labels: metrictest.LabelsToMap(),
|
||||
Number: asInt(1),
|
||||
},
|
||||
},
|
||||
metrictest.AsStructs(mock.MeasurementBatches))
|
||||
metrictest.AsStructs(provider.MeasurementBatches))
|
||||
}
|
||||
|
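As a usage note (not part of the diff), a test against the reworked metrictest package now looks roughly like this; the meter name, version, and label are illustrative:

provider := metrictest.NewMeterProvider()
metricglobal.SetMeterProvider(provider)

meter := metricglobal.Meter("example", metric.WithInstrumentationVersion("v0.1.0"))
counter := metric.Must(meter).NewInt64Counter("example.counter")
counter.Add(context.Background(), 1, attribute.String("A", "B"))

provider.RunAsyncInstruments() // needed only when asynchronous instruments are registered
got := metrictest.AsStructs(provider.MeasurementBatches)
// Each Measured now carries a Library{InstrumentationName, InstrumentationVersion, SchemaURL}
// instead of flat InstrumentationName/InstrumentationVersion fields.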
@ -21,8 +21,9 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/registry"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -78,6 +79,10 @@ func TestRegistrySameInstruments(t *testing.T) {
|
||||
require.NoError(t, err1)
|
||||
require.NoError(t, err2)
|
||||
require.Equal(t, inst1, inst2)
|
||||
|
||||
SetMeterProvider(metrictest.NewMeterProvider())
|
||||
|
||||
require.Equal(t, inst1, inst2)
|
||||
}
|
||||
}
|
||||
|
||||
@ -89,6 +94,16 @@ func TestRegistryDifferentNamespace(t *testing.T) {
|
||||
|
||||
require.NoError(t, err1)
|
||||
require.NoError(t, err2)
|
||||
|
||||
if inst1.Descriptor().InstrumentKind().Synchronous() {
|
||||
// They're equal because of a `nil` pointer at this point.
|
||||
// (Only for synchronous instruments, which lack callbacks.)
|
||||
require.EqualValues(t, inst1, inst2)
|
||||
}
|
||||
|
||||
SetMeterProvider(metrictest.NewMeterProvider())
|
||||
|
||||
// They're different after the deferred setup.
|
||||
require.NotEqual(t, inst1, inst2)
|
||||
}
|
||||
}
|
||||
@ -109,7 +124,7 @@ func TestRegistryDiffInstruments(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
require.NotNil(t, other)
|
||||
require.True(t, errors.Is(err, registry.ErrMetricKindMismatch))
|
||||
require.Contains(t, err.Error(), "super")
|
||||
require.Contains(t, err.Error(), "by this name with another kind or number type")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -21,4 +21,4 @@ This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
*/
|
||||
package registry // import "go.opentelemetry.io/otel/metric/registry"
|
||||
package registry // import "go.opentelemetry.io/otel/internal/metric/registry"
|
@ -23,77 +23,46 @@ import (
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
// MeterProvider is a standard MeterProvider for wrapping `MeterImpl`
|
||||
type MeterProvider struct {
|
||||
impl metric.MeterImpl
|
||||
}
|
||||
|
||||
var _ metric.MeterProvider = (*MeterProvider)(nil)
|
||||
|
||||
// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
|
||||
// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter
|
||||
// to wrap an implementation with uniqueness checking.
|
||||
type uniqueInstrumentMeterImpl struct {
|
||||
// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
|
||||
// uniqueness checking for instrument descriptors.
|
||||
type UniqueInstrumentMeterImpl struct {
|
||||
lock sync.Mutex
|
||||
impl metric.MeterImpl
|
||||
state map[key]metric.InstrumentImpl
|
||||
state map[string]metric.InstrumentImpl
|
||||
}
|
||||
|
||||
var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
|
||||
|
||||
type key struct {
|
||||
instrumentName string
|
||||
instrumentationName string
|
||||
InstrumentationVersion string
|
||||
}
|
||||
|
||||
// NewMeterProvider returns a new provider that implements instrument
|
||||
// name-uniqueness checking.
|
||||
func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
|
||||
return &MeterProvider{
|
||||
impl: NewUniqueInstrumentMeterImpl(impl),
|
||||
}
|
||||
}
|
||||
|
||||
// Meter implements MeterProvider.
|
||||
func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
|
||||
return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
|
||||
}
|
||||
var _ metric.MeterImpl = (*UniqueInstrumentMeterImpl)(nil)
|
||||
|
||||
// ErrMetricKindMismatch is the standard error for mismatched metric
|
||||
// instrument definitions.
|
||||
var ErrMetricKindMismatch = fmt.Errorf(
|
||||
"a metric was already registered by this name with another kind or number type")
|
||||
|
||||
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
|
||||
// the addition of uniqueness checking.
|
||||
func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
|
||||
return &uniqueInstrumentMeterImpl{
|
||||
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl
|
||||
// with the addition of instrument name uniqueness checking.
|
||||
func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) *UniqueInstrumentMeterImpl {
|
||||
return &UniqueInstrumentMeterImpl{
|
||||
impl: impl,
|
||||
state: map[key]metric.InstrumentImpl{},
|
||||
state: map[string]metric.InstrumentImpl{},
|
||||
}
|
||||
}
|
||||
|
||||
// MeterImpl gives the caller access to the underlying MeterImpl
|
||||
// used by this UniqueInstrumentMeterImpl.
|
||||
func (u *UniqueInstrumentMeterImpl) MeterImpl() metric.MeterImpl {
|
||||
return u.impl
|
||||
}
|
||||
|
||||
// RecordBatch implements metric.MeterImpl.
|
||||
func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...metric.Measurement) {
|
||||
func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...metric.Measurement) {
|
||||
u.impl.RecordBatch(ctx, labels, ms...)
|
||||
}
|
||||
|
||||
func keyOf(descriptor metric.Descriptor) key {
|
||||
return key{
|
||||
descriptor.Name(),
|
||||
descriptor.InstrumentationName(),
|
||||
descriptor.InstrumentationVersion(),
|
||||
}
|
||||
}
|
||||
|
||||
// NewMetricKindMismatchError formats an error that describes a
|
||||
// mismatched metric instrument definition.
|
||||
func NewMetricKindMismatchError(desc metric.Descriptor) error {
|
||||
return fmt.Errorf("metric was %s (%s %s)registered as a %s %s: %w",
|
||||
return fmt.Errorf("metric %s registered as %s %s: %w",
|
||||
desc.Name(),
|
||||
desc.InstrumentationName(),
|
||||
desc.InstrumentationVersion(),
|
||||
desc.NumberKind(),
|
||||
desc.InstrumentKind(),
|
||||
ErrMetricKindMismatch)
|
||||
@ -111,8 +80,8 @@ func Compatible(candidate, existing metric.Descriptor) bool {
|
||||
// `descriptor` argument. If there is an existing compatible
|
||||
// registration, this returns the already-registered instrument. If
|
||||
// there is no conflict and no prior registration, returns (nil, nil).
|
||||
func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
|
||||
impl, ok := u.state[keyOf(descriptor)]
|
||||
func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
|
||||
impl, ok := u.state[descriptor.Name()]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
@ -125,7 +94,7 @@ func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor
|
||||
}
|
||||
|
||||
// NewSyncInstrument implements metric.MeterImpl.
|
||||
func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
|
||||
func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
|
||||
u.lock.Lock()
|
||||
defer u.lock.Unlock()
|
||||
|
||||
@ -141,12 +110,12 @@ func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descript
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.state[keyOf(descriptor)] = syncInst
|
||||
u.state[descriptor.Name()] = syncInst
|
||||
return syncInst, nil
|
||||
}
|
||||
|
||||
// NewAsyncInstrument implements metric.MeterImpl.
|
||||
func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
|
||||
func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument(
|
||||
descriptor metric.Descriptor,
|
||||
runner metric.AsyncRunner,
|
||||
) (metric.AsyncImpl, error) {
|
||||
@ -165,6 +134,6 @@ func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.state[keyOf(descriptor)] = asyncInst
|
||||
u.state[descriptor.Name()] = asyncInst
|
||||
return asyncInst, nil
|
||||
}
|
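A brief sketch (assumed usage, matching the call sites updated in this commit) of the relocated and now-exported registry type:

// Wrap any MeterImpl with instrument name-uniqueness checking, then build a Meter from it.
unique := registry.NewUniqueInstrumentMeterImpl(impl) // now returns *UniqueInstrumentMeterImpl
meter := metric.WrapMeterImpl(unique)                 // instrumentation identity is no longer passed here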
@ -21,9 +21,9 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/registry"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -70,11 +70,17 @@ func unwrap(impl interface{}, err error) (metric.InstrumentImpl, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func testMeterWithRegistry(name string) metric.Meter {
|
||||
return metric.WrapMeterImpl(
|
||||
registry.NewUniqueInstrumentMeterImpl(
|
||||
metrictest.NewMeterProvider().Meter(name).MeterImpl(),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
func TestRegistrySameInstruments(t *testing.T) {
|
||||
for _, nf := range allNew {
|
||||
_, provider := metrictest.NewMeterProvider()
|
||||
|
||||
meter := provider.Meter("meter")
|
||||
meter := testMeterWithRegistry("meter")
|
||||
inst1, err1 := nf(meter, "this")
|
||||
inst2, err2 := nf(meter, "this")
|
||||
|
||||
@ -86,7 +92,7 @@ func TestRegistrySameInstruments(t *testing.T) {
|
||||
|
||||
func TestRegistryDifferentNamespace(t *testing.T) {
|
||||
for _, nf := range allNew {
|
||||
_, provider := metrictest.NewMeterProvider()
|
||||
provider := metrictest.NewMeterProvider()
|
||||
|
||||
meter1 := provider.Meter("meter1")
|
||||
meter2 := provider.Meter("meter2")
|
||||
@ -101,8 +107,7 @@ func TestRegistryDifferentNamespace(t *testing.T) {
|
||||
|
||||
func TestRegistryDiffInstruments(t *testing.T) {
|
||||
for origName, origf := range allNew {
|
||||
_, provider := metrictest.NewMeterProvider()
|
||||
meter := provider.Meter("meter")
|
||||
meter := testMeterWithRegistry("meter")
|
||||
|
||||
_, err := origf(meter, "this")
|
||||
require.NoError(t, err)
|
||||
@ -119,14 +124,3 @@ func TestRegistryDiffInstruments(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMeterProvider(t *testing.T) {
|
||||
impl, _ := metrictest.NewMeter()
|
||||
p := registry.NewMeterProvider(impl)
|
||||
m1 := p.Meter("m1")
|
||||
m1p := p.Meter("m1")
|
||||
m2 := p.Meter("m2")
|
||||
|
||||
require.Equal(t, m1, m1p)
|
||||
require.NotEqual(t, m1, m2)
|
||||
}
|
@ -20,10 +20,8 @@ import (
|
||||
|
||||
// InstrumentConfig contains options for metric instrument descriptors.
|
||||
type InstrumentConfig struct {
|
||||
description string
|
||||
unit unit.Unit
|
||||
instrumentationName string
|
||||
instrumentationVersion string
|
||||
description string
|
||||
unit unit.Unit
|
||||
}
|
||||
|
||||
// Description describes the instrument in human-readable terms.
|
||||
@ -36,18 +34,6 @@ func (cfg InstrumentConfig) Unit() unit.Unit {
|
||||
return cfg.unit
|
||||
}
|
||||
|
||||
// InstrumentationName is the name of the library providing
|
||||
// instrumentation.
|
||||
func (cfg InstrumentConfig) InstrumentationName() string {
|
||||
return cfg.instrumentationName
|
||||
}
|
||||
|
||||
// InstrumentationVersion is the version of the library providing
|
||||
// instrumentation.
|
||||
func (cfg InstrumentConfig) InstrumentationVersion() string {
|
||||
return cfg.instrumentationVersion
|
||||
}
|
||||
|
||||
// InstrumentOption is an interface for applying metric instrument options.
|
||||
type InstrumentOption interface {
|
||||
// ApplyMeter is used to set a InstrumentOption value of a
|
||||
@ -85,16 +71,10 @@ func WithUnit(unit unit.Unit) InstrumentOption {
|
||||
})
|
||||
}
|
||||
|
||||
// WithInstrumentationName sets the instrumentation name.
|
||||
func WithInstrumentationName(name string) InstrumentOption {
|
||||
return instrumentOptionFunc(func(cfg *InstrumentConfig) {
|
||||
cfg.instrumentationName = name
|
||||
})
|
||||
}
|
||||
|
||||
// MeterConfig contains options for Meters.
|
||||
type MeterConfig struct {
|
||||
instrumentationVersion string
|
||||
schemaURL string
|
||||
}
|
||||
|
||||
// InstrumentationVersion is the version of the library providing instrumentation.
|
||||
@ -102,6 +82,11 @@ func (cfg MeterConfig) InstrumentationVersion() string {
|
||||
return cfg.instrumentationVersion
|
||||
}
|
||||
|
||||
// SchemaURL is the schema_url of the library providing instrumentation.
|
||||
func (cfg MeterConfig) SchemaURL() string {
|
||||
return cfg.schemaURL
|
||||
}
|
||||
|
||||
// MeterOption is an interface for applying Meter options.
|
||||
type MeterOption interface {
|
||||
// ApplyMeter is used to set a MeterOption value of a MeterConfig.
|
||||
@ -118,24 +103,22 @@ func NewMeterConfig(opts ...MeterOption) MeterConfig {
|
||||
return config
|
||||
}
|
||||
|
||||
// InstrumentMeterOption are options that can be used as both an InstrumentOption
|
||||
// and MeterOption
|
||||
type InstrumentMeterOption interface {
|
||||
InstrumentOption
|
||||
MeterOption
|
||||
type meterOptionFunc func(*MeterConfig)
|
||||
|
||||
func (fn meterOptionFunc) applyMeter(cfg *MeterConfig) {
|
||||
fn(cfg)
|
||||
}
|
||||
|
||||
// WithInstrumentationVersion sets the instrumentation version.
|
||||
func WithInstrumentationVersion(version string) InstrumentMeterOption {
|
||||
return instrumentationVersionOption(version)
|
||||
func WithInstrumentationVersion(version string) MeterOption {
|
||||
return meterOptionFunc(func(config *MeterConfig) {
|
||||
config.instrumentationVersion = version
|
||||
})
|
||||
}
|
||||
|
||||
type instrumentationVersionOption string
|
||||
|
||||
func (i instrumentationVersionOption) applyMeter(config *MeterConfig) {
|
||||
config.instrumentationVersion = string(i)
|
||||
}
|
||||
|
||||
func (i instrumentationVersionOption) applyInstrument(config *InstrumentConfig) {
|
||||
config.instrumentationVersion = string(i)
|
||||
// WithSchemaURL sets the schema URL.
|
||||
func WithSchemaURL(schemaURL string) MeterOption {
|
||||
return meterOptionFunc(func(config *MeterConfig) {
|
||||
config.schemaURL = schemaURL
|
||||
})
|
||||
}
|
||||
|
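To illustrate the option split (a sketch; the library name and schema URL are placeholders): instrumentation identity now belongs to the Meter, while instrument options carry only description and unit.

meter := provider.Meter(
	"mylib",
	metric.WithInstrumentationVersion("1.2.3"),
	metric.WithSchemaURL("https://example.com/schemas/1.0.0"),
)
counter := metric.Must(meter).NewInt64Counter(
	"mylib.requests",
	metric.WithDescription("processed requests"),
	metric.WithUnit("1"),
)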
@ -38,8 +38,7 @@ type MeterProvider interface {
|
||||
//
|
||||
// An uninitialized Meter is a no-op implementation.
|
||||
type Meter struct {
|
||||
impl MeterImpl
|
||||
name, version string
|
||||
impl MeterImpl
|
||||
}
|
||||
|
||||
// RecordBatch atomically records a batch of measurements.
|
||||
@ -285,9 +284,8 @@ func (m Meter) newAsync(
|
||||
if m.impl == nil {
|
||||
return NoopAsync{}, nil
|
||||
}
|
||||
desc := NewDescriptor(name, mkind, nkind, opts...)
|
||||
desc.config.instrumentationName = m.name
|
||||
desc.config.instrumentationVersion = m.version
|
||||
cfg := NewInstrumentConfig(opts...)
|
||||
desc := NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit)
|
||||
return m.impl.NewAsyncInstrument(desc, runner)
|
||||
}
|
||||
|
||||
@ -304,9 +302,8 @@ func (m Meter) newSync(
|
||||
if m.impl == nil {
|
||||
return NoopSync{}, nil
|
||||
}
|
||||
desc := NewDescriptor(name, metricKind, numberKind, opts...)
|
||||
desc.config.instrumentationName = m.name
|
||||
desc.config.instrumentationVersion = m.version
|
||||
cfg := NewInstrumentConfig(opts...)
|
||||
desc := NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit)
|
||||
return m.impl.NewSyncInstrument(desc)
|
||||
}
|
||||
|
||||
@ -524,16 +521,18 @@ type Descriptor struct {
|
||||
name string
|
||||
instrumentKind sdkapi.InstrumentKind
|
||||
numberKind number.Kind
|
||||
config InstrumentConfig
|
||||
description string
|
||||
unit unit.Unit
|
||||
}
|
||||
|
||||
// NewDescriptor returns a Descriptor with the given contents.
|
||||
func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...InstrumentOption) Descriptor {
|
||||
func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor {
|
||||
return Descriptor{
|
||||
name: name,
|
||||
instrumentKind: ikind,
|
||||
numberKind: nkind,
|
||||
config: NewInstrumentConfig(opts...),
|
||||
description: description,
|
||||
unit: unit,
|
||||
}
|
||||
}
|
||||
|
||||
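For reference, a hedged example of the new NewDescriptor signature, which takes the description and unit directly instead of InstrumentOptions (the values are illustrative):

desc := metric.NewDescriptor(
	"request.duration",
	sdkapi.HistogramInstrumentKind,
	number.Float64Kind,
	"duration of handled requests",
	unit.Milliseconds,
)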
@ -550,13 +549,13 @@ func (d Descriptor) InstrumentKind() sdkapi.InstrumentKind {
|
||||
// Description provides a human-readable description of the metric
|
||||
// instrument.
|
||||
func (d Descriptor) Description() string {
|
||||
return d.config.Description()
|
||||
return d.description
|
||||
}
|
||||
|
||||
// Unit describes the units of the metric instrument. Unitless
|
||||
// metrics return the empty string.
|
||||
func (d Descriptor) Unit() unit.Unit {
|
||||
return d.config.Unit()
|
||||
return d.unit
|
||||
}
|
||||
|
||||
// NumberKind returns whether this instrument is declared over int64,
|
||||
@ -564,15 +563,3 @@ func (d Descriptor) Unit() unit.Unit {
|
||||
func (d Descriptor) NumberKind() number.Kind {
|
||||
return d.numberKind
|
||||
}
|
||||
|
||||
// InstrumentationName returns the name of the library that provided
|
||||
// instrumentation for this instrument.
|
||||
func (d Descriptor) InstrumentationName() string {
|
||||
return d.config.InstrumentationName()
|
||||
}
|
||||
|
||||
// InstrumentationVersion returns the version of the library that provided
|
||||
// instrumentation for this instrument.
|
||||
func (d Descriptor) InstrumentationVersion() string {
|
||||
return d.config.InstrumentationVersion()
|
||||
}
|
||||
|
@ -86,10 +86,8 @@ type AsyncImpl interface {
|
||||
|
||||
// WrapMeterImpl constructs a `Meter` implementation from a
|
||||
// `MeterImpl` implementation.
|
||||
func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
|
||||
func WrapMeterImpl(impl MeterImpl) Meter {
|
||||
return Meter{
|
||||
impl: impl,
|
||||
name: instrumentationName,
|
||||
version: NewMeterConfig(opts...).InstrumentationVersion(),
|
||||
impl: impl,
|
||||
}
|
||||
}
|
||||
|
@ -120,16 +120,16 @@ func TestPrecomputedSum(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyValue, mock *metrictest.MeterImpl, nkind number.Kind, mkind sdkapi.InstrumentKind, instrument metric.InstrumentImpl, expected ...float64) {
|
||||
func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, instrument metric.InstrumentImpl, expected ...float64) {
|
||||
t.Helper()
|
||||
|
||||
batchesCount := len(mock.MeasurementBatches)
|
||||
if len(mock.MeasurementBatches) != len(expected) {
|
||||
t.Errorf("Expected %d recorded measurement batches, got %d", batchesCount, len(mock.MeasurementBatches))
|
||||
batchesCount := len(provider.MeasurementBatches)
|
||||
if len(provider.MeasurementBatches) != len(expected) {
|
||||
t.Errorf("Expected %d recorded measurement batches, got %d", batchesCount, len(provider.MeasurementBatches))
|
||||
}
|
||||
recorded := metrictest.AsStructs(mock.MeasurementBatches)
|
||||
recorded := metrictest.AsStructs(provider.MeasurementBatches)
|
||||
|
||||
for i, batch := range mock.MeasurementBatches {
|
||||
for i, batch := range provider.MeasurementBatches {
|
||||
if len(batch.Measurements) != 1 {
|
||||
t.Errorf("Expected 1 measurement in batch %d, got %d", i, len(batch.Measurements))
|
||||
}
|
||||
@ -138,10 +138,12 @@ func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyV
|
||||
descriptor := measurement.Instrument.Descriptor()
|
||||
|
||||
expected := metrictest.Measured{
|
||||
Name: descriptor.Name(),
|
||||
InstrumentationName: descriptor.InstrumentationName(),
|
||||
Labels: metrictest.LabelsToMap(labels...),
|
||||
Number: metrictest.ResolveNumberByKind(t, nkind, expected[i]),
|
||||
Name: descriptor.Name(),
|
||||
Library: metrictest.Library{
|
||||
InstrumentationName: "apitest",
|
||||
},
|
||||
Labels: metrictest.LabelsToMap(labels...),
|
||||
Number: metrictest.ResolveNumberByKind(t, nkind, expected[i]),
|
||||
}
|
||||
require.Equal(t, expected, recorded[i])
|
||||
}
|
||||
@ -149,31 +151,25 @@ func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyV
|
||||
|
||||
func TestOptions(t *testing.T) {
|
||||
type testcase struct {
|
||||
name string
|
||||
opts []metric.InstrumentOption
|
||||
desc string
|
||||
unit unit.Unit
|
||||
iName string
|
||||
iVer string
|
||||
name string
|
||||
opts []metric.InstrumentOption
|
||||
desc string
|
||||
unit unit.Unit
|
||||
}
|
||||
testcases := []testcase{
|
||||
{
|
||||
name: "no opts",
|
||||
opts: nil,
|
||||
desc: "",
|
||||
unit: "",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
name: "no opts",
|
||||
opts: nil,
|
||||
desc: "",
|
||||
unit: "",
|
||||
},
|
||||
{
|
||||
name: "description",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithDescription("stuff"),
|
||||
},
|
||||
desc: "stuff",
|
||||
unit: "",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
desc: "stuff",
|
||||
unit: "",
|
||||
},
|
||||
{
|
||||
name: "description override",
|
||||
@ -181,20 +177,16 @@ func TestOptions(t *testing.T) {
|
||||
metric.WithDescription("stuff"),
|
||||
metric.WithDescription("things"),
|
||||
},
|
||||
desc: "things",
|
||||
unit: "",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
desc: "things",
|
||||
unit: "",
|
||||
},
|
||||
{
|
||||
name: "unit",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithUnit("s"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "s",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
desc: "",
|
||||
unit: "s",
|
||||
},
|
||||
{
|
||||
name: "description override",
|
||||
@ -202,20 +194,16 @@ func TestOptions(t *testing.T) {
|
||||
metric.WithDescription("stuff"),
|
||||
metric.WithDescription("things"),
|
||||
},
|
||||
desc: "things",
|
||||
unit: "",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
desc: "things",
|
||||
unit: "",
|
||||
},
|
||||
{
|
||||
name: "unit",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithUnit("s"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "s",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
desc: "",
|
||||
unit: "s",
|
||||
},
|
||||
|
||||
{
|
||||
@ -224,67 +212,17 @@ func TestOptions(t *testing.T) {
|
||||
metric.WithUnit("s"),
|
||||
metric.WithUnit("h"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "h",
|
||||
iName: "",
|
||||
iVer: "",
|
||||
},
|
||||
{
|
||||
name: "name",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithInstrumentationName("n"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "",
|
||||
iName: "n",
|
||||
iVer: "",
|
||||
},
|
||||
|
||||
{
|
||||
name: "name override",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithInstrumentationName("n"),
|
||||
metric.WithInstrumentationName("o"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "",
|
||||
iName: "o",
|
||||
iVer: "",
|
||||
},
|
||||
{
|
||||
name: "version",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithInstrumentationVersion("v"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "",
|
||||
iName: "",
|
||||
iVer: "v",
|
||||
},
|
||||
|
||||
{
|
||||
name: "version override",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithInstrumentationVersion("v"),
|
||||
metric.WithInstrumentationVersion("q"),
|
||||
},
|
||||
desc: "",
|
||||
unit: "",
|
||||
iName: "",
|
||||
iVer: "q",
|
||||
desc: "",
|
||||
unit: "h",
|
||||
},
|
||||
{
|
||||
name: "all",
|
||||
opts: []metric.InstrumentOption{
|
||||
metric.WithDescription("stuff"),
|
||||
metric.WithUnit("s"),
|
||||
metric.WithInstrumentationName("n"),
|
||||
metric.WithInstrumentationVersion("v"),
|
||||
},
|
||||
desc: "stuff",
|
||||
unit: "s",
|
||||
iName: "n",
|
||||
iVer: "v",
|
||||
desc: "stuff",
|
||||
unit: "s",
|
||||
},
|
||||
}
|
||||
for idx, tt := range testcases {
|
||||
@ -296,20 +234,18 @@ func TestOptions(t *testing.T) {
|
||||
if diff := cmp.Diff(cfg.Unit(), tt.unit); diff != "" {
|
||||
t.Errorf("Compare Unit: -got +want %s", diff)
|
||||
}
|
||||
if diff := cmp.Diff(cfg.InstrumentationName(), tt.iName); diff != "" {
|
||||
t.Errorf("Compare InstrumentationNam: -got +want %s", diff)
|
||||
}
|
||||
if diff := cmp.Diff(cfg.InstrumentationVersion(), tt.iVer); diff != "" {
|
||||
t.Errorf("Compare InstrumentationVersion: -got +want %s", diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
func testPair() (*metrictest.MeterProvider, metric.Meter) {
|
||||
provider := metrictest.NewMeterProvider()
|
||||
return provider, provider.Meter("apitest")
|
||||
}
|
||||
|
||||
func TestCounter(t *testing.T) {
|
||||
// N.B. the API does not check for negative
|
||||
// values, that's the SDK's responsibility.
|
||||
t.Run("float64 counter", func(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
c := Must(meter).NewFloat64Counter("test.counter.float")
|
||||
ctx := context.Background()
|
||||
labels := []attribute.KeyValue{attribute.String("A", "B")}
|
||||
@ -317,12 +253,12 @@ func TestCounter(t *testing.T) {
|
||||
boundInstrument := c.Bind(labels...)
|
||||
boundInstrument.Add(ctx, -742)
|
||||
meter.RecordBatch(ctx, labels, c.Measurement(42))
|
||||
checkSyncBatches(ctx, t, labels, mockSDK, number.Float64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
|
||||
checkSyncBatches(ctx, t, labels, provider, number.Float64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
|
||||
1994.1, -742, 42,
|
||||
)
|
||||
})
|
||||
t.Run("int64 counter", func(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
c := Must(meter).NewInt64Counter("test.counter.int")
|
||||
ctx := context.Background()
|
||||
labels := []attribute.KeyValue{attribute.String("A", "B"), attribute.String("C", "D")}
|
||||
@ -330,13 +266,13 @@ func TestCounter(t *testing.T) {
|
||||
boundInstrument := c.Bind(labels...)
|
||||
boundInstrument.Add(ctx, 4200)
|
||||
meter.RecordBatch(ctx, labels, c.Measurement(420000))
|
||||
checkSyncBatches(ctx, t, labels, mockSDK, number.Int64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
|
||||
checkSyncBatches(ctx, t, labels, provider, number.Int64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
|
||||
42, 4200, 420000,
|
||||
)
|
||||
|
||||
})
|
||||
t.Run("int64 updowncounter", func(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
c := Must(meter).NewInt64UpDownCounter("test.updowncounter.int")
|
||||
ctx := context.Background()
|
||||
labels := []attribute.KeyValue{attribute.String("A", "B"), attribute.String("C", "D")}
|
||||
@ -344,12 +280,12 @@ func TestCounter(t *testing.T) {
|
||||
boundInstrument := c.Bind(labels...)
|
||||
boundInstrument.Add(ctx, -100)
|
||||
meter.RecordBatch(ctx, labels, c.Measurement(42))
|
||||
checkSyncBatches(ctx, t, labels, mockSDK, number.Int64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
|
||||
checkSyncBatches(ctx, t, labels, provider, number.Int64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
|
||||
100, -100, 42,
|
||||
)
|
||||
})
|
||||
t.Run("float64 updowncounter", func(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
c := Must(meter).NewFloat64UpDownCounter("test.updowncounter.float")
|
||||
ctx := context.Background()
|
||||
labels := []attribute.KeyValue{attribute.String("A", "B"), attribute.String("C", "D")}
|
||||
@ -357,7 +293,7 @@ func TestCounter(t *testing.T) {
|
||||
boundInstrument := c.Bind(labels...)
|
||||
boundInstrument.Add(ctx, -76)
|
||||
meter.RecordBatch(ctx, labels, c.Measurement(-100.1))
|
||||
checkSyncBatches(ctx, t, labels, mockSDK, number.Float64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
|
||||
checkSyncBatches(ctx, t, labels, provider, number.Float64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
|
||||
100.1, -76, -100.1,
|
||||
)
|
||||
})
|
||||
@ -365,7 +301,7 @@ func TestCounter(t *testing.T) {
|
||||
|
||||
func TestHistogram(t *testing.T) {
|
||||
t.Run("float64 histogram", func(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
m := Must(meter).NewFloat64Histogram("test.histogram.float")
|
||||
ctx := context.Background()
|
||||
labels := []attribute.KeyValue{}
|
||||
@ -373,12 +309,12 @@ func TestHistogram(t *testing.T) {
|
||||
boundInstrument := m.Bind(labels...)
|
||||
boundInstrument.Record(ctx, 0)
|
||||
meter.RecordBatch(ctx, labels, m.Measurement(-100.5))
|
||||
checkSyncBatches(ctx, t, labels, mockSDK, number.Float64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
|
||||
checkSyncBatches(ctx, t, labels, provider, number.Float64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
|
||||
42, 0, -100.5,
|
||||
)
|
||||
})
|
||||
t.Run("int64 histogram", func(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
m := Must(meter).NewInt64Histogram("test.histogram.int")
|
||||
ctx := context.Background()
|
||||
labels := []attribute.KeyValue{attribute.Int("I", 1)}
|
||||
@ -386,7 +322,7 @@ func TestHistogram(t *testing.T) {
|
||||
boundInstrument := m.Bind(labels...)
|
||||
boundInstrument.Record(ctx, 80)
|
||||
meter.RecordBatch(ctx, labels, m.Measurement(0))
|
||||
checkSyncBatches(ctx, t, labels, mockSDK, number.Int64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
|
||||
checkSyncBatches(ctx, t, labels, provider, number.Int64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
|
||||
173, 80, 0,
|
||||
)
|
||||
})
|
||||
@ -395,74 +331,74 @@ func TestHistogram(t *testing.T) {
|
||||
func TestObserverInstruments(t *testing.T) {
|
||||
t.Run("float gauge", func(t *testing.T) {
|
||||
labels := []attribute.KeyValue{attribute.String("O", "P")}
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
o := Must(meter).NewFloat64GaugeObserver("test.gauge.float", func(_ context.Context, result metric.Float64ObserverResult) {
|
||||
result.Observe(42.1, labels...)
|
||||
})
|
||||
mockSDK.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, mockSDK, number.Float64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
|
||||
provider.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, provider, number.Float64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
|
||||
42.1,
|
||||
)
|
||||
})
|
||||
t.Run("int gauge", func(t *testing.T) {
|
||||
labels := []attribute.KeyValue{}
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
o := Must(meter).NewInt64GaugeObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
|
||||
provider, meter := testPair()
|
||||
o := Must(meter).NewInt64GaugeObserver("test.gauge.int", func(_ context.Context, result metric.Int64ObserverResult) {
|
||||
result.Observe(-142, labels...)
|
||||
})
|
||||
mockSDK.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, mockSDK, number.Int64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
|
||||
provider.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, provider, number.Int64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
|
||||
-142,
|
||||
)
|
||||
})
|
||||
t.Run("float counterobserver", func(t *testing.T) {
|
||||
labels := []attribute.KeyValue{attribute.String("O", "P")}
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
o := Must(meter).NewFloat64CounterObserver("test.counterobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
|
||||
provider, meter := testPair()
|
||||
o := Must(meter).NewFloat64CounterObserver("test.counter.float", func(_ context.Context, result metric.Float64ObserverResult) {
|
||||
result.Observe(42.1, labels...)
|
||||
})
|
||||
mockSDK.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, mockSDK, number.Float64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
provider.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, provider, number.Float64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
42.1,
|
||||
)
|
||||
})
|
||||
t.Run("int counterobserver", func(t *testing.T) {
|
||||
labels := []attribute.KeyValue{}
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
o := Must(meter).NewInt64CounterObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
|
||||
provider, meter := testPair()
|
||||
o := Must(meter).NewInt64CounterObserver("test.counter.int", func(_ context.Context, result metric.Int64ObserverResult) {
|
||||
result.Observe(-142, labels...)
|
||||
})
|
||||
mockSDK.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, mockSDK, number.Int64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
provider.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, provider, number.Int64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
-142,
|
||||
)
|
||||
})
|
||||
t.Run("float updowncounterobserver", func(t *testing.T) {
|
||||
labels := []attribute.KeyValue{attribute.String("O", "P")}
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
o := Must(meter).NewFloat64UpDownCounterObserver("test.updowncounterobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
|
||||
provider, meter := testPair()
|
||||
o := Must(meter).NewFloat64UpDownCounterObserver("test.updowncounter.float", func(_ context.Context, result metric.Float64ObserverResult) {
|
||||
result.Observe(42.1, labels...)
|
||||
})
|
||||
mockSDK.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, mockSDK, number.Float64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
provider.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, provider, number.Float64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
42.1,
|
||||
)
|
||||
})
|
||||
t.Run("int updowncounterobserver", func(t *testing.T) {
|
||||
labels := []attribute.KeyValue{}
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
o := Must(meter).NewInt64UpDownCounterObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
|
||||
provider, meter := testPair()
|
||||
o := Must(meter).NewInt64UpDownCounterObserver("test..int", func(_ context.Context, result metric.Int64ObserverResult) {
|
||||
result.Observe(-142, labels...)
|
||||
})
|
||||
mockSDK.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, mockSDK, number.Int64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
provider.RunAsyncInstruments()
|
||||
checkObserverBatch(t, labels, provider, number.Int64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
|
||||
-142,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBatchObserverInstruments(t *testing.T) {
|
||||
mockSDK, meter := metrictest.NewMeter()
|
||||
provider, meter := testPair()
|
||||
|
||||
var obs1 metric.Int64GaugeObserver
|
||||
var obs2 metric.Float64GaugeObserver
|
||||
@ -480,12 +416,12 @@ func TestBatchObserverInstruments(t *testing.T) {
|
||||
)
|
||||
},
|
||||
)
|
||||
obs1 = cb.NewInt64GaugeObserver("test.observer.int")
|
||||
obs2 = cb.NewFloat64GaugeObserver("test.observer.float")
|
||||
obs1 = cb.NewInt64GaugeObserver("test.gauge.int")
|
||||
obs2 = cb.NewFloat64GaugeObserver("test.gauge.float")
|
||||
|
||||
mockSDK.RunAsyncInstruments()
|
||||
provider.RunAsyncInstruments()
|
||||
|
||||
require.Len(t, mockSDK.MeasurementBatches, 1)
|
||||
require.Len(t, provider.MeasurementBatches, 1)
|
||||
|
||||
impl1 := obs1.AsyncImpl().Implementation().(*metrictest.Async)
|
||||
impl2 := obs2.AsyncImpl().Implementation().(*metrictest.Async)
|
||||
@ -493,7 +429,7 @@ func TestBatchObserverInstruments(t *testing.T) {
|
||||
require.NotNil(t, impl1)
|
||||
require.NotNil(t, impl2)
|
||||
|
||||
got := mockSDK.MeasurementBatches[0]
|
||||
got := provider.MeasurementBatches[0]
|
||||
require.Equal(t, labels, got.Labels)
|
||||
require.Len(t, got.Measurements, 2)
|
||||
|
||||
@ -506,17 +442,17 @@ func TestBatchObserverInstruments(t *testing.T) {
|
||||
require.Equal(t, 0, m2.Number.CompareNumber(number.Float64Kind, metrictest.ResolveNumberByKind(t, number.Float64Kind, 42)))
|
||||
}
|
||||
|
||||
func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, mock *metrictest.MeterImpl, nkind number.Kind, mkind sdkapi.InstrumentKind, observer metric.AsyncImpl, expected float64) {
|
||||
func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, observer metric.AsyncImpl, expected float64) {
|
||||
t.Helper()
|
||||
assert.Len(t, mock.MeasurementBatches, 1)
|
||||
if len(mock.MeasurementBatches) < 1 {
|
||||
assert.Len(t, provider.MeasurementBatches, 1)
|
||||
if len(provider.MeasurementBatches) < 1 {
|
||||
return
|
||||
}
|
||||
o := observer.Implementation().(*metrictest.Async)
|
||||
if !assert.NotNil(t, o) {
|
||||
return
|
||||
}
|
||||
got := mock.MeasurementBatches[0]
|
||||
got := provider.MeasurementBatches[0]
|
||||
assert.Equal(t, labels, got.Labels)
|
||||
assert.Len(t, got.Measurements, 1)
|
||||
if len(got.Measurements) < 1 {
|
||||
@ -547,7 +483,7 @@ func (testWrappedMeter) NewAsyncInstrument(_ metric.Descriptor, _ metric.AsyncRu
|
||||
|
||||
func TestWrappedInstrumentError(t *testing.T) {
|
||||
impl := &testWrappedMeter{}
|
||||
meter := metric.WrapMeterImpl(impl, "test")
|
||||
meter := metric.WrapMeterImpl(impl)
|
||||
|
||||
histogram, err := meter.NewInt64Histogram("test.histogram")
|
||||
|
||||
@ -562,7 +498,7 @@ func TestWrappedInstrumentError(t *testing.T) {
|
||||
|
||||
func TestNilCallbackObserverNoop(t *testing.T) {
|
||||
// Tests that a nil callback yields a no-op observer without error.
|
||||
_, meter := metrictest.NewMeter()
|
||||
_, meter := testPair()
|
||||
|
||||
observer := Must(meter).NewInt64GaugeObserver("test.observer", nil)
|
||||
|
||||
|
@ -23,7 +23,7 @@ import (
|
||||
internalmetric "go.opentelemetry.io/otel/internal/metric"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/registry"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -32,21 +32,35 @@ type (
|
||||
Labels []attribute.KeyValue
|
||||
}
|
||||
|
||||
// Library is the same as "sdk/instrumentation".Library, but importing
|
||||
// it here would create a package cycle.
|
||||
Library struct {
|
||||
InstrumentationName string
|
||||
InstrumentationVersion string
|
||||
SchemaURL string
|
||||
}
|
||||
|
||||
Batch struct {
|
||||
// Measurement needs to be aligned for 64-bit atomic operations.
|
||||
Measurements []Measurement
|
||||
Ctx context.Context
|
||||
Labels []attribute.KeyValue
|
||||
LibraryName string
|
||||
Library Library
|
||||
}
|
||||
|
||||
// MeterImpl is an OpenTelemetry Meter implementation used for testing.
|
||||
MeterImpl struct {
|
||||
library Library
|
||||
provider *MeterProvider
|
||||
asyncInstruments *internalmetric.AsyncInstrumentState
|
||||
}
|
||||
|
||||
// MeterProvider is a collection of named MeterImpls used for testing.
|
||||
MeterProvider struct {
|
||||
lock sync.Mutex
|
||||
|
||||
MeasurementBatches []Batch
|
||||
|
||||
asyncInstruments *internalmetric.AsyncInstrumentState
|
||||
impls []*MeterImpl
|
||||
}
|
||||
|
||||
Measurement struct {
|
||||
@ -78,6 +92,13 @@ var (
|
||||
_ metric.AsyncImpl = &Async{}
|
||||
)
|
||||
|
||||
// NewDescriptor is a test helper for constructing test metric
|
||||
// descriptors using standard options.
|
||||
func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...metric.InstrumentOption) metric.Descriptor {
|
||||
cfg := metric.NewInstrumentConfig(opts...)
|
||||
return metric.NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit())
|
||||
}
|
||||
|
||||
func (i Instrument) Descriptor() metric.Descriptor {
|
||||
return i.descriptor
|
||||
}
|
||||
@ -115,22 +136,32 @@ func (m *MeterImpl) doRecordSingle(ctx context.Context, labels []attribute.KeyVa
|
||||
}})
|
||||
}
|
||||
|
||||
func NewMeterProvider() (*MeterImpl, metric.MeterProvider) {
|
||||
// NewMeterProvider returns a MeterProvider suitable for testing.
|
||||
// When the test is complete, consult MeterProvider.MeasurementBatches.
|
||||
func NewMeterProvider() *MeterProvider {
|
||||
return &MeterProvider{}
|
||||
}
|
||||
|
||||
// Meter implements metric.MeterProvider.
|
||||
func (p *MeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
cfg := metric.NewMeterConfig(opts...)
|
||||
impl := &MeterImpl{
|
||||
library: Library{
|
||||
InstrumentationName: name,
|
||||
InstrumentationVersion: cfg.InstrumentationVersion(),
|
||||
SchemaURL: cfg.SchemaURL(),
|
||||
},
|
||||
provider: p,
|
||||
asyncInstruments: internalmetric.NewAsyncInstrumentState(),
|
||||
}
|
||||
return impl, registry.NewMeterProvider(impl)
|
||||
}
|
||||
|
||||
func NewMeter() (*MeterImpl, metric.Meter) {
|
||||
impl, p := NewMeterProvider()
|
||||
return impl, p.Meter("mock")
|
||||
p.impls = append(p.impls, impl)
|
||||
return metric.WrapMeterImpl(impl)
|
||||
}
|
||||
|
||||
// NewSyncInstrument implements metric.MeterImpl.
|
||||
func (m *MeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
return &Sync{
|
||||
Instrument{
|
||||
descriptor: descriptor,
|
||||
@ -139,10 +170,8 @@ func (m *MeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.Sync
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewAsyncInstrument implements metric.MeterImpl.
|
||||
func (m *MeterImpl) NewAsyncInstrument(descriptor metric.Descriptor, runner metric.AsyncRunner) (metric.AsyncImpl, error) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
a := &Async{
|
||||
Instrument: Instrument{
|
||||
descriptor: descriptor,
|
||||
@ -150,10 +179,11 @@ func (m *MeterImpl) NewAsyncInstrument(descriptor metric.Descriptor, runner metr
|
||||
},
|
||||
runner: runner,
|
||||
}
|
||||
m.asyncInstruments.Register(a, runner)
|
||||
m.provider.registerAsyncInstrument(a, m, runner)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// RecordBatch implements metric.MeterImpl.
|
||||
func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...metric.Measurement) {
|
||||
mm := make([]Measurement, len(measurements))
|
||||
for i := 0; i < len(measurements); i++ {
|
||||
@ -166,6 +196,7 @@ func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue
|
||||
m.collect(ctx, labels, mm)
|
||||
}
|
||||
|
||||
// CollectAsync is called from asyncInstruments.Run() with the lock held.
|
||||
func (m *MeterImpl) CollectAsync(labels []attribute.KeyValue, obs ...metric.Observation) {
|
||||
mm := make([]Measurement, len(obs))
|
||||
for i := 0; i < len(obs); i++ {
|
||||
@ -178,29 +209,55 @@ func (m *MeterImpl) CollectAsync(labels []attribute.KeyValue, obs ...metric.Obse
|
||||
m.collect(context.Background(), labels, mm)
|
||||
}
|
||||
|
||||
// collect is called from CollectAsync() or RecordBatch() with the lock held.
|
||||
func (m *MeterImpl) collect(ctx context.Context, labels []attribute.KeyValue, measurements []Measurement) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
m.MeasurementBatches = append(m.MeasurementBatches, Batch{
|
||||
m.provider.addMeasurement(Batch{
|
||||
Ctx: ctx,
|
||||
Labels: labels,
|
||||
Measurements: measurements,
|
||||
Library: m.library,
|
||||
})
|
||||
}
|
||||
|
||||
func (m *MeterImpl) RunAsyncInstruments() {
|
||||
m.asyncInstruments.Run(context.Background(), m)
|
||||
// registerAsyncInstrument locks the provider and registers the new Async instrument.
func (p *MeterProvider) registerAsyncInstrument(a *Async, m *MeterImpl, runner metric.AsyncRunner) {
p.lock.Lock()
defer p.lock.Unlock()

m.asyncInstruments.Register(a, runner)
}

// addMeasurement locks the provider and adds the new measurement batch.
func (p *MeterProvider) addMeasurement(b Batch) {
p.lock.Lock()
defer p.lock.Unlock()
p.MeasurementBatches = append(p.MeasurementBatches, b)
}

// copyImpls locks the provider and copies the current list of *MeterImpls.
func (p *MeterProvider) copyImpls() []*MeterImpl {
p.lock.Lock()
defer p.lock.Unlock()
cpy := make([]*MeterImpl, len(p.impls))
copy(cpy, p.impls)
return cpy
}

// RunAsyncInstruments is used in tests to trigger collection from
// asynchronous instruments.
func (p *MeterProvider) RunAsyncInstruments() {
for _, impl := range p.copyImpls() {
impl.asyncInstruments.Run(context.Background(), impl)
}
}

// Measured is the helper struct which provides flat representation of recorded measurements
// to simplify testing
type Measured struct {
Name string
InstrumentationName string
InstrumentationVersion string
Labels map[attribute.Key]attribute.Value
Number number.Number
Name string
Labels map[attribute.Key]attribute.Value
Number number.Number
Library Library
}
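Aside, not part of the diff: a companion sketch for asynchronous instruments (library, instrument name, and observed value are hypothetical). Observer callbacks registered through the test provider only run when RunAsyncInstruments is called, after which the observations appear in MeasurementBatches tagged with their Library.

provider := metrictest.NewMeterProvider()
meter := provider.Meter("async.library")

_ = metric.Must(meter).NewInt64CounterObserver("goroutines.lastvalue",
	func(_ context.Context, result metric.Int64ObserverResult) {
		result.Observe(42)
	},
)

// Nothing is recorded until the test explicitly triggers async collection.
provider.RunAsyncInstruments()
// provider.MeasurementBatches now holds one observation for "async.library".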
|
||||
|
||||
// LabelsToMap converts label set to keyValue map, to be easily used in tests
|
||||
@ -218,11 +275,10 @@ func AsStructs(batches []Batch) []Measured {
|
||||
for _, batch := range batches {
|
||||
for _, m := range batch.Measurements {
|
||||
r = append(r, Measured{
|
||||
Name: m.Instrument.Descriptor().Name(),
|
||||
InstrumentationName: m.Instrument.Descriptor().InstrumentationName(),
|
||||
InstrumentationVersion: m.Instrument.Descriptor().InstrumentationVersion(),
|
||||
Labels: LabelsToMap(batch.Labels...),
|
||||
Number: m.Number,
|
||||
Name: m.Instrument.Descriptor().Name(),
|
||||
Labels: LabelsToMap(batch.Labels...),
|
||||
Number: m.Number,
|
||||
Library: batch.Library,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -19,7 +19,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
@ -60,7 +60,7 @@ func TestExportKindSelectors(t *testing.T) {
|
||||
seks := StatelessExportKindSelector()
|
||||
|
||||
for _, ikind := range append(deltaMemoryKinds, cumulativeMemoryKinds...) {
|
||||
desc := metric.NewDescriptor("instrument", ikind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("instrument", ikind, number.Int64Kind)
|
||||
|
||||
var akind aggregation.Kind
|
||||
if ikind.Adding() {
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
@ -111,13 +112,13 @@ type Checkpointer interface {
|
||||
// any time.
|
||||
Processor
|
||||
|
||||
// CheckpointSet returns the current data set. This may be
|
||||
// Reader returns the current data set. This may be
|
||||
// called before and after collection. The
|
||||
// implementation is required to return the same value
|
||||
// throughout its lifetime, since CheckpointSet exposes a
|
||||
// throughout its lifetime, since Reader exposes a
|
||||
// sync.Locker interface. The caller is responsible for
|
||||
// locking the CheckpointSet before initiating collection.
|
||||
CheckpointSet() CheckpointSet
|
||||
// locking the Reader before initiating collection.
|
||||
Reader() Reader
|
||||
|
||||
// StartCollection begins a collection interval.
|
||||
StartCollection()
|
||||
@ -126,6 +127,12 @@ type Checkpointer interface {
|
||||
FinishCollection() error
|
||||
}
|
||||
|
||||
// CheckpointerFactory is an interface for producing configured
|
||||
// Checkpointer instances.
|
||||
type CheckpointerFactory interface {
|
||||
NewCheckpointer() Checkpointer
|
||||
}
|
||||
|
||||
// Aggregator implements a specific aggregation behavior, e.g., a
|
||||
// behavior to track a sequence of updates to an instrument. Counter
|
||||
// instruments commonly use a simple Sum aggregator, but for the
|
||||
@ -209,9 +216,9 @@ type Exporter interface {
// The Context comes from the controller that initiated
// collection.
//
// The CheckpointSet interface refers to the Processor that just
// completed collection.
Export(ctx context.Context, resource *resource.Resource, checkpointSet CheckpointSet) error
// The InstrumentationLibraryReader interface refers to the
// Processor that just completed collection.
Export(ctx context.Context, resource *resource.Resource, reader InstrumentationLibraryReader) error

// ExportKindSelector is an interface used by the Processor
// in deciding whether to compute Delta or Cumulative
@ -229,11 +236,20 @@ type ExportKindSelector interface {
ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) ExportKind
}

// CheckpointSet allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor. This is passed to the
// Exporter which may then use ForEach to iterate over the collection
// of aggregated metrics.
type CheckpointSet interface {
// InstrumentationLibraryReader is an interface for exporters to iterate
// over one instrumentation library of metric data at a time.
type InstrumentationLibraryReader interface {
// ForEach calls the passed function once per instrumentation library,
// allowing the caller to emit metrics grouped by the library that
// produced them.
ForEach(readerFunc func(instrumentation.Library, Reader) error) error
}
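Aside, not part of the diff: a sketch of how an exporter can walk the new two-level reader. The function and its printed format are hypothetical; the import paths match those used elsewhere in this commit.

package example

import (
	"context"
	"fmt"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
)

// exportAll prints one line per record, grouped by the instrumentation
// library that produced it.
func exportAll(_ context.Context, reader export.InstrumentationLibraryReader, kind export.ExportKindSelector) error {
	return reader.ForEach(func(lib instrumentation.Library, r export.Reader) error {
		return r.ForEach(kind, func(rec export.Record) error {
			fmt.Printf("%s %s: %s\n", lib.Name, lib.Version, rec.Descriptor().Name())
			return nil
		})
	})
}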

// Reader allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor for a single library of
// metric data. This is passed to the Exporter which may then use
// ForEach to iterate over the collection of aggregated metrics.
type Reader interface {
// ForEach iterates over aggregated checkpoints for all
// metrics that were updated during the last collection
// period. Each aggregated checkpoint returned by the
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
@ -74,7 +75,7 @@ func TestRangeTest(t *testing.T) {
|
||||
// Only Counters implement a range test.
|
||||
for _, nkind := range []number.Kind{number.Float64Kind, number.Int64Kind} {
|
||||
t.Run(nkind.String(), func(t *testing.T) {
|
||||
desc := metric.NewDescriptor(
|
||||
desc := metrictest.NewDescriptor(
|
||||
"name",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
nkind,
|
||||
@ -92,7 +93,7 @@ func TestNaNTest(t *testing.T) {
|
||||
sdkapi.HistogramInstrumentKind,
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
} {
|
||||
desc := metric.NewDescriptor(
|
||||
desc := metrictest.NewDescriptor(
|
||||
"name",
|
||||
mkind,
|
||||
nkind,
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
|
||||
ottest "go.opentelemetry.io/otel/internal/internaltest"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
@ -66,7 +67,7 @@ func newProfiles() []Profile {
|
||||
}
|
||||
|
||||
func NewAggregatorTest(mkind sdkapi.InstrumentKind, nkind number.Kind) *metric.Descriptor {
|
||||
desc := metric.NewDescriptor("test.name", mkind, nkind)
|
||||
desc := metrictest.NewDescriptor("test.name", mkind, nkind)
|
||||
return &desc
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,7 @@ func newFixture(b *testing.B) *benchFixture {
|
||||
}
|
||||
|
||||
bf.accumulator = sdk.NewAccumulator(bf)
|
||||
bf.meter = metric.WrapMeterImpl(bf.accumulator, "benchmarks")
|
||||
bf.meter = metric.WrapMeterImpl(bf.accumulator)
|
||||
return bf
|
||||
}
|
||||
|
||||
|
@ -21,9 +21,10 @@ import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/registry"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
sdk "go.opentelemetry.io/otel/sdk/metric"
|
||||
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
@ -52,19 +53,25 @@ var ErrControllerStarted = fmt.Errorf("controller already started")
|
||||
// collection
|
||||
//
|
||||
// The controller supports mixing push and pull access to metric data
|
||||
// using the export.CheckpointSet RWLock interface. Collection will
|
||||
// using the export.Reader RWLock interface. Collection will
|
||||
// be blocked by a pull request in the basic controller.
|
||||
type Controller struct {
|
||||
lock sync.Mutex
|
||||
accumulator *sdk.Accumulator
|
||||
provider *registry.MeterProvider
|
||||
checkpointer export.Checkpointer
|
||||
resource *resource.Resource
|
||||
exporter export.Exporter
|
||||
wg sync.WaitGroup
|
||||
stopCh chan struct{}
|
||||
clock controllerTime.Clock
|
||||
ticker controllerTime.Ticker
|
||||
// lock protects libraries and synchronizes Start() and Stop().
|
||||
lock sync.Mutex
|
||||
// TODO: libraries is synchronized by lock, but could be
|
||||
// accomplished using a sync.Map. The SDK specification will
|
||||
// probably require this, as the draft already states that
|
||||
// Stop() and MeterProvider.Meter() should not block each
|
||||
// other.
|
||||
libraries map[instrumentation.Library]*registry.UniqueInstrumentMeterImpl
|
||||
checkpointerFactory export.CheckpointerFactory
|
||||
|
||||
resource *resource.Resource
|
||||
exporter export.Exporter
|
||||
wg sync.WaitGroup
|
||||
stopCh chan struct{}
|
||||
clock controllerTime.Clock
|
||||
ticker controllerTime.Ticker
|
||||
|
||||
collectPeriod time.Duration
|
||||
collectTimeout time.Duration
|
||||
@ -75,10 +82,44 @@ type Controller struct {
|
||||
collectedTime time.Time
|
||||
}
|
||||
|
||||
// New constructs a Controller using the provided checkpointer and
|
||||
// options (including optional exporter) to configure a metric
|
||||
var _ export.InstrumentationLibraryReader = &Controller{}
|
||||
var _ metric.MeterProvider = &Controller{}
|
||||
|
||||
func (c *Controller) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
|
||||
cfg := metric.NewMeterConfig(opts...)
|
||||
library := instrumentation.Library{
|
||||
Name: instrumentationName,
|
||||
Version: cfg.InstrumentationVersion(),
|
||||
SchemaURL: cfg.SchemaURL(),
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
m, ok := c.libraries[library]
|
||||
if !ok {
|
||||
checkpointer := c.checkpointerFactory.NewCheckpointer()
|
||||
accumulator := sdk.NewAccumulator(checkpointer)
|
||||
m = registry.NewUniqueInstrumentMeterImpl(&accumulatorCheckpointer{
|
||||
Accumulator: accumulator,
|
||||
checkpointer: checkpointer,
|
||||
library: library,
|
||||
})
|
||||
|
||||
c.libraries[library] = m
|
||||
}
|
||||
return metric.WrapMeterImpl(m)
|
||||
}
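Aside, not part of the diff: a sketch of the controller used directly as a metric.MeterProvider now that meters are registered per instrumentation.Library. The library and instrument names are hypothetical; controller refers to the basic controller package of this file.

// recordOnce obtains a meter from the controller itself; repeated calls with
// the same name return a meter backed by the same UniqueInstrumentMeterImpl.
func recordOnce(ctx context.Context, cont *controller.Controller) {
	meter := cont.Meter("example.io/library")
	counter := metric.Must(meter).NewInt64Counter("requests.sum")
	counter.Add(ctx, 1)
}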
|
||||
|
||||
type accumulatorCheckpointer struct {
|
||||
*sdk.Accumulator
|
||||
checkpointer export.Checkpointer
|
||||
library instrumentation.Library
|
||||
}
|
||||
|
||||
// New constructs a Controller using the provided checkpointer factory
|
||||
// and options (including optional exporter) to configure a metric
|
||||
// export pipeline.
|
||||
func New(checkpointer export.Checkpointer, opts ...Option) *Controller {
|
||||
func New(checkpointerFactory export.CheckpointerFactory, opts ...Option) *Controller {
|
||||
c := &config{
|
||||
CollectPeriod: DefaultPeriod,
|
||||
CollectTimeout: DefaultPeriod,
|
||||
@ -96,15 +137,13 @@ func New(checkpointer export.Checkpointer, opts ...Option) *Controller {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
impl := sdk.NewAccumulator(checkpointer)
|
||||
return &Controller{
|
||||
provider: registry.NewMeterProvider(impl),
|
||||
accumulator: impl,
|
||||
checkpointer: checkpointer,
|
||||
resource: c.Resource,
|
||||
exporter: c.Exporter,
|
||||
stopCh: nil,
|
||||
clock: controllerTime.RealClock{},
|
||||
libraries: map[instrumentation.Library]*registry.UniqueInstrumentMeterImpl{},
|
||||
checkpointerFactory: checkpointerFactory,
|
||||
exporter: c.Exporter,
|
||||
resource: c.Resource,
|
||||
stopCh: nil,
|
||||
clock: controllerTime.RealClock{},
|
||||
|
||||
collectPeriod: c.CollectPeriod,
|
||||
collectTimeout: c.CollectTimeout,
|
||||
@ -120,11 +159,6 @@ func (c *Controller) SetClock(clock controllerTime.Clock) {
|
||||
c.clock = clock
|
||||
}
|
||||
|
||||
// MeterProvider returns a MeterProvider instance for this controller.
|
||||
func (c *Controller) MeterProvider() metric.MeterProvider {
|
||||
return c.provider
|
||||
}
|
||||
|
||||
// Resource returns the *resource.Resource associated with this
|
||||
// controller.
|
||||
func (c *Controller) Resource() *resource.Resource {
|
||||
@ -165,19 +199,23 @@ func (c *Controller) Start(ctx context.Context) error {
|
||||
//
|
||||
// Note that Stop() will not cancel an ongoing collection or export.
|
||||
func (c *Controller) Stop(ctx context.Context) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if lastCollection := func() bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.stopCh == nil {
|
||||
if c.stopCh == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
close(c.stopCh)
|
||||
c.stopCh = nil
|
||||
c.wg.Wait()
|
||||
c.ticker.Stop()
|
||||
c.ticker = nil
|
||||
return true
|
||||
}(); !lastCollection {
|
||||
return nil
|
||||
}
|
||||
|
||||
close(c.stopCh)
|
||||
c.stopCh = nil
|
||||
c.wg.Wait()
|
||||
c.ticker.Stop()
|
||||
c.ticker = nil
|
||||
|
||||
return c.collect(ctx)
|
||||
}
|
||||
|
||||
@ -198,9 +236,7 @@ func (c *Controller) runTicker(ctx context.Context, stopCh chan struct{}) {
|
||||
|
||||
// collect computes a checkpoint and optionally exports it.
|
||||
func (c *Controller) collect(ctx context.Context) error {
|
||||
if err := c.checkpoint(ctx, func() bool {
|
||||
return true
|
||||
}); err != nil {
|
||||
if err := c.checkpoint(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.exporter == nil {
|
||||
@ -212,19 +248,45 @@ func (c *Controller) collect(ctx context.Context) error {
|
||||
return c.export(ctx)
|
||||
}
|
||||
|
||||
// accumulatorList returns a snapshot of current accumulators
|
||||
// registered to this controller. This briefly locks the controller.
|
||||
func (c *Controller) accumulatorList() []*accumulatorCheckpointer {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
var r []*accumulatorCheckpointer
|
||||
for _, entry := range c.libraries {
|
||||
acc, ok := entry.MeterImpl().(*accumulatorCheckpointer)
|
||||
if ok {
|
||||
r = append(r, acc)
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// checkpoint calls the Accumulator and Checkpointer interfaces to
|
||||
// compute the CheckpointSet. This applies the configured collection
|
||||
// compute the Reader. This applies the configured collection
|
||||
// timeout. Note that this does not try to cancel a Collect or Export
|
||||
// when Stop() is called.
|
||||
func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
||||
ckpt := c.checkpointer.CheckpointSet()
|
||||
func (c *Controller) checkpoint(ctx context.Context) error {
|
||||
for _, impl := range c.accumulatorList() {
|
||||
if err := c.checkpointSingleAccumulator(ctx, impl); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkpointSingleAccumulator checkpoints a single instrumentation
|
||||
// library's accumulator, which involves calling
|
||||
// checkpointer.StartCollection, accumulator.Collect, and
|
||||
// checkpointer.FinishCollection in sequence.
|
||||
func (c *Controller) checkpointSingleAccumulator(ctx context.Context, ac *accumulatorCheckpointer) error {
|
||||
ckpt := ac.checkpointer.Reader()
|
||||
ckpt.Lock()
|
||||
defer ckpt.Unlock()
|
||||
|
||||
if !cond() {
|
||||
return nil
|
||||
}
|
||||
c.checkpointer.StartCollection()
|
||||
ac.checkpointer.StartCollection()
|
||||
|
||||
if c.collectTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
@ -232,7 +294,7 @@ func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
_ = c.accumulator.Collect(ctx)
|
||||
_ = ac.Accumulator.Collect(ctx)
|
||||
|
||||
var err error
|
||||
select {
|
||||
@ -243,7 +305,7 @@ func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
||||
}
|
||||
|
||||
// Finish the checkpoint whether the accumulator timed out or not.
|
||||
if cerr := c.checkpointer.FinishCollection(); cerr != nil {
|
||||
if cerr := ac.checkpointer.FinishCollection(); cerr != nil {
|
||||
if err == nil {
|
||||
err = cerr
|
||||
} else {
|
||||
@ -254,34 +316,36 @@ func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
||||
return err
|
||||
}
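Aside, not part of the diff: the same per-library sequence, written out as a standalone sketch the way the end-to-end processor test later in this commit drives it. Here checkpointer and accumulator are assumed to be an export.Checkpointer and an *sdk.Accumulator built from that same checkpointer.

// collectOnce runs one StartCollection/Collect/FinishCollection cycle while
// holding the Reader's write lock, leaving the Reader ready for an exporter.
func collectOnce(ctx context.Context, checkpointer export.Checkpointer, accumulator *sdk.Accumulator) error {
	ckpt := checkpointer.Reader()
	ckpt.Lock()
	defer ckpt.Unlock()

	checkpointer.StartCollection()
	_ = accumulator.Collect(ctx)
	return checkpointer.FinishCollection()
}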
|
||||
|
||||
// export calls the exporter with a read lock on the CheckpointSet,
|
||||
// export calls the exporter with a read lock on the Reader,
|
||||
// applying the configured export timeout.
|
||||
func (c *Controller) export(ctx context.Context) error {
|
||||
ckpt := c.checkpointer.CheckpointSet()
|
||||
ckpt.RLock()
|
||||
defer ckpt.RUnlock()
|
||||
|
||||
if c.pushTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, c.pushTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
return c.exporter.Export(ctx, c.resource, ckpt)
|
||||
return c.exporter.Export(ctx, c.resource, c)
|
||||
}
|
||||
|
||||
// ForEach gives the caller read-locked access to the current
|
||||
// export.CheckpointSet.
|
||||
func (c *Controller) ForEach(ks export.ExportKindSelector, f func(export.Record) error) error {
|
||||
ckpt := c.checkpointer.CheckpointSet()
|
||||
ckpt.RLock()
|
||||
defer ckpt.RUnlock()
|
||||
|
||||
return ckpt.ForEach(ks, f)
|
||||
// ForEach implements export.InstrumentationLibraryReader.
|
||||
func (c *Controller) ForEach(readerFunc func(l instrumentation.Library, r export.Reader) error) error {
|
||||
for _, acPair := range c.accumulatorList() {
|
||||
reader := acPair.checkpointer.Reader()
|
||||
// TODO: We should not fail fast; instead accumulate errors.
|
||||
if err := func() error {
|
||||
reader.RLock()
|
||||
defer reader.RUnlock()
|
||||
return readerFunc(acPair.library, reader)
|
||||
}(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsRunning returns true if the controller was started via Start(),
|
||||
// indicating that the current export.CheckpointSet is being kept
|
||||
// indicating that the current export.Reader is being kept
|
||||
// up-to-date.
|
||||
func (c *Controller) IsRunning() bool {
|
||||
c.lock.Lock()
|
||||
@ -298,16 +362,20 @@ func (c *Controller) Collect(ctx context.Context) error {
|
||||
// computing checkpoints with the collection period.
|
||||
return ErrControllerStarted
|
||||
}
|
||||
if !c.shouldCollect() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.checkpoint(ctx, c.shouldCollect)
|
||||
return c.checkpoint(ctx)
|
||||
}
|
||||
|
||||
// shouldCollect returns true if the collector should collect now,
|
||||
// based on the timestamp, the last collection time, and the
|
||||
// configured period.
|
||||
func (c *Controller) shouldCollect() bool {
|
||||
// This is called with the CheckpointSet exclusive
|
||||
// lock held.
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.collectPeriod == 0 {
|
||||
return true
|
||||
}
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
@ -41,11 +42,14 @@ func getMap(t *testing.T, cont *controller.Controller) map[string]float64 {
|
||||
out := processortest.NewOutput(attribute.DefaultEncoder())
|
||||
|
||||
require.NoError(t, cont.ForEach(
|
||||
export.CumulativeExportKindSelector(),
|
||||
func(record export.Record) error {
|
||||
return out.AddRecord(record)
|
||||
},
|
||||
))
|
||||
func(_ instrumentation.Library, reader export.Reader) error {
|
||||
return reader.ForEach(
|
||||
export.CumulativeExportKindSelector(),
|
||||
func(record export.Record) error {
|
||||
return out.AddRecord(record)
|
||||
},
|
||||
)
|
||||
}))
|
||||
return out.Map()
|
||||
}
|
||||
|
||||
@ -113,7 +117,7 @@ func TestControllerUsesResource(t *testing.T) {
|
||||
sel := export.CumulativeExportKindSelector()
|
||||
exp := processortest.New(sel, attribute.DefaultEncoder())
|
||||
cont := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
exp,
|
||||
),
|
||||
@ -121,9 +125,8 @@ func TestControllerUsesResource(t *testing.T) {
|
||||
)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, cont.Start(ctx))
|
||||
prov := cont.MeterProvider()
|
||||
|
||||
ctr := metric.Must(prov.Meter("named")).NewFloat64Counter("calls.sum")
|
||||
ctr := metric.Must(cont.Meter("named")).NewFloat64Counter("calls.sum")
|
||||
ctr.Add(context.Background(), 1.)
|
||||
|
||||
// Collect once
|
||||
@ -139,7 +142,7 @@ func TestControllerUsesResource(t *testing.T) {
|
||||
|
||||
func TestStartNoExporter(t *testing.T) {
|
||||
cont := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
export.CumulativeExportKindSelector(),
|
||||
),
|
||||
@ -149,10 +152,9 @@ func TestStartNoExporter(t *testing.T) {
|
||||
mock := controllertest.NewMockClock()
|
||||
cont.SetClock(mock)
|
||||
|
||||
prov := cont.MeterProvider()
|
||||
calls := int64(0)
|
||||
|
||||
_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("calls.lastvalue",
|
||||
_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("calls.lastvalue",
|
||||
func(ctx context.Context, result metric.Int64ObserverResult) {
|
||||
calls++
|
||||
checkTestContext(t, ctx)
|
||||
@ -209,7 +211,7 @@ func TestStartNoExporter(t *testing.T) {
|
||||
|
||||
func TestObserverCanceled(t *testing.T) {
|
||||
cont := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
export.CumulativeExportKindSelector(),
|
||||
),
|
||||
@ -218,10 +220,9 @@ func TestObserverCanceled(t *testing.T) {
|
||||
controller.WithResource(resource.Empty()),
|
||||
)
|
||||
|
||||
prov := cont.MeterProvider()
|
||||
calls := int64(0)
|
||||
|
||||
_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("done.lastvalue",
|
||||
_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("done.lastvalue",
|
||||
func(ctx context.Context, result metric.Int64ObserverResult) {
|
||||
<-ctx.Done()
|
||||
calls++
|
||||
@ -242,7 +243,7 @@ func TestObserverCanceled(t *testing.T) {
|
||||
|
||||
func TestObserverContext(t *testing.T) {
|
||||
cont := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
export.CumulativeExportKindSelector(),
|
||||
),
|
||||
@ -250,9 +251,7 @@ func TestObserverContext(t *testing.T) {
|
||||
controller.WithResource(resource.Empty()),
|
||||
)
|
||||
|
||||
prov := cont.MeterProvider()
|
||||
|
||||
_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("done.lastvalue",
|
||||
_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("done.lastvalue",
|
||||
func(ctx context.Context, result metric.Int64ObserverResult) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
checkTestContext(t, ctx)
|
||||
@ -284,7 +283,7 @@ func newBlockingExporter() *blockingExporter {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *blockingExporter) Export(ctx context.Context, res *resource.Resource, output export.CheckpointSet) error {
|
||||
func (b *blockingExporter) Export(ctx context.Context, res *resource.Resource, output export.InstrumentationLibraryReader) error {
|
||||
var err error
|
||||
_ = b.exporter.Export(ctx, res, output)
|
||||
if b.calls == 0 {
|
||||
@ -306,7 +305,7 @@ func (*blockingExporter) ExportKindFor(
|
||||
func TestExportTimeout(t *testing.T) {
|
||||
exporter := newBlockingExporter()
|
||||
cont := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
export.CumulativeExportKindSelector(),
|
||||
),
|
||||
@ -318,10 +317,8 @@ func TestExportTimeout(t *testing.T) {
|
||||
mock := controllertest.NewMockClock()
|
||||
cont.SetClock(mock)
|
||||
|
||||
prov := cont.MeterProvider()
|
||||
|
||||
calls := int64(0)
|
||||
_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("one.lastvalue",
|
||||
_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("one.lastvalue",
|
||||
func(ctx context.Context, result metric.Int64ObserverResult) {
|
||||
calls++
|
||||
result.Observe(calls)
|
||||
@ -363,7 +360,7 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) {
|
||||
attribute.DefaultEncoder(),
|
||||
)
|
||||
cont := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
exp,
|
||||
),
|
||||
@ -374,10 +371,8 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) {
|
||||
mock := controllertest.NewMockClock()
|
||||
cont.SetClock(mock)
|
||||
|
||||
prov := cont.MeterProvider()
|
||||
|
||||
calls := 0
|
||||
_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("one.lastvalue",
|
||||
_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("one.lastvalue",
|
||||
func(ctx context.Context, result metric.Int64ObserverResult) {
|
||||
calls++
|
||||
result.Observe(int64(calls))
|
||||
@ -437,3 +432,46 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) {
|
||||
"one.lastvalue//": 6,
|
||||
}, exp.Values())
|
||||
}
|
||||
|
||||
func TestRegistryFunction(t *testing.T) {
|
||||
exp := processortest.New(
|
||||
export.CumulativeExportKindSelector(),
|
||||
attribute.DefaultEncoder(),
|
||||
)
|
||||
cont := controller.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
exp,
|
||||
),
|
||||
controller.WithCollectPeriod(time.Second),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithResource(resource.Empty()),
|
||||
)
|
||||
|
||||
m1 := cont.Meter("test")
|
||||
m2 := cont.Meter("test")
|
||||
|
||||
require.NotNil(t, m1)
|
||||
require.Equal(t, m1, m2)
|
||||
|
||||
c1, err := m1.NewInt64Counter("counter.sum")
|
||||
require.NoError(t, err)
|
||||
|
||||
c2, err := m1.NewInt64Counter("counter.sum")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, c1, c2)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, cont.Start(ctx))
|
||||
|
||||
c1.Add(ctx, 10)
|
||||
c2.Add(ctx, 10)
|
||||
|
||||
require.NoError(t, cont.Stop(ctx))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"counter.sum//": 20,
|
||||
}, exp.Values())
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ import (
|
||||
|
||||
func TestPullNoCollect(t *testing.T) {
|
||||
puller := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
export.CumulativeExportKindSelector(),
|
||||
processor.WithMemory(true),
|
||||
@ -44,14 +44,14 @@ func TestPullNoCollect(t *testing.T) {
|
||||
)
|
||||
|
||||
ctx := context.Background()
|
||||
meter := puller.MeterProvider().Meter("nocache")
|
||||
meter := puller.Meter("nocache")
|
||||
counter := metric.Must(meter).NewInt64Counter("counter.sum")
|
||||
|
||||
counter.Add(ctx, 10, attribute.String("A", "B"))
|
||||
|
||||
require.NoError(t, puller.Collect(ctx))
|
||||
records := processortest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
|
||||
require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"counter.sum/A=B/": 10,
|
||||
@ -61,7 +61,7 @@ func TestPullNoCollect(t *testing.T) {
|
||||
|
||||
require.NoError(t, puller.Collect(ctx))
|
||||
records = processortest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
|
||||
require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"counter.sum/A=B/": 20,
|
||||
@ -70,7 +70,7 @@ func TestPullNoCollect(t *testing.T) {
|
||||
|
||||
func TestPullWithCollect(t *testing.T) {
|
||||
puller := controller.New(
|
||||
processor.New(
|
||||
processor.NewFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
export.CumulativeExportKindSelector(),
|
||||
processor.WithMemory(true),
|
||||
@ -82,14 +82,14 @@ func TestPullWithCollect(t *testing.T) {
|
||||
puller.SetClock(mock)
|
||||
|
||||
ctx := context.Background()
|
||||
meter := puller.MeterProvider().Meter("nocache")
|
||||
meter := puller.Meter("nocache")
|
||||
counter := metric.Must(meter).NewInt64Counter("counter.sum")
|
||||
|
||||
counter.Add(ctx, 10, attribute.String("A", "B"))
|
||||
|
||||
require.NoError(t, puller.Collect(ctx))
|
||||
records := processortest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
|
||||
require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"counter.sum/A=B/": 10,
|
||||
@ -100,7 +100,7 @@ func TestPullWithCollect(t *testing.T) {
|
||||
// Cached value!
|
||||
require.NoError(t, puller.Collect(ctx))
|
||||
records = processortest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
|
||||
require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"counter.sum/A=B/": 10,
|
||||
@ -112,7 +112,7 @@ func TestPullWithCollect(t *testing.T) {
|
||||
// Re-computed value!
|
||||
require.NoError(t, puller.Collect(ctx))
|
||||
records = processortest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
|
||||
require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"counter.sum/A=B/": 20,
|
||||
|
@ -72,19 +72,17 @@ func newExporter() *processortest.Exporter {
|
||||
)
|
||||
}
|
||||
|
||||
func newCheckpointer() export.Checkpointer {
|
||||
return processortest.Checkpointer(
|
||||
processortest.NewProcessor(
|
||||
processortest.AggregatorSelector(),
|
||||
attribute.DefaultEncoder(),
|
||||
),
|
||||
func newCheckpointerFactory() export.CheckpointerFactory {
|
||||
return processortest.NewCheckpointerFactory(
|
||||
processortest.AggregatorSelector(),
|
||||
attribute.DefaultEncoder(),
|
||||
)
|
||||
}
|
||||
|
||||
func TestPushDoubleStop(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
exporter := newExporter()
|
||||
checkpointer := newCheckpointer()
|
||||
checkpointer := newCheckpointerFactory()
|
||||
p := controller.New(checkpointer, controller.WithExporter(exporter))
|
||||
require.NoError(t, p.Start(ctx))
|
||||
require.NoError(t, p.Stop(ctx))
|
||||
@ -94,7 +92,7 @@ func TestPushDoubleStop(t *testing.T) {
|
||||
func TestPushDoubleStart(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
exporter := newExporter()
|
||||
checkpointer := newCheckpointer()
|
||||
checkpointer := newCheckpointerFactory()
|
||||
p := controller.New(checkpointer, controller.WithExporter(exporter))
|
||||
require.NoError(t, p.Start(ctx))
|
||||
err := p.Start(ctx)
|
||||
@ -105,14 +103,14 @@ func TestPushDoubleStart(t *testing.T) {
|
||||
|
||||
func TestPushTicker(t *testing.T) {
|
||||
exporter := newExporter()
|
||||
checkpointer := newCheckpointer()
|
||||
checkpointer := newCheckpointerFactory()
|
||||
p := controller.New(
|
||||
checkpointer,
|
||||
controller.WithExporter(exporter),
|
||||
controller.WithCollectPeriod(time.Second),
|
||||
controller.WithResource(testResource),
|
||||
)
|
||||
meter := p.MeterProvider().Meter("name")
|
||||
meter := p.Meter("name")
|
||||
|
||||
mock := controllertest.NewMockClock()
|
||||
p.SetClock(mock)
|
||||
@ -185,7 +183,7 @@ func TestPushExportError(t *testing.T) {
|
||||
// This test validates the error handling
|
||||
// behavior of the basic Processor is honored
|
||||
// by the push processor.
|
||||
checkpointer := processor.New(processortest.AggregatorSelector(), exporter)
|
||||
checkpointer := processor.NewFactory(processortest.AggregatorSelector(), exporter)
|
||||
p := controller.New(
|
||||
checkpointer,
|
||||
controller.WithExporter(exporter),
|
||||
@ -198,7 +196,7 @@ func TestPushExportError(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
meter := p.MeterProvider().Meter("name")
|
||||
meter := p.Meter("name")
|
||||
counter1 := metric.Must(meter).NewInt64Counter("counter1.sum")
|
||||
counter2 := metric.Must(meter).NewInt64Counter("counter2.sum")
|
||||
|
||||
|
@ -19,6 +19,8 @@ import (
|
||||
|
||||
"github.com/benbjohnson/clock"
|
||||
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
|
||||
)
|
||||
|
||||
@ -56,3 +58,18 @@ func (t MockTicker) Stop() {
|
||||
func (t MockTicker) C() <-chan time.Time {
|
||||
return t.ticker.C
|
||||
}
|
||||
|
||||
// ReadAll is a helper for tests that want a flat iterator over all
|
||||
// metrics instead of a two-level iterator (instrumentation library,
|
||||
// metric).
|
||||
func ReadAll(
|
||||
reader export.InstrumentationLibraryReader,
|
||||
kind export.ExportKindSelector,
|
||||
apply func(instrumentation.Library, export.Record) error,
|
||||
) error {
|
||||
return reader.ForEach(func(library instrumentation.Library, reader export.Reader) error {
|
||||
return reader.ForEach(kind, func(record export.Record) error {
|
||||
return apply(library, record)
|
||||
})
|
||||
})
|
||||
}
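Aside, not part of the diff: a usage sketch for ReadAll. Here t is the test, reader is any export.InstrumentationLibraryReader (for example the basic controller), and the processortest Output with its AddInstrumentationLibraryRecord method is the one added later in this commit.

out := processortest.NewOutput(attribute.DefaultEncoder())
require.NoError(t, ReadAll(reader, export.CumulativeExportKindSelector(),
	out.AddInstrumentationLibraryRecord))
// out.Map() now holds one "name/labels/resource" entry per record, across
// every instrumentation library.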
|
||||
|
@ -86,7 +86,7 @@ func newSDK(t *testing.T) (metric.Meter, *metricsdk.Accumulator, *testSelector,
|
||||
accum := metricsdk.NewAccumulator(
|
||||
processor,
|
||||
)
|
||||
meter := metric.WrapMeterImpl(accum, "test")
|
||||
meter := metric.WrapMeterImpl(accum)
|
||||
return meter, accum, testSelector, processor
|
||||
}
|
||||
|
||||
|
@ -102,7 +102,7 @@ record has an associated aggregator.
Processor is an interface which sits between the SDK and an exporter.
The Processor embeds an AggregatorSelector, used by the SDK to assign
new Aggregators. The Processor supports a Process() API for submitting
checkpointed aggregators to the processor, and a CheckpointSet() API
checkpointed aggregators to the processor, and a Reader() API
for producing a complete checkpoint for the exporter. Two default
Processor implementations are provided, the "defaultkeys" Processor groups
aggregate metrics by their recommended Descriptor.Keys(), the
@ -113,9 +113,9 @@ provide the serialization logic for labels. This allows avoiding
duplicate serialization of labels, once as a unique key in the SDK (or
Processor) and once in the exporter.

CheckpointSet is an interface between the Processor and the Exporter.
After completing a collection pass, the Processor.CheckpointSet() method
returns a CheckpointSet, which the Exporter uses to iterate over all
Reader is an interface between the Processor and the Exporter.
After completing a collection pass, the Processor.Reader() method
returns a Reader, which the Exporter uses to iterate over all
the updated metrics.

Record is a struct containing the state of an individual exported
@ -126,7 +126,7 @@ Labels is a struct containing an ordered set of labels, the
corresponding unique encoding, and the encoder that produced it.

Exporter is the final stage of an export pipeline. It is called with
a CheckpointSet capable of enumerating all the updated metrics.
a Reader capable of enumerating all the updated metrics.

Controller is not an export interface per se, but it orchestrates the
export pipeline. For example, a "push" controller will establish a
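Aside, not part of the diff: a wiring sketch of the pipeline described above using the factory-based constructor from this commit. The exporter argument stands for any export.Exporter; the aggregator selector choice is an assumption, and the import aliases (controller, processor, simple) follow the tests in this commit.

// newPipeline builds and starts a push pipeline: the factory creates one
// Accumulator and Processor per instrumentation library, and the controller
// exports on the configured period.
func newPipeline(exporter export.Exporter) (*controller.Controller, error) {
	cont := controller.New(
		processor.NewFactory(
			simple.NewWithHistogramDistribution(),
			exporter, // the Exporter also serves as the ExportKindSelector
		),
		controller.WithExporter(exporter),
		controller.WithCollectPeriod(10*time.Second),
	)
	return cont, cont.Start(context.Background())
}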
|
||||
|
@ -22,14 +22,14 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
)
|
||||
|
||||
func TestStressInt64Histogram(t *testing.T) {
|
||||
desc := metric.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
|
||||
alloc := histogram.New(2, &desc, histogram.WithExplicitBoundaries([]float64{25, 50, 75}))
|
||||
h, ckpt := &alloc[0], &alloc[1]
|
||||
|
@ -20,14 +20,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
|
||||
)
|
||||
|
||||
func TestStressInt64MinMaxSumCount(t *testing.T) {
|
||||
desc := metric.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
alloc := minmaxsumcount.New(2, &desc)
|
||||
mmsc, ckpt := &alloc[0], &alloc[1]
|
||||
|
||||
|
@ -90,7 +90,7 @@ type (
|
||||
state struct {
|
||||
config config
|
||||
|
||||
// RWMutex implements locking for the `CheckpointSet` interface.
|
||||
// RWMutex implements locking for the `Reader` interface.
|
||||
sync.RWMutex
|
||||
values map[stateKey]*stateValue
|
||||
|
||||
@ -113,7 +113,7 @@ type (
|
||||
|
||||
var _ export.Processor = &Processor{}
|
||||
var _ export.Checkpointer = &Processor{}
|
||||
var _ export.CheckpointSet = &state{}
|
||||
var _ export.Reader = &state{}
|
||||
|
||||
// ErrInconsistentState is returned when the sequence of collection's starts and finishes are incorrectly balanced.
|
||||
var ErrInconsistentState = fmt.Errorf("inconsistent processor state")
|
||||
@ -127,20 +127,43 @@ var ErrInvalidExportKind = fmt.Errorf("invalid export kind")
|
||||
// data, so that this Processor can prepare to compute Delta or
|
||||
// Cumulative Aggregations as needed.
|
||||
func New(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) *Processor {
|
||||
return NewFactory(aselector, eselector, opts...).NewCheckpointer().(*Processor)
|
||||
}
|
||||
|
||||
type factory struct {
|
||||
aselector export.AggregatorSelector
|
||||
eselector export.ExportKindSelector
|
||||
config config
|
||||
}
|
||||
|
||||
func NewFactory(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) export.CheckpointerFactory {
|
||||
var config config
|
||||
for _, opt := range opts {
|
||||
opt.applyProcessor(&config)
|
||||
}
|
||||
return factory{
|
||||
aselector: aselector,
|
||||
eselector: eselector,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
var _ export.CheckpointerFactory = factory{}
|
||||
|
||||
func (f factory) NewCheckpointer() export.Checkpointer {
|
||||
now := time.Now()
|
||||
p := &Processor{
|
||||
AggregatorSelector: aselector,
|
||||
ExportKindSelector: eselector,
|
||||
AggregatorSelector: f.aselector,
|
||||
ExportKindSelector: f.eselector,
|
||||
state: state{
|
||||
values: map[stateKey]*stateValue{},
|
||||
processStart: now,
|
||||
intervalStart: now,
|
||||
config: f.config,
|
||||
},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.applyProcessor(&p.config)
|
||||
}
|
||||
return p
|
||||
|
||||
}
|
||||
|
||||
// Process implements export.Processor.
|
||||
@ -241,11 +264,11 @@ func (b *Processor) Process(accum export.Accumulation) error {
|
||||
return value.current.Merge(agg, desc)
|
||||
}
|
||||
|
||||
// CheckpointSet returns the associated CheckpointSet. Use the
|
||||
// CheckpointSet Locker interface to synchronize access to this
|
||||
// object. The CheckpointSet.ForEach() method cannot be called
|
||||
// Reader returns the associated Reader. Use the
|
||||
// Reader Locker interface to synchronize access to this
|
||||
// object. The Reader.ForEach() method cannot be called
|
||||
// concurrently with Process().
|
||||
func (b *Processor) CheckpointSet() export.CheckpointSet {
|
||||
func (b *Processor) Reader() export.Reader {
|
||||
return &b.state
|
||||
}
|
||||
|
||||
@ -260,7 +283,7 @@ func (b *Processor) StartCollection() {
|
||||
|
||||
// FinishCollection signals to the Processor that a complete
|
||||
// collection has finished and that ForEach will be called to access
|
||||
// the CheckpointSet.
|
||||
// the Reader.
|
||||
func (b *Processor) FinishCollection() error {
|
||||
b.intervalEnd = time.Now()
|
||||
if b.startedCollection != b.finishedCollection+1 {
|
||||
@ -314,7 +337,7 @@ func (b *Processor) FinishCollection() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForEach iterates through the CheckpointSet, passing an
|
||||
// ForEach iterates through the Reader, passing an
|
||||
// export.Record with the appropriate Cumulative or Delta aggregation
|
||||
// to an exporter.
|
||||
func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
|
||||
|
@ -26,10 +26,12 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
sdk "go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
@ -136,8 +138,8 @@ func testProcessor(
|
||||
|
||||
instSuffix := fmt.Sprint(".", strings.ToLower(akind.String()))
|
||||
|
||||
desc1 := metric.NewDescriptor(fmt.Sprint("inst1", instSuffix), mkind, nkind)
|
||||
desc2 := metric.NewDescriptor(fmt.Sprint("inst2", instSuffix), mkind, nkind)
|
||||
desc1 := metrictest.NewDescriptor(fmt.Sprint("inst1", instSuffix), mkind, nkind)
|
||||
desc2 := metrictest.NewDescriptor(fmt.Sprint("inst2", instSuffix), mkind, nkind)
|
||||
|
||||
for nc := 0; nc < nCheckpoint; nc++ {
|
||||
|
||||
@ -174,7 +176,7 @@ func testProcessor(
|
||||
continue
|
||||
}
|
||||
|
||||
checkpointSet := processor.CheckpointSet()
|
||||
reader := processor.Reader()
|
||||
|
||||
for _, repetitionAfterEmptyInterval := range []bool{false, true} {
|
||||
if repetitionAfterEmptyInterval {
|
||||
@ -188,7 +190,7 @@ func testProcessor(
|
||||
|
||||
// Test the final checkpoint state.
|
||||
records1 := processorTest.NewOutput(attribute.DefaultEncoder())
|
||||
err = checkpointSet.ForEach(export.ConstantExportKindSelector(ekind), records1.AddRecord)
|
||||
err = reader.ForEach(export.ConstantExportKindSelector(ekind), records1.AddRecord)
|
||||
|
||||
// Test for an allowed error:
|
||||
if err != nil && err != aggregation.ErrNoSubtraction {
|
||||
@ -267,7 +269,7 @@ func (bogusExporter) ExportKindFor(*metric.Descriptor, aggregation.Kind) export.
|
||||
return 1000000
|
||||
}
|
||||
|
||||
func (bogusExporter) Export(context.Context, export.CheckpointSet) error {
|
||||
func (bogusExporter) Export(context.Context, export.Reader) error {
|
||||
panic("Not called")
|
||||
}
|
||||
|
||||
@ -300,7 +302,7 @@ func TestBasicInconsistent(t *testing.T) {
|
||||
// Test no start
|
||||
b = basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector())
|
||||
|
||||
desc := metric.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{})
|
||||
require.Equal(t, basic.ErrInconsistentState, b.Process(accum))
|
||||
|
||||
@ -325,7 +327,7 @@ func TestBasicTimestamps(t *testing.T) {
|
||||
time.Sleep(time.Nanosecond)
|
||||
afterNew := time.Now()
|
||||
|
||||
desc := metric.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{})
|
||||
|
||||
b.StartCollection()
|
||||
@ -370,11 +372,11 @@ func TestBasicTimestamps(t *testing.T) {
|
||||
func TestStatefulNoMemoryCumulative(t *testing.T) {
|
||||
ekindSel := export.CumulativeExportKindSelector()
|
||||
|
||||
desc := metric.NewDescriptor("inst.sum", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
selector := processorTest.AggregatorSelector()
|
||||
|
||||
processor := basic.New(selector, ekindSel, basic.WithMemory(false))
|
||||
checkpointSet := processor.CheckpointSet()
|
||||
reader := processor.Reader()
|
||||
|
||||
for i := 1; i < 3; i++ {
|
||||
// Empty interval
|
||||
@ -383,7 +385,7 @@ func TestStatefulNoMemoryCumulative(t *testing.T) {
|
||||
|
||||
// Verify zero elements
|
||||
records := processorTest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
|
||||
require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
|
||||
require.EqualValues(t, map[string]float64{}, records.Map())
|
||||
|
||||
// Add 10
|
||||
@ -393,7 +395,7 @@ func TestStatefulNoMemoryCumulative(t *testing.T) {
|
||||
|
||||
// Verify one element
|
||||
records = processorTest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
|
||||
require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"inst.sum/A=B/": float64(i * 10),
|
||||
}, records.Map())
|
||||
@ -403,11 +405,11 @@ func TestStatefulNoMemoryCumulative(t *testing.T) {
|
||||
func TestStatefulNoMemoryDelta(t *testing.T) {
|
||||
ekindSel := export.DeltaExportKindSelector()
|
||||
|
||||
desc := metric.NewDescriptor("inst.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
|
||||
selector := processorTest.AggregatorSelector()
|
||||
|
||||
processor := basic.New(selector, ekindSel, basic.WithMemory(false))
|
||||
checkpointSet := processor.CheckpointSet()
|
||||
reader := processor.Reader()
|
||||
|
||||
for i := 1; i < 3; i++ {
|
||||
// Empty interval
|
||||
@ -416,7 +418,7 @@ func TestStatefulNoMemoryDelta(t *testing.T) {
|
||||
|
||||
// Verify zero elements
|
||||
records := processorTest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
|
||||
require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
|
||||
require.EqualValues(t, map[string]float64{}, records.Map())
|
||||
|
||||
// Add 10
|
||||
@ -426,7 +428,7 @@ func TestStatefulNoMemoryDelta(t *testing.T) {
|
||||
|
||||
// Verify one element
|
||||
records = processorTest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
|
||||
require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"inst.sum/A=B/": 10,
|
||||
}, records.Map())
|
||||
@ -439,11 +441,11 @@ func TestMultiObserverSum(t *testing.T) {
|
||||
export.DeltaExportKindSelector(),
|
||||
} {
|
||||
|
||||
desc := metric.NewDescriptor("observe.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
|
||||
desc := metrictest.NewDescriptor("observe.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
|
||||
selector := processorTest.AggregatorSelector()
|
||||
|
||||
processor := basic.New(selector, ekindSel, basic.WithMemory(false))
|
||||
checkpointSet := processor.CheckpointSet()
|
||||
reader := processor.Reader()
|
||||
|
||||
for i := 1; i < 3; i++ {
|
||||
// Add i*10*3 times
|
||||
@ -461,7 +463,7 @@ func TestMultiObserverSum(t *testing.T) {
|
||||
|
||||
// Verify one element
|
||||
records := processorTest.NewOutput(attribute.DefaultEncoder())
|
||||
require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
|
||||
require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"observe.sum/A=B/": float64(3 * 10 * multiplier),
|
||||
}, records.Map())
|
||||
@ -477,7 +479,7 @@ func TestCounterObserverEndToEnd(t *testing.T) {
|
||||
eselector,
|
||||
)
|
||||
accum := sdk.NewAccumulator(proc)
|
||||
meter := metric.WrapMeterImpl(accum, "testing")
|
||||
meter := metric.WrapMeterImpl(accum)
|
||||
|
||||
var calls int64
|
||||
metric.Must(meter).NewInt64CounterObserver("observer.sum",
|
||||
@ -486,19 +488,23 @@ func TestCounterObserverEndToEnd(t *testing.T) {
|
||||
result.Observe(calls)
|
||||
},
|
||||
)
|
||||
data := proc.CheckpointSet()
|
||||
reader := proc.Reader()
|
||||
|
||||
var startTime [3]time.Time
|
||||
var endTime [3]time.Time
|
||||
|
||||
for i := range startTime {
|
||||
data := proc.Reader()
|
||||
data.Lock()
|
||||
proc.StartCollection()
|
||||
accum.Collect(ctx)
|
||||
require.NoError(t, proc.FinishCollection())
|
||||
|
||||
exporter := processortest.New(eselector, attribute.DefaultEncoder())
|
||||
require.NoError(t, exporter.Export(ctx, resource.Empty(), data))
|
||||
require.NoError(t, exporter.Export(ctx, resource.Empty(), processortest.OneInstrumentationLibraryReader(
|
||||
instrumentation.Library{
|
||||
Name: "test",
|
||||
}, reader)))
|
||||
|
||||
require.EqualValues(t, map[string]float64{
|
||||
"observer.sum//": float64(i + 1),
|
||||
|
@ -18,7 +18,7 @@ package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
type config struct {
|
||||
// Memory controls whether the processor remembers metric
|
||||
// instruments and label sets that were previously reported.
|
||||
// When Memory is true, CheckpointSet.ForEach() will visit
|
||||
// When Memory is true, Reader.ForEach() will visit
|
||||
// metrics that were not updated in the most recent interval.
|
||||
Memory bool
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"go.opentelemetry.io/otel/metric/number"
|
||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
|
||||
@ -45,14 +46,14 @@ type (
|
||||
}
|
||||
|
||||
// mapValue is value stored in a processor used to produce a
|
||||
// CheckpointSet.
|
||||
// Reader.
|
||||
mapValue struct {
|
||||
labels *attribute.Set
|
||||
resource *resource.Resource
|
||||
aggregator export.Aggregator
|
||||
}
|
||||
|
||||
// Output implements export.CheckpointSet.
|
||||
// Output implements export.Reader.
|
||||
Output struct {
|
||||
m map[mapKey]mapValue
|
||||
labelEncoder attribute.Encoder
|
||||
@ -92,6 +93,28 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
type testFactory struct {
|
||||
selector export.AggregatorSelector
|
||||
encoder attribute.Encoder
|
||||
}
|
||||
|
||||
func NewCheckpointerFactory(selector export.AggregatorSelector, encoder attribute.Encoder) export.CheckpointerFactory {
|
||||
return testFactory{
|
||||
selector: selector,
|
||||
encoder: encoder,
|
||||
}
|
||||
}
|
||||
|
||||
func NewCheckpointer(p *Processor) export.Checkpointer {
|
||||
return &testCheckpointer{
|
||||
Processor: p,
|
||||
}
|
||||
}
|
||||
|
||||
func (f testFactory) NewCheckpointer() export.Checkpointer {
|
||||
return NewCheckpointer(NewProcessor(f.selector, f.encoder))
|
||||
}
|
||||
|
||||
// NewProcessor returns a new testing Processor implementation.
|
||||
// Verify expected outputs using Values(), e.g.:
|
||||
//
|
||||
@ -126,14 +149,6 @@ func (p *Processor) Reset() {
p.output.Reset()
}

// Checkpointer returns a checkpointer that computes a single
// interval.
func Checkpointer(p *Processor) export.Checkpointer {
return &testCheckpointer{
Processor: p,
}
}

// StartCollection implements export.Checkpointer.
func (c *testCheckpointer) StartCollection() {
if c.started != c.finished {

@ -153,8 +168,8 @@ func (c *testCheckpointer) FinishCollection() error {
return nil
}

// CheckpointSet implements export.Checkpointer.
func (c *testCheckpointer) CheckpointSet() export.CheckpointSet {
// Reader implements export.Checkpointer.
func (c *testCheckpointer) Reader() export.Reader {
return c.Processor.output
}

@ -214,7 +229,7 @@ func NewOutput(labelEncoder attribute.Encoder) *Output {
}
}

// ForEach implements export.CheckpointSet.
// ForEach implements export.Reader.
func (o *Output) ForEach(_ export.ExportKindSelector, ff func(export.Record) error) error {
for key, value := range o.m {
if err := ff(export.NewRecord(

@ -238,6 +253,10 @@ func (o *Output) AddRecord(rec export.Record) error {
return o.AddRecordWithResource(rec, resource.Empty())
}

func (o *Output) AddInstrumentationLibraryRecord(_ instrumentation.Library, rec export.Record) error {
return o.AddRecordWithResource(rec, resource.Empty())
}

func (o *Output) AddRecordWithResource(rec export.Record, res *resource.Resource) error {
key := mapKey{
desc: rec.Descriptor(),

@ -332,17 +351,19 @@ func New(selector export.ExportKindSelector, encoder attribute.Encoder) *Exporte
}
}

func (e *Exporter) Export(_ context.Context, res *resource.Resource, ckpt export.CheckpointSet) error {
func (e *Exporter) Export(_ context.Context, res *resource.Resource, ckpt export.InstrumentationLibraryReader) error {
e.output.Lock()
defer e.output.Unlock()
e.exportCount++
return ckpt.ForEach(e.ExportKindSelector, func(r export.Record) error {
if e.InjectErr != nil {
if err := e.InjectErr(r); err != nil {
return err
return ckpt.ForEach(func(library instrumentation.Library, mr export.Reader) error {
return mr.ForEach(e.ExportKindSelector, func(r export.Record) error {
if e.InjectErr != nil {
if err := e.InjectErr(r); err != nil {
return err
}
}
}
return e.output.AddRecordWithResource(r, res)
return e.output.AddRecordWithResource(r, res)
})
})
}

@ -372,3 +393,51 @@ func (e *Exporter) Reset() {
e.output.Reset()
e.exportCount = 0
}

func OneInstrumentationLibraryReader(l instrumentation.Library, r export.Reader) export.InstrumentationLibraryReader {
return oneLibraryReader{l, r}
}

type oneLibraryReader struct {
library instrumentation.Library
reader export.Reader
}

func (o oneLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error {
return readerFunc(o.library, o.reader)
}

func MultiInstrumentationLibraryReader(records map[instrumentation.Library][]export.Record) export.InstrumentationLibraryReader {
return instrumentationLibraryReader{records: records}
}

type instrumentationLibraryReader struct {
records map[instrumentation.Library][]export.Record
}

var _ export.InstrumentationLibraryReader = instrumentationLibraryReader{}

func (m instrumentationLibraryReader) ForEach(fn func(instrumentation.Library, export.Reader) error) error {
for library, records := range m.records {
if err := fn(library, &metricReader{records: records}); err != nil {
return err
}
}
return nil
}

type metricReader struct {
sync.RWMutex
records []export.Record
}

var _ export.Reader = &metricReader{}

func (m *metricReader) ForEach(_ export.ExportKindSelector, fn func(export.Record) error) error {
for _, record := range m.records {
if err := fn(record); err != nil && err != aggregation.ErrNoData {
return err
}
}
return nil
}

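For orientation, here is a sketch of what a push exporter looks like against the new two-level interface: the outer ForEach yields one instrumentation library at a time, the inner ForEach yields the records checkpointed for that library. The printExporter name and its printed output are illustrative only; the export, instrumentation, and resource types are the ones used in this diff.

package example

import (
	"context"
	"fmt"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/resource"
)

// printExporter is a hypothetical exporter used only to illustrate the
// nested iteration over InstrumentationLibraryReader and Reader.
type printExporter struct {
	export.ExportKindSelector
}

func (e *printExporter) Export(_ context.Context, res *resource.Resource, reader export.InstrumentationLibraryReader) error {
	return reader.ForEach(func(lib instrumentation.Library, rdr export.Reader) error {
		return rdr.ForEach(e.ExportKindSelector, func(rec export.Record) error {
			// One line per exported record, prefixed by its library name.
			fmt.Println(res, lib.Name, rec.Descriptor().Name())
			return nil
		})
	})
}

A test could construct this as &printExporter{ExportKindSelector: export.CumulativeExportKindSelector()} (assuming that selector constructor in this SDK generation) and hand it a reader built with OneInstrumentationLibraryReader.
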
@ -23,7 +23,9 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/instrumentation"
metricsdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
processorTest "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
"go.opentelemetry.io/otel/sdk/resource"
)

@ -31,7 +33,7 @@ import (
func generateTestData(proc export.Processor) {
ctx := context.Background()
accum := metricsdk.NewAccumulator(proc)
meter := metric.WrapMeterImpl(accum, "testing")
meter := metric.WrapMeterImpl(accum)

counter := metric.Must(meter).NewFloat64Counter("counter.sum")

@ -51,12 +53,12 @@ func generateTestData(proc export.Processor) {
func TestProcessorTesting(t *testing.T) {
// Test the Processor test helper using a real Accumulator to
// generate Accumulations.
testProc := processorTest.NewProcessor(
processorTest.AggregatorSelector(),
attribute.DefaultEncoder(),
checkpointer := processorTest.NewCheckpointer(
processorTest.NewProcessor(
processorTest.AggregatorSelector(),
attribute.DefaultEncoder(),
),
)
checkpointer := processorTest.Checkpointer(testProc)

generateTestData(checkpointer)

res := resource.NewSchemaless(attribute.String("R", "V"))

@ -73,7 +75,12 @@ func TestProcessorTesting(t *testing.T) {
attribute.DefaultEncoder(),
)

err := exporter.Export(context.Background(), res, checkpointer.CheckpointSet())
err := exporter.Export(context.Background(), res, processortest.OneInstrumentationLibraryReader(
instrumentation.Library{
Name: "test",
},
checkpointer.Reader(),
))
require.NoError(t, err)
require.EqualValues(t, expect, exporter.Values())
}

@ -23,8 +23,10 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/instrumentation"
metricsdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
processorTest "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
"go.opentelemetry.io/otel/sdk/metric/processor/reducer"
"go.opentelemetry.io/otel/sdk/resource"

@ -53,7 +55,7 @@ func (testFilter) LabelFilterFor(_ *metric.Descriptor) attribute.Filter {

func generateData(impl metric.MeterImpl) {
ctx := context.Background()
meter := metric.WrapMeterImpl(impl, "testing")
meter := metric.WrapMeterImpl(impl)

counter := metric.Must(meter).NewFloat64Counter("counter.sum")

@ -74,7 +76,7 @@ func TestFilterProcessor(t *testing.T) {
attribute.DefaultEncoder(),
)
accum := metricsdk.NewAccumulator(
reducer.New(testFilter{}, processorTest.Checkpointer(testProc)),
reducer.New(testFilter{}, processorTest.NewCheckpointer(testProc)),
)
generateData(accum)

@ -103,7 +105,9 @@ func TestFilterBasicProcessor(t *testing.T) {
}

res := resource.NewSchemaless(attribute.String("R", "V"))
require.NoError(t, exporter.Export(context.Background(), res, basicProc.CheckpointSet()))
require.NoError(t, exporter.Export(context.Background(), res, processortest.OneInstrumentationLibraryReader(instrumentation.Library{
Name: "test",
}, basicProc.Reader())))

require.EqualValues(t, map[string]float64{
"counter.sum/A=1,C=3/R=V": 200,

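The expectation maps in these tests appear to use the processortest exporter's flattened key encoding, "<instrument name>/<encoded labels>/<encoded resource>". A hedged sketch of a complete assertion, reusing the values already shown in this diff (the function name is illustrative):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

// assertFilteredOutput compares the exporter's flattened output against the
// expected "name/labels/resource" keys, e.g. "counter.sum/A=1,C=3/R=V".
func assertFilteredOutput(t *testing.T, exporter *processortest.Exporter) {
	require.EqualValues(t, map[string]float64{
		"counter.sum/A=1,C=3/R=V": 200,
	}, exporter.Values())
}
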
@ -20,6 +20,7 @@ import (
"github.com/stretchr/testify/require"

"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/metrictest"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"

@ -32,12 +33,12 @@ import (
)

var (
testCounterDesc = metric.NewDescriptor("counter", sdkapi.CounterInstrumentKind, number.Int64Kind)
testUpDownCounterDesc = metric.NewDescriptor("updowncounter", sdkapi.UpDownCounterInstrumentKind, number.Int64Kind)
testCounterObserverDesc = metric.NewDescriptor("counterobserver", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
testUpDownCounterObserverDesc = metric.NewDescriptor("updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind)
testHistogramDesc = metric.NewDescriptor("histogram", sdkapi.HistogramInstrumentKind, number.Int64Kind)
testGaugeObserverDesc = metric.NewDescriptor("gauge", sdkapi.GaugeObserverInstrumentKind, number.Int64Kind)
testCounterDesc = metrictest.NewDescriptor("counter", sdkapi.CounterInstrumentKind, number.Int64Kind)
testUpDownCounterDesc = metrictest.NewDescriptor("updowncounter", sdkapi.UpDownCounterInstrumentKind, number.Int64Kind)
testCounterObserverDesc = metrictest.NewDescriptor("counterobserver", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
testUpDownCounterObserverDesc = metrictest.NewDescriptor("updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind)
testHistogramDesc = metrictest.NewDescriptor("histogram", sdkapi.HistogramInstrumentKind, number.Int64Kind)
testGaugeObserverDesc = metrictest.NewDescriptor("gauge", sdkapi.GaugeObserverInstrumentKind, number.Int64Kind)
)

func oneAgg(sel export.AggregatorSelector, desc *metric.Descriptor) export.Aggregator {

@ -246,7 +246,7 @@ func (f *testFixture) preCollect() {
f.dupCheck = map[testKey]int{}
}

func (*testFixture) CheckpointSet() export.CheckpointSet {
func (*testFixture) Reader() export.Reader {
return nil
}

@ -296,7 +296,7 @@ func stressTest(t *testing.T, impl testImpl) {
cc := concurrency()

sdk := NewAccumulator(fixture)
meter := metric.WrapMeterImpl(sdk, "stress_test")
meter := metric.WrapMeterImpl(sdk)
fixture.wg.Add(cc + 1)

for i := 0; i < cc; i++ {
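Taken together, the test updates in this commit follow one wiring pattern: the meter no longer carries an instrumentation-library name, because the library identity is now reported through InstrumentationLibraryReader at export time rather than on every instrument descriptor. A small sketch assembling the pieces already shown above (the package and function names are illustrative):

package example

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	metricsdk "go.opentelemetry.io/otel/sdk/metric"
	processorTest "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

// newTestMeter wires an Accumulator to the processortest checkpointer and
// wraps it as a Meter without an instrumentation-library name.
func newTestMeter() metric.Meter {
	checkpointer := processorTest.NewCheckpointer(
		processorTest.NewProcessor(
			processorTest.AggregatorSelector(),
			attribute.DefaultEncoder(),
		),
	)
	accum := metricsdk.NewAccumulator(checkpointer)
	return metric.WrapMeterImpl(accum)
}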