Mirror of https://github.com/open-telemetry/opentelemetry-go.git, synced 2025-02-09 13:37:12 +02:00
Separate InstrumentationLibrary from metric.Descriptor (#2197)
* factor instrumentation library out of the instrument descriptor
* SDK tests pass
* checkpoint work
* otlp and opencensus tests passing
* prometheus
* tests pass, working on lint
* lint applied: MetricReader->Reader
* comments
* Changelog
* Apply suggestions from code review
  Co-authored-by: alrex <alrex.boten@gmail.com>
* remove an interdependency
* fix build
* re-indent one
* Apply suggestions from code review
  Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
* Lint&feedback
* update after rename
* comment fix
* style fix for meter options
* remove libraryReader, let Controller implement the reader API directly
* rename 'impl' field to 'provider'
* remove a type assertion
* move metric/registry into internal; move registry.MeterProvider into metric controller
* add test for controller registry function
* CheckpointSet->Reader everywhere
* lint
* remove two unnecessary accessor methods; Controller implements MeterProvider and InstrumentationLibraryReader directly, no need to get these
* use a sync.Map
* ensure the initOnce is always called; handle multiple errors
* Apply suggestions from code review
  Co-authored-by: Anthony Mirabella <a9@aneurysm9.com>
* cleanup locking in metrictest
* Revert "ensure the initOnce is always called; handle multiple errors"
  This reverts commit 3356eb5ed0c288ac3edcc2bc2e853aecda7f29b3.
* Revert "use a sync.Map"
  This reverts commit ea7bc599bd3a24c4acb4cd9facd13f08cd702237.
* restore the TODO about sync.Map

Co-authored-by: alrex <alrex.boten@gmail.com>
Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
Co-authored-by: Anthony Mirabella <a9@aneurysm9.com>
This commit is contained in:
parent 92551d3933
commit 3c8e1853f0
@@ -61,6 +61,9 @@ This release includes an API and SDK for the tracing signal that will comply wit
 - The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
 - The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
 - Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
 
 ### Deprecated
 
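As a rough, hedged sketch of what the changelog entry means for exporter authors (the helper name `dumpRecords` is made up for illustration; the `ForEach` signatures are the ones visible in the diff below), the new export path walks two levels: the outer reader yields one `instrumentation.Library` plus an `export.Reader` per library, and the inner reader yields that library's records.

```go
package example

import (
	"fmt"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
)

// dumpRecords is a hypothetical consumer of the two-level reader: it prints
// one line per record, prefixed with the instrumentation library it came from.
func dumpRecords(ilr export.InstrumentationLibraryReader, sel export.ExportKindSelector) error {
	return ilr.ForEach(func(lib instrumentation.Library, r export.Reader) error {
		return r.ForEach(sel, func(rec export.Record) error {
			fmt.Printf("%s %s: %s\n", lib.Name, lib.Version, rec.Descriptor().Name())
			return nil
		})
	})
}
```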
@@ -32,6 +32,7 @@ import (
 "go.opentelemetry.io/otel/metric/unit"
 export "go.opentelemetry.io/otel/sdk/export/metric"
 "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+"go.opentelemetry.io/otel/sdk/instrumentation"
 "go.opentelemetry.io/otel/sdk/resource"
 )
 
@@ -55,18 +56,30 @@ func (e *exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metr
 if len(metrics) != 0 {
 res = convertResource(metrics[0].Resource)
 }
-return e.base.Export(ctx, res, &checkpointSet{metrics: metrics})
+return e.base.Export(ctx, res, &censusLibraryReader{metrics: metrics})
 }
 
-type checkpointSet struct {
-// RWMutex implements locking for the `CheckpointSet` interface.
+type censusLibraryReader struct {
+metrics []*metricdata.Metric
+}
+
+func (r censusLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error {
+return readerFunc(instrumentation.Library{
+Name: "OpenCensus Bridge",
+}, &metricReader{metrics: r.metrics})
+}
+
+type metricReader struct {
+// RWMutex implements locking for the `Reader` interface.
 sync.RWMutex
 metrics []*metricdata.Metric
 }
 
-// ForEach iterates through the CheckpointSet, passing an
-// export.Record with the appropriate aggregation to an exporter.
-func (d *checkpointSet) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
+var _ export.Reader = &metricReader{}
+
+// ForEach iterates through the metrics data, synthesizing an
+// export.Record with the appropriate aggregation for the exporter.
+func (d *metricReader) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
 for _, m := range d.metrics {
 descriptor, err := convertDescriptor(m.Descriptor)
 if err != nil {
@@ -158,7 +171,6 @@ func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, e
 }
 opts := []metric.InstrumentOption{
 metric.WithDescription(ocDescriptor.Description),
-metric.WithInstrumentationName("OpenCensus Bridge"),
 }
 switch ocDescriptor.Unit {
 case metricdata.UnitDimensionless:
@@ -168,5 +180,6 @@ func convertDescriptor(ocDescriptor metricdata.Descriptor) (metric.Descriptor, e
 case metricdata.UnitMilliseconds:
 opts = append(opts, metric.WithUnit(unit.Milliseconds))
 }
-return metric.NewDescriptor(ocDescriptor.Name, ikind, nkind, opts...), nil
+cfg := metric.NewInstrumentConfig(opts...)
+return metric.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil
 }
@@ -28,12 +28,15 @@ import (
 
 "go.opentelemetry.io/otel/attribute"
 "go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
 "go.opentelemetry.io/otel/metric/number"
 "go.opentelemetry.io/otel/metric/sdkapi"
 "go.opentelemetry.io/otel/metric/unit"
 export "go.opentelemetry.io/otel/sdk/export/metric"
 exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
 "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+"go.opentelemetry.io/otel/sdk/instrumentation"
+"go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
 "go.opentelemetry.io/otel/sdk/resource"
 )
 
@@ -44,12 +47,13 @@ type fakeExporter struct {
 err error
 }
 
-func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, cps exportmetric.CheckpointSet) error {
-return cps.ForEach(f, func(record exportmetric.Record) error {
-f.resource = res
-f.records = append(f.records, record)
-return f.err
-})
+func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, ilr exportmetric.InstrumentationLibraryReader) error {
+return controllertest.ReadAll(ilr, export.StatelessExportKindSelector(),
+func(_ instrumentation.Library, record exportmetric.Record) error {
+f.resource = res
+f.records = append(f.records, record)
+return f.err
+})
 }
 
 type fakeErrorHandler struct {
@@ -71,11 +75,10 @@ func (f *fakeErrorHandler) matches(err error) error {
 
 func TestExportMetrics(t *testing.T) {
 now := time.Now()
-basicDesc := metric.NewDescriptor(
+basicDesc := metrictest.NewDescriptor(
 "",
 sdkapi.GaugeObserverInstrumentKind,
 number.Int64Kind,
-metric.WithInstrumentationName("OpenCensus Bridge"),
 )
 fakeErrorHandler := &fakeErrorHandler{}
 otel.SetErrorHandler(fakeErrorHandler)
@@ -393,11 +396,10 @@ func TestConvertDescriptor(t *testing.T) {
 }{
 {
 desc: "empty descriptor",
-expected: metric.NewDescriptor(
+expected: metrictest.NewDescriptor(
 "",
 sdkapi.GaugeObserverInstrumentKind,
 number.Int64Kind,
-metric.WithInstrumentationName("OpenCensus Bridge"),
 ),
 },
 {
@@ -408,11 +410,10 @@ func TestConvertDescriptor(t *testing.T) {
 Unit: metricdata.UnitBytes,
 Type: metricdata.TypeGaugeInt64,
 },
-expected: metric.NewDescriptor(
+expected: metrictest.NewDescriptor(
 "foo",
 sdkapi.GaugeObserverInstrumentKind,
 number.Int64Kind,
-metric.WithInstrumentationName("OpenCensus Bridge"),
 metric.WithDescription("bar"),
 metric.WithUnit(unit.Bytes),
 ),
@@ -425,11 +426,10 @@ func TestConvertDescriptor(t *testing.T) {
 Unit: metricdata.UnitMilliseconds,
 Type: metricdata.TypeGaugeFloat64,
 },
-expected: metric.NewDescriptor(
+expected: metrictest.NewDescriptor(
 "foo",
 sdkapi.GaugeObserverInstrumentKind,
 number.Float64Kind,
-metric.WithInstrumentationName("OpenCensus Bridge"),
 metric.WithDescription("bar"),
 metric.WithUnit(unit.Milliseconds),
 ),
@@ -442,11 +442,10 @@ func TestConvertDescriptor(t *testing.T) {
 Unit: metricdata.UnitDimensionless,
 Type: metricdata.TypeCumulativeInt64,
 },
-expected: metric.NewDescriptor(
+expected: metrictest.NewDescriptor(
 "foo",
 sdkapi.CounterObserverInstrumentKind,
 number.Int64Kind,
-metric.WithInstrumentationName("OpenCensus Bridge"),
 metric.WithDescription("bar"),
 metric.WithUnit(unit.Dimensionless),
 ),
@@ -459,11 +458,10 @@ func TestConvertDescriptor(t *testing.T) {
 Unit: metricdata.UnitDimensionless,
 Type: metricdata.TypeCumulativeFloat64,
 },
-expected: metric.NewDescriptor(
+expected: metrictest.NewDescriptor(
 "foo",
 sdkapi.CounterObserverInstrumentKind,
 number.Float64Kind,
-metric.WithInstrumentationName("OpenCensus Bridge"),
 metric.WithDescription("bar"),
 metric.WithUnit(unit.Dimensionless),
 ),
@@ -8,6 +8,7 @@ require (
 go.opentelemetry.io/otel/metric v0.23.0
 go.opentelemetry.io/otel/sdk v1.0.0
 go.opentelemetry.io/otel/sdk/export/metric v0.23.0
+go.opentelemetry.io/otel/sdk/metric v0.23.0
 go.opentelemetry.io/otel/trace v1.0.0
 )
 
@@ -1,5 +1,7 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -1,5 +1,7 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -40,7 +40,7 @@ var (
 func initMeter() {
 config := prometheus.Config{}
 c := controller.New(
-processor.New(
+processor.NewFactory(
 selector.NewWithHistogramDistribution(
 histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
 ),
@@ -24,6 +24,7 @@ import (
 metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
 "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
 "go.opentelemetry.io/otel/sdk/resource"
+metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
 )
 
 var (
@@ -43,16 +44,20 @@ type Exporter struct {
 }
 
 // Export exports a batch of metrics.
-func (e *Exporter) Export(ctx context.Context, res *resource.Resource, checkpointSet metricsdk.CheckpointSet) error {
-rms, err := metrictransform.CheckpointSet(ctx, e, res, checkpointSet, 1)
+func (e *Exporter) Export(ctx context.Context, res *resource.Resource, ilr metricsdk.InstrumentationLibraryReader) error {
+rm, err := metrictransform.InstrumentationLibraryReader(ctx, e, res, ilr, 1)
 if err != nil {
 return err
 }
-if len(rms) == 0 {
+if rm == nil {
 return nil
 }
 
-return e.client.UploadMetrics(ctx, rms)
+// TODO: There is never more than one resource emitted by this
+// call, as per the specification. We can change the
+// signature of UploadMetrics correspondingly. Here create a
+// singleton list to reduce the size of the current PR:
+return e.client.UploadMetrics(ctx, []*metricpb.ResourceMetrics{rm})
 }
 
 // Start establishes a connection to the receiving endpoint.
@@ -17,7 +17,6 @@ package otlpmetric_test
 import (
 "context"
 "fmt"
-"sync"
 "testing"
 "time"
 
@@ -30,12 +29,14 @@ import (
 "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
 "go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
 "go.opentelemetry.io/otel/metric/number"
 "go.opentelemetry.io/otel/metric/sdkapi"
 metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
-"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+"go.opentelemetry.io/otel/sdk/instrumentation"
 "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
 "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
+"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
 "go.opentelemetry.io/otel/sdk/resource"
 commonpb "go.opentelemetry.io/proto/otlp/common/v1"
 metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
@@ -85,26 +86,31 @@ func pointTime() uint64 {
 return uint64(intervalEnd.UnixNano())
 }
 
-type checkpointSet struct {
-sync.RWMutex
-records []metricsdk.Record
-}
-
-func (m *checkpointSet) ForEach(_ metricsdk.ExportKindSelector, fn func(metricsdk.Record) error) error {
-for _, r := range m.records {
-if err := fn(r); err != nil && err != aggregation.ErrNoData {
-return err
-}
-}
-return nil
-}
-
-type record struct {
+type testRecord struct {
 name string
 iKind sdkapi.InstrumentKind
 nKind number.Kind
-opts []metric.InstrumentOption
 labels []attribute.KeyValue
+
+meterName string
+meterOpts []metric.MeterOption
+}
+
+func record(
+name string,
+iKind sdkapi.InstrumentKind,
+nKind number.Kind,
+labels []attribute.KeyValue,
+meterName string,
+meterOpts ...metric.MeterOption) testRecord {
+return testRecord{
+name: name,
+iKind: iKind,
+nKind: nKind,
+labels: labels,
+meterName: meterName,
+meterOpts: meterOpts,
+}
 }
 
 var (
@@ -154,26 +160,31 @@ var (
 testerAResourcePb = metrictransform.Resource(testerAResource)
 )
 
+const (
+// Most of this test uses an empty instrumentation library name.
+testLibName = ""
+)
+
 func TestNoGroupingExport(t *testing.T) {
 runMetricExportTests(
 t,
 nil,
-nil,
-[]record{
-{
+resource.Empty(),
+[]testRecord{
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-},
-{
+testLibName,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(2)),
-},
+testLibName,
+),
 },
 []*metricpb.ResourceMetrics{
 {
@@ -213,13 +224,13 @@ func TestNoGroupingExport(t *testing.T) {
 }
 
 func TestHistogramMetricGroupingExport(t *testing.T) {
-r := record{
+r := record(
 "histogram",
 sdkapi.HistogramInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-}
+testLibName,
+)
 expected := []*metricpb.ResourceMetrics{
 {
 Resource: nil,
@@ -259,22 +270,22 @@ func TestHistogramMetricGroupingExport(t *testing.T) {
 },
 },
 }
-runMetricExportTests(t, nil, nil, []record{r, r}, expected)
+runMetricExportTests(t, nil, resource.Empty(), []testRecord{r, r}, expected)
 }
 
 func TestCountInt64MetricGroupingExport(t *testing.T) {
-r := record{
+r := record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-}
+testLibName,
+)
 runMetricExportTests(
 t,
 nil,
-nil,
-[]record{r, r},
+resource.Empty(),
+[]testRecord{r, r},
 []*metricpb.ResourceMetrics{
 {
 Resource: nil,
@@ -313,18 +324,18 @@ func TestCountInt64MetricGroupingExport(t *testing.T) {
 }
 
 func TestCountFloat64MetricGroupingExport(t *testing.T) {
-r := record{
+r := record(
 "float64-count",
 sdkapi.CounterInstrumentKind,
 number.Float64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-}
+testLibName,
+)
 runMetricExportTests(
 t,
 nil,
-nil,
-[]record{r, r},
+resource.Empty(),
+[]testRecord{r, r},
 []*metricpb.ResourceMetrics{
 {
 Resource: nil,
@@ -367,35 +378,35 @@ func TestResourceMetricGroupingExport(t *testing.T) {
 t,
 nil,
 testerAResource,
-[]record{
-{
+[]testRecord{
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-},
-{
+testLibName,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-},
-{
+testLibName,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(2)),
-},
-{
+testLibName,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-},
+testLibName,
+),
 },
 []*metricpb.ResourceMetrics{
 {
@@ -447,57 +458,56 @@ func TestResourceMetricGroupingExport(t *testing.T) {
 }
 
 func TestResourceInstLibMetricGroupingExport(t *testing.T) {
-countingLib1 := []metric.InstrumentOption{
-metric.WithInstrumentationName("counting-lib"),
-metric.WithInstrumentationVersion("v1"),
-}
-countingLib2 := []metric.InstrumentOption{
-metric.WithInstrumentationName("counting-lib"),
-metric.WithInstrumentationVersion("v2"),
-}
-summingLib := []metric.InstrumentOption{
-metric.WithInstrumentationName("summing-lib"),
-}
+version1 := metric.WithInstrumentationVersion("v1")
+version2 := metric.WithInstrumentationVersion("v2")
+specialSchema := metric.WithSchemaURL("schurl")
+summingLib := "summing-lib"
+countingLib := "counting-lib"
 runMetricExportTests(
 t,
 nil,
 testerAResource,
-[]record{
-{
+[]testRecord{
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-countingLib1,
 append(baseKeyValues, cpuKey.Int(1)),
-},
-{
+countingLib,
+version1,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-countingLib2,
 append(baseKeyValues, cpuKey.Int(1)),
-},
-{
+countingLib,
+version2,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-countingLib1,
 append(baseKeyValues, cpuKey.Int(1)),
-},
-{
+countingLib,
+version1,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-countingLib1,
 append(baseKeyValues, cpuKey.Int(2)),
-},
-{
+countingLib,
+version1,
+),
+record(
 "int64-count",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
-summingLib,
 append(baseKeyValues, cpuKey.Int(1)),
-},
+summingLib,
+specialSchema,
+),
 },
 []*metricpb.ResourceMetrics{
 {
@@ -569,6 +579,7 @@ func TestResourceInstLibMetricGroupingExport(t *testing.T) {
 InstrumentationLibrary: &commonpb.InstrumentationLibrary{
 Name: "summing-lib",
 },
+SchemaUrl: "schurl",
 Metrics: []*metricpb.Metric{
 {
 Name: "int64-count",
@@ -618,14 +629,14 @@ func TestStatelessExportKind(t *testing.T) {
 ),
 },
 testerAResource,
-[]record{
-{
+[]testRecord{
+record(
 "instrument",
 k.instrumentKind,
 number.Int64Kind,
-nil,
 append(baseKeyValues, cpuKey.Int(1)),
-},
+testLibName,
+),
 },
 []*metricpb.ResourceMetrics{
 {
@@ -660,14 +671,14 @@ func TestStatelessExportKind(t *testing.T) {
 }
 }
 
-func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.Resource, records []record, expected []*metricpb.ResourceMetrics) {
+func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.Resource, records []testRecord, expected []*metricpb.ResourceMetrics) {
 exp, driver := newExporter(t, opts...)
 
-recs := []metricsdk.Record{}
+libraryRecs := map[instrumentation.Library][]metricsdk.Record{}
 for _, r := range records {
 lcopy := make([]attribute.KeyValue, len(r.labels))
 copy(lcopy, r.labels)
-desc := metric.NewDescriptor(r.name, r.iKind, r.nKind, r.opts...)
+desc := metrictest.NewDescriptor(r.name, r.iKind, r.nKind)
 labs := attribute.NewSet(lcopy...)
 
 var agg, ckpt metricsdk.Aggregator
@@ -705,9 +716,15 @@ func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.
 }
 require.NoError(t, agg.SynchronizedMove(ckpt, &desc))
 
-recs = append(recs, metricsdk.NewRecord(&desc, &labs, ckpt.Aggregation(), intervalStart, intervalEnd))
+meterCfg := metric.NewMeterConfig(r.meterOpts...)
+lib := instrumentation.Library{
+Name: r.meterName,
+Version: meterCfg.InstrumentationVersion(),
+SchemaURL: meterCfg.SchemaURL(),
+}
+libraryRecs[lib] = append(libraryRecs[lib], metricsdk.NewRecord(&desc, &labs, ckpt.Aggregation(), intervalStart, intervalEnd))
 }
-assert.NoError(t, exp.Export(context.Background(), res, &checkpointSet{records: recs}))
+assert.NoError(t, exp.Export(context.Background(), res, processortest.MultiInstrumentationLibraryReader(libraryRecs)))
 
 // assert.ElementsMatch does not equate nested slices of different order,
 // therefore this requires the top level slice to be broken down.
@@ -715,7 +732,7 @@ func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.
 // that validate the metric elements match for all expected pairs. Finally,
 // make we saw all expected pairs.
 keyFor := func(ilm *metricpb.InstrumentationLibraryMetrics) string {
-return fmt.Sprintf("%s/%s", ilm.GetInstrumentationLibrary().GetName(), ilm.GetInstrumentationLibrary().GetVersion())
+return fmt.Sprintf("%s/%s/%s", ilm.GetInstrumentationLibrary().GetName(), ilm.GetInstrumentationLibrary().GetVersion(), ilm.GetSchemaUrl())
 }
 got := map[string][]*metricpb.Metric{}
 for _, rm := range driver.rm {
@@ -767,7 +784,11 @@ func TestEmptyMetricExport(t *testing.T) {
 },
 } {
 driver.Reset()
-require.NoError(t, exp.Export(context.Background(), resource.Empty(), &checkpointSet{records: test.records}))
+require.NoError(t, exp.Export(context.Background(), resource.Empty(), processortest.MultiInstrumentationLibraryReader(map[instrumentation.Library][]metricsdk.Record{
+{
+Name: testLibName,
+}: test.records,
+})))
 assert.Equal(t, test.want, driver.rm)
 }
 }
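A condensed, hedged sketch of the test pattern introduced above (the helper name `exportOneLibrary` and the library name/version are made up; the `MultiInstrumentationLibraryReader` and `Export` calls are the ones used in the diff):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
	metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
	"go.opentelemetry.io/otel/sdk/resource"
)

// exportOneLibrary is a hypothetical helper mirroring the test changes above:
// it groups already-checkpointed records under a single instrumentation
// library and exports them through the new two-level reader.
func exportOneLibrary(ctx context.Context, exp *otlpmetric.Exporter, recs []metricsdk.Record) error {
	libraryRecs := map[instrumentation.Library][]metricsdk.Record{
		{Name: "counting-lib", Version: "v1"}: recs,
	}
	return exp.Export(ctx, resource.Empty(),
		processortest.MultiInstrumentationLibraryReader(libraryRecs))
}
```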
@@ -52,15 +52,14 @@ var (
 // transformation.
 ErrContextCanceled = errors.New("context canceled")
 
-// ErrTransforming is returned when an unexected error is encoutered transforming.
+// ErrTransforming is returned when an unexected error is encountered transforming.
 ErrTransforming = errors.New("transforming failed")
 )
 
 // result is the product of transforming Records into OTLP Metrics.
 type result struct {
-InstrumentationLibrary instrumentation.Library
 Metric *metricpb.Metric
 Err error
 }
 
 // toNanos returns the number of nanoseconds since the UNIX epoch.
@@ -71,50 +70,78 @@ func toNanos(t time.Time) uint64 {
 return uint64(t.UnixNano())
 }
 
-// CheckpointSet transforms all records contained in a checkpoint into
+// InstrumentationLibraryReader transforms all records contained in a checkpoint into
 // batched OTLP ResourceMetrics.
-func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, res *resource.Resource, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
-records, errc := source(ctx, exportSelector, cps)
+func InstrumentationLibraryReader(ctx context.Context, exportSelector export.ExportKindSelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) {
+var ilms []*metricpb.InstrumentationLibraryMetrics
 
-// Start a fixed number of goroutines to transform records.
-transformed := make(chan result)
-var wg sync.WaitGroup
-wg.Add(int(numWorkers))
-for i := uint(0); i < numWorkers; i++ {
+err := ilmr.ForEach(func(lib instrumentation.Library, mr export.Reader) error {
+
+records, errc := source(ctx, exportSelector, mr)
+
+// Start a fixed number of goroutines to transform records.
+transformed := make(chan result)
+var wg sync.WaitGroup
+wg.Add(int(numWorkers))
+for i := uint(0); i < numWorkers; i++ {
+go func() {
+defer wg.Done()
+transformer(ctx, exportSelector, records, transformed)
+}()
+}
 go func() {
-defer wg.Done()
-transformer(ctx, exportSelector, records, transformed)
+wg.Wait()
+close(transformed)
 }()
-}
-go func() {
-wg.Wait()
-close(transformed)
-}()
 
 // Synchronously collect the transformed records and transmit.
-rms, err := sink(ctx, res, transformed)
+ms, err := sink(ctx, transformed)
 if err != nil {
+return nil
+}
+
+// source is complete, check for any errors.
+if err := <-errc; err != nil {
+return err
+}
+if len(ms) == 0 {
+return nil
+}
+
+ilms = append(ilms, &metricpb.InstrumentationLibraryMetrics{
+Metrics: ms,
+SchemaUrl: lib.SchemaURL,
+InstrumentationLibrary: &commonpb.InstrumentationLibrary{
+Name: lib.Name,
+Version: lib.Version,
+},
+})
+return nil
+})
+if len(ilms) == 0 {
 return nil, err
 }
 
-// source is complete, check for any errors.
-if err := <-errc; err != nil {
-return nil, err
+rms := &metricpb.ResourceMetrics{
+Resource: Resource(res),
+SchemaUrl: res.SchemaURL(),
+InstrumentationLibraryMetrics: ilms,
 }
-return rms, nil
+
+return rms, err
 }
 
 // source starts a goroutine that sends each one of the Records yielded by
-// the CheckpointSet on the returned chan. Any error encoutered will be sent
+// the Reader on the returned chan. Any error encountered will be sent
 // on the returned error chan after seeding is complete.
-func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
+func source(ctx context.Context, exportSelector export.ExportKindSelector, mr export.Reader) (<-chan export.Record, <-chan error) {
 errc := make(chan error, 1)
 out := make(chan export.Record)
 // Seed records into process.
 go func() {
 defer close(out)
 // No select is needed since errc is buffered.
-errc <- cps.ForEach(exportSelector, func(r export.Record) error {
+errc <- mr.ForEach(exportSelector, func(r export.Record) error {
 select {
 case <-ctx.Done():
 return ErrContextCanceled
@@ -136,10 +163,6 @@ func transformer(ctx context.Context, exportSelector export.ExportKindSelector,
 continue
 }
 res := result{
-InstrumentationLibrary: instrumentation.Library{
-Name: r.Descriptor().InstrumentationName(),
-Version: r.Descriptor().InstrumentationVersion(),
-},
 Metric: m,
 Err: err,
 }
@@ -153,32 +176,34 @@
 
 // sink collects transformed Records and batches them.
 //
-// Any errors encoutered transforming input will be reported with an
+// Any errors encountered transforming input will be reported with an
 // ErrTransforming as well as the completed ResourceMetrics. It is up to the
-// caller to handle any incorrect data in these ResourceMetrics.
-func sink(ctx context.Context, res *resource.Resource, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
+// caller to handle any incorrect data in these ResourceMetric.
+func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) {
 var errStrings []string
 
-// Group by instrumentation library name and then the MetricDescriptor.
-grouped := map[instrumentation.Library]map[string]*metricpb.Metric{}
+// Group by the MetricDescriptor.
+grouped := map[string]*metricpb.Metric{}
 for res := range in {
 if res.Err != nil {
 errStrings = append(errStrings, res.Err.Error())
 continue
 }
 
-mb, ok := grouped[res.InstrumentationLibrary]
-if !ok {
-mb = make(map[string]*metricpb.Metric)
-grouped[res.InstrumentationLibrary] = mb
-}
-
 mID := res.Metric.GetName()
-m, ok := mb[mID]
+m, ok := grouped[mID]
 if !ok {
-mb[mID] = res.Metric
+grouped[mID] = res.Metric
 continue
 }
+
+// Note: There is extra work happening in this code
+// that can be improved when the work described in
+// #2119 is completed. The SDK has a guarantee that
+// no more than one point per period per label set is
+// produced, so this fallthrough should never happen.
+// The final step of #2119 is to remove all the
+// grouping logic here.
 switch res.Metric.Data.(type) {
 case *metricpb.Metric_Gauge:
 m.GetGauge().DataPoints = append(m.GetGauge().DataPoints, res.Metric.GetGauge().DataPoints...)
@@ -198,33 +223,16 @@ func sink(ctx context.Context, res *resource.Resource, in <-chan result) ([]*met
 return nil, nil
 }
 
-rm := &metricpb.ResourceMetrics{
-Resource: Resource(res),
-}
-if res != nil {
-rm.SchemaUrl = res.SchemaURL()
-}
-
-rms := []*metricpb.ResourceMetrics{rm}
-for il, mb := range grouped {
-ilm := &metricpb.InstrumentationLibraryMetrics{
-Metrics: make([]*metricpb.Metric, 0, len(mb)),
-InstrumentationLibrary: &commonpb.InstrumentationLibrary{
-Name: il.Name,
-Version: il.Version,
-},
-}
-for _, m := range mb {
-ilm.Metrics = append(ilm.Metrics, m)
-}
-rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
+ms := make([]*metricpb.Metric, 0, len(grouped))
+for _, m := range grouped {
+ms = append(ms, m)
 }
 
 // Report any transform errors.
 if len(errStrings) > 0 {
-return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
+return ms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
 }
-return rms, nil
+return ms, nil
 }
 
 // Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
@@ -26,6 +26,7 @@ import (
 
 "go.opentelemetry.io/otel/attribute"
 "go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
 "go.opentelemetry.io/otel/metric/number"
 "go.opentelemetry.io/otel/metric/sdkapi"
 export "go.opentelemetry.io/otel/sdk/export/metric"
@@ -122,7 +123,7 @@ func TestMinMaxSumCountValue(t *testing.T) {
 }
 
 func TestMinMaxSumCountDatapoints(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
 labels := attribute.NewSet(attribute.String("one", "1"))
 mmscs := minmaxsumcount.New(2, &metric.Descriptor{})
 mmsc, ckpt := &mmscs[0], &mmscs[1]
@@ -178,7 +179,7 @@ func TestMinMaxSumCountPropagatesErrors(t *testing.T) {
 }
 
 func TestSumIntDataPoints(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
 labels := attribute.NewSet(attribute.String("one", "1"))
 sums := sumAgg.New(2)
 s, ckpt := &sums[0], &sums[1]
@@ -218,7 +219,7 @@ func TestSumIntDataPoints(t *testing.T) {
 }
 
 func TestSumFloatDataPoints(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
 labels := attribute.NewSet(attribute.String("one", "1"))
 sums := sumAgg.New(2)
 s, ckpt := &sums[0], &sums[1]
@@ -256,7 +257,7 @@ func TestSumFloatDataPoints(t *testing.T) {
 }
 
 func TestLastValueIntDataPoints(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
 labels := attribute.NewSet(attribute.String("one", "1"))
 lvs := lvAgg.New(2)
 lv, ckpt := &lvs[0], &lvs[1]
@@ -291,7 +292,7 @@ func TestLastValueIntDataPoints(t *testing.T) {
 }
 
 func TestExactIntDataPoints(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
 labels := attribute.NewSet(attribute.String("one", "1"))
 arrs := arrAgg.New(2)
 e, ckpt := &arrs[0], &arrs[1]
@@ -326,7 +327,7 @@ func TestExactIntDataPoints(t *testing.T) {
 }
 
 func TestExactFloatDataPoints(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
 labels := attribute.NewSet(attribute.String("one", "1"))
 arrs := arrAgg.New(2)
 e, ckpt := &arrs[0], &arrs[1]
@@ -360,7 +361,7 @@ func TestExactFloatDataPoints(t *testing.T) {
 }
 
 func TestSumErrUnknownValueType(t *testing.T) {
-desc := metric.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Kind(-1))
+desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Kind(-1))
 labels := attribute.NewSet()
 s := &sumAgg.New(1)[0]
 record := export.NewRecord(&desc, &labels, s, intervalStart, intervalEnd)
@@ -445,7 +446,7 @@ var _ aggregation.MinMaxSumCount = &testErrMinMaxSumCount{}
 
 func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
 makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
-desc := metric.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
+desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
 labels := attribute.NewSet()
 test := &testAgg{
 kind: kind,
@@ -481,7 +482,7 @@ func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
 
 func TestRecordAggregatorUnexpectedErrors(t *testing.T) {
 makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
-desc := metric.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
+desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
 labels := attribute.NewSet()
 return Record(export.CumulativeExportKindSelector(), export.NewRecord(&desc, &labels, agg, intervalStart, intervalEnd))
 }
@@ -20,81 +20,52 @@ import (
 "time"
 
 "go.opentelemetry.io/otel/attribute"
-"go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
 "go.opentelemetry.io/otel/metric/number"
 "go.opentelemetry.io/otel/metric/sdkapi"
 exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
+"go.opentelemetry.io/otel/sdk/instrumentation"
 "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
+"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
 )
 
-// Used to avoid implementing locking functions for test
-// checkpointsets.
-type noopLocker struct{}
-
-// Lock implements sync.Locker, which is needed for
-// exportmetric.CheckpointSet.
-func (noopLocker) Lock() {}
-
-// Unlock implements sync.Locker, which is needed for
-// exportmetric.CheckpointSet.
-func (noopLocker) Unlock() {}
-
-// RLock implements exportmetric.CheckpointSet.
-func (noopLocker) RLock() {}
-
-// RUnlock implements exportmetric.CheckpointSet.
-func (noopLocker) RUnlock() {}
-
-// OneRecordCheckpointSet is a CheckpointSet that returns just one
+// OneRecordReader is a Reader that returns just one
 // filled record. It may be useful for testing driver's metrics
 // export.
-type OneRecordCheckpointSet struct {
-noopLocker
-}
-
-var _ exportmetric.CheckpointSet = OneRecordCheckpointSet{}
-
-// ForEach implements exportmetric.CheckpointSet. It always invokes
-// the callback once with always the same record.
-func (OneRecordCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
-desc := metric.NewDescriptor(
+func OneRecordReader() exportmetric.InstrumentationLibraryReader {
+desc := metrictest.NewDescriptor(
 "foo",
 sdkapi.CounterInstrumentKind,
 number.Int64Kind,
 )
 agg := sum.New(1)
 if err := agg[0].Update(context.Background(), number.NewInt64Number(42), &desc); err != nil {
-return err
+panic(err)
 }
 start := time.Date(2020, time.December, 8, 19, 15, 0, 0, time.UTC)
 end := time.Date(2020, time.December, 8, 19, 16, 0, 0, time.UTC)
 labels := attribute.NewSet(attribute.String("abc", "def"), attribute.Int64("one", 1))
 rec := exportmetric.NewRecord(&desc, &labels, agg[0].Aggregation(), start, end)
-return recordFunc(rec)
+
+return processortest.MultiInstrumentationLibraryReader(
+map[instrumentation.Library][]exportmetric.Record{
+{
+Name: "onelib",
+}: {rec},
+})
 }
 
-// EmptyCheckpointSet is a checkpointer that has no records at all.
-type EmptyCheckpointSet struct {
-noopLocker
+func EmptyReader() exportmetric.InstrumentationLibraryReader {
+return processortest.MultiInstrumentationLibraryReader(nil)
 }
 
-var _ exportmetric.CheckpointSet = EmptyCheckpointSet{}
-
-// ForEach implements exportmetric.CheckpointSet. It never invokes the
-// callback.
-func (EmptyCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
-return nil
-}
-
-// FailCheckpointSet is a checkpointer that returns an error during
+// FailReader is a checkpointer that returns an error during
 // ForEach.
-type FailCheckpointSet struct {
-noopLocker
-}
-
-var _ exportmetric.CheckpointSet = FailCheckpointSet{}
+type FailReader struct{}
+
+var _ exportmetric.InstrumentationLibraryReader = FailReader{}
 
-// ForEach implements exportmetric.CheckpointSet. It always fails.
-func (FailCheckpointSet) ForEach(kindSelector exportmetric.ExportKindSelector, recordFunc func(exportmetric.Record) error) error {
+// ForEach implements exportmetric.Reader. It always fails.
+func (FailReader) ForEach(readerFunc func(instrumentation.Library, exportmetric.Reader) error) error {
 return fmt.Errorf("fail")
 }
@ -40,11 +40,11 @@ import (
|
|||||||
// themselves.
|
// themselves.
|
||||||
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
|
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
|
||||||
selector := simple.NewWithInexpensiveDistribution()
|
selector := simple.NewWithInexpensiveDistribution()
|
||||||
proc := processor.New(selector, exportmetric.StatelessExportKindSelector())
|
proc := processor.NewFactory(selector, exportmetric.StatelessExportKindSelector())
|
||||||
cont := controller.New(proc, controller.WithExporter(exp))
|
cont := controller.New(proc, controller.WithExporter(exp))
|
||||||
require.NoError(t, cont.Start(ctx))
|
require.NoError(t, cont.Start(ctx))
|
||||||
|
|
||||||
meter := cont.MeterProvider().Meter("test-meter")
|
meter := cont.Meter("test-meter")
|
||||||
labels := []attribute.KeyValue{attribute.Bool("test", true)}
|
labels := []attribute.KeyValue{attribute.Bool("test", true)}
|
||||||
|
|
||||||
type data struct {
|
type data struct {
|
||||||
|
@ -40,7 +40,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
oneRecord = otlpmetrictest.OneRecordCheckpointSet{}
|
oneRecord = otlpmetrictest.OneRecordReader()
|
||||||
|
|
||||||
testResource = resource.Empty()
|
testResource = resource.Empty()
|
||||||
)
|
)
|
||||||
@ -719,7 +719,7 @@ func TestEmptyData(t *testing.T) {
|
|||||||
assert.NoError(t, exp.Shutdown(ctx))
|
assert.NoError(t, exp.Shutdown(ctx))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
assert.NoError(t, exp.Export(ctx, testResource, otlpmetrictest.EmptyCheckpointSet{}))
|
assert.NoError(t, exp.Export(ctx, testResource, otlpmetrictest.EmptyReader()))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFailedMetricTransform(t *testing.T) {
|
func TestFailedMetricTransform(t *testing.T) {
|
||||||
@ -737,5 +737,5 @@ func TestFailedMetricTransform(t *testing.T) {
|
|||||||
assert.NoError(t, exp.Shutdown(ctx))
|
assert.NoError(t, exp.Shutdown(ctx))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
assert.Error(t, exp.Export(ctx, testResource, otlpmetrictest.FailCheckpointSet{}))
|
assert.Error(t, exp.Export(ctx, testResource, otlpmetrictest.FailReader{}))
|
||||||
}
|
}
|
||||||
|
@ -48,14 +48,14 @@ func Example_insecure() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
pusher := controller.New(
|
pusher := controller.New(
|
||||||
processor.New(
|
processor.NewFactory(
|
||||||
simple.NewWithExactDistribution(),
|
simple.NewWithExactDistribution(),
|
||||||
exp,
|
exp,
|
||||||
),
|
),
|
||||||
controller.WithExporter(exp),
|
controller.WithExporter(exp),
|
||||||
controller.WithCollectPeriod(2*time.Second),
|
controller.WithCollectPeriod(2*time.Second),
|
||||||
)
|
)
|
||||||
global.SetMeterProvider(pusher.MeterProvider())
|
global.SetMeterProvider(pusher)
|
||||||
|
|
||||||
if err := pusher.Start(ctx); err != nil {
|
if err := pusher.Start(ctx); err != nil {
|
||||||
log.Fatalf("could not start metric controoler: %v", err)
|
log.Fatalf("could not start metric controoler: %v", err)
|
||||||
@ -107,14 +107,14 @@ func Example_withTLS() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
pusher := controller.New(
|
pusher := controller.New(
|
||||||
processor.New(
|
processor.NewFactory(
|
||||||
simple.NewWithExactDistribution(),
|
simple.NewWithExactDistribution(),
|
||||||
exp,
|
exp,
|
||||||
),
|
),
|
||||||
controller.WithExporter(exp),
|
controller.WithExporter(exp),
|
||||||
controller.WithCollectPeriod(2*time.Second),
|
controller.WithCollectPeriod(2*time.Second),
|
||||||
)
|
)
|
||||||
global.SetMeterProvider(pusher.MeterProvider())
|
global.SetMeterProvider(pusher)
|
||||||
|
|
||||||
if err := pusher.Start(ctx); err != nil {
|
if err := pusher.Start(ctx); err != nil {
|
||||||
log.Fatalf("could not start metric controoler: %v", err)
|
log.Fatalf("could not start metric controoler: %v", err)
|
||||||
@ -164,14 +164,14 @@ func Example_withDifferentSignalCollectors() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
pusher := controller.New(
|
pusher := controller.New(
|
||||||
processor.New(
|
processor.NewFactory(
|
||||||
simple.NewWithExactDistribution(),
|
simple.NewWithExactDistribution(),
|
||||||
exp,
|
exp,
|
||||||
),
|
),
|
||||||
controller.WithExporter(exp),
|
controller.WithExporter(exp),
|
||||||
controller.WithCollectPeriod(2*time.Second),
|
controller.WithCollectPeriod(2*time.Second),
|
||||||
)
|
)
|
||||||
global.SetMeterProvider(pusher.MeterProvider())
|
global.SetMeterProvider(pusher)
|
||||||
|
|
||||||
if err := pusher.Start(ctx); err != nil {
|
if err := pusher.Start(ctx); err != nil {
|
||||||
log.Fatalf("could not start metric controoler: %v", err)
|
log.Fatalf("could not start metric controoler: %v", err)
|
||||||
|
@ -38,7 +38,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
oneRecord = otlpmetrictest.OneRecordCheckpointSet{}
|
oneRecord = otlpmetrictest.OneRecordReader()
|
||||||
|
|
||||||
testResource = resource.Empty()
|
testResource = resource.Empty()
|
||||||
)
|
)
|
||||||
|
@ -32,6 +32,7 @@ import (
|
|||||||
"go.opentelemetry.io/otel/metric/number"
|
"go.opentelemetry.io/otel/metric/number"
|
||||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||||
"go.opentelemetry.io/otel/sdk/resource"
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
)
|
)
|
||||||
@ -120,7 +121,7 @@ func New(config Config, controller *controller.Controller) (*Exporter, error) {
|
|||||||
|
|
||||||
// MeterProvider returns the MeterProvider of this exporter.
|
// MeterProvider returns the MeterProvider of this exporter.
|
||||||
func (e *Exporter) MeterProvider() metric.MeterProvider {
|
func (e *Exporter) MeterProvider() metric.MeterProvider {
|
||||||
return e.controller.MeterProvider()
|
return e.controller
|
||||||
}
|
}
|
||||||
|
|
||||||
// Controller returns the controller object that coordinates collection for the SDK.
|
// Controller returns the controller object that coordinates collection for the SDK.
|
||||||
@ -152,15 +153,17 @@ func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
|||||||
c.exp.lock.RLock()
|
c.exp.lock.RLock()
|
||||||
defer c.exp.lock.RUnlock()
|
defer c.exp.lock.RUnlock()
|
||||||
|
|
||||||
_ = c.exp.Controller().ForEach(c.exp, func(record export.Record) error {
|
_ = c.exp.Controller().ForEach(func(_ instrumentation.Library, reader export.Reader) error {
|
||||||
var labelKeys []string
|
return reader.ForEach(c.exp, func(record export.Record) error {
|
||||||
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, nil)
|
var labelKeys []string
|
||||||
ch <- c.toDesc(record, labelKeys)
|
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, nil)
|
||||||
return nil
|
ch <- c.toDesc(record, labelKeys)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Collect exports the last calculated CheckpointSet.
|
// Collect exports the last calculated Reader state.
|
||||||
//
|
//
|
||||||
// Collect is invoked whenever prometheus.Gatherer is also invoked.
|
// Collect is invoked whenever prometheus.Gatherer is also invoked.
|
||||||
// For example, when the HTTP endpoint is invoked by Prometheus.
|
// For example, when the HTTP endpoint is invoked by Prometheus.
|
||||||
@ -173,36 +176,39 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
|||||||
otel.Handle(err)
|
otel.Handle(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err := ctrl.ForEach(c.exp, func(record export.Record) error {
|
err := ctrl.ForEach(func(_ instrumentation.Library, reader export.Reader) error {
|
||||||
agg := record.Aggregation()
|
return reader.ForEach(c.exp, func(record export.Record) error {
|
||||||
numberKind := record.Descriptor().NumberKind()
|
|
||||||
instrumentKind := record.Descriptor().InstrumentKind()
|
|
||||||
|
|
||||||
var labelKeys, labels []string
|
agg := record.Aggregation()
|
||||||
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, &labels)
|
numberKind := record.Descriptor().NumberKind()
|
||||||
|
instrumentKind := record.Descriptor().InstrumentKind()
|
||||||
|
|
||||||
desc := c.toDesc(record, labelKeys)
|
var labelKeys, labels []string
|
||||||
|
mergeLabels(record, c.exp.controller.Resource(), &labelKeys, &labels)
|
||||||
|
|
||||||
if hist, ok := agg.(aggregation.Histogram); ok {
|
desc := c.toDesc(record, labelKeys)
|
||||||
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
|
|
||||||
return fmt.Errorf("exporting histogram: %w", err)
|
if hist, ok := agg.(aggregation.Histogram); ok {
|
||||||
|
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
|
||||||
|
return fmt.Errorf("exporting histogram: %w", err)
|
||||||
|
}
|
||||||
|
} else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() {
|
||||||
|
if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
||||||
|
return fmt.Errorf("exporting monotonic counter: %w", err)
|
||||||
|
}
|
||||||
|
} else if sum, ok := agg.(aggregation.Sum); ok && !instrumentKind.Monotonic() {
|
||||||
|
if err := c.exportNonMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
||||||
|
return fmt.Errorf("exporting non monotonic counter: %w", err)
|
||||||
|
}
|
||||||
|
} else if lastValue, ok := agg.(aggregation.LastValue); ok {
|
||||||
|
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
|
||||||
|
return fmt.Errorf("exporting last value: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
|
||||||
}
|
}
|
||||||
} else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() {
|
return nil
|
||||||
if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
})
|
||||||
return fmt.Errorf("exporting monotonic counter: %w", err)
|
|
||||||
}
|
|
||||||
} else if sum, ok := agg.(aggregation.Sum); ok && !instrumentKind.Monotonic() {
|
|
||||||
if err := c.exportNonMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
|
|
||||||
return fmt.Errorf("exporting non monotonic counter: %w", err)
|
|
||||||
}
|
|
||||||
} else if lastValue, ok := agg.(aggregation.LastValue); ok {
|
|
||||||
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
|
|
||||||
return fmt.Errorf("exporting last value: %w", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
otel.Handle(err)
|
otel.Handle(err)
|
||||||
|
@ -84,7 +84,7 @@ func expectHistogram(name string, values ...string) expectedMetric {
|
|||||||
|
|
||||||
func newPipeline(config prometheus.Config, options ...controller.Option) (*prometheus.Exporter, error) {
|
func newPipeline(config prometheus.Config, options ...controller.Option) (*prometheus.Exporter, error) {
|
||||||
c := controller.New(
|
c := controller.New(
|
||||||
processor.New(
|
processor.NewFactory(
|
||||||
selector.NewWithHistogramDistribution(
|
selector.NewWithHistogramDistribution(
|
||||||
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
|
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
|
||||||
),
|
),
|
||||||
|
@ -72,7 +72,7 @@ func InstallExportPipeline(ctx context.Context) func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pusher := controller.New(
|
pusher := controller.New(
|
||||||
processor.New(
|
processor.NewFactory(
|
||||||
simple.NewWithInexpensiveDistribution(),
|
simple.NewWithInexpensiveDistribution(),
|
||||||
exporter,
|
exporter,
|
||||||
),
|
),
|
||||||
@ -81,7 +81,7 @@ func InstallExportPipeline(ctx context.Context) func() {
|
|||||||
if err = pusher.Start(ctx); err != nil {
|
if err = pusher.Start(ctx); err != nil {
|
||||||
log.Fatalf("starting push controller: %v", err)
|
log.Fatalf("starting push controller: %v", err)
|
||||||
}
|
}
|
||||||
global.SetMeterProvider(pusher.MeterProvider())
|
global.SetMeterProvider(pusher)
|
||||||
|
|
||||||
return func() {
|
return func() {
|
||||||
if err := pusher.Stop(ctx); err != nil {
|
if err := pusher.Stop(ctx); err != nil {
|
||||||
|
@ -25,6 +25,7 @@ import (
|
|||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
|
exportmetric "go.opentelemetry.io/otel/sdk/export/metric"
|
||||||
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
"go.opentelemetry.io/otel/sdk/resource"
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -50,93 +51,99 @@ func (e *metricExporter) ExportKindFor(desc *metric.Descriptor, kind aggregation
|
|||||||
return exportmetric.StatelessExportKindSelector().ExportKindFor(desc, kind)
|
return exportmetric.StatelessExportKindSelector().ExportKindFor(desc, kind)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *metricExporter) Export(_ context.Context, res *resource.Resource, checkpointSet exportmetric.CheckpointSet) error {
|
func (e *metricExporter) Export(_ context.Context, res *resource.Resource, reader exportmetric.InstrumentationLibraryReader) error {
|
||||||
var aggError error
|
var aggError error
|
||||||
var batch []line
|
var batch []line
|
||||||
aggError = checkpointSet.ForEach(e, func(record exportmetric.Record) error {
|
aggError = reader.ForEach(func(lib instrumentation.Library, mr exportmetric.Reader) error {
|
||||||
desc := record.Descriptor()
|
|
||||||
agg := record.Aggregation()
|
|
||||||
kind := desc.NumberKind()
|
|
||||||
encodedResource := res.Encoded(e.config.LabelEncoder)
|
|
||||||
|
|
||||||
var instLabels []attribute.KeyValue
|
var instLabels []attribute.KeyValue
|
||||||
if name := desc.InstrumentationName(); name != "" {
|
if name := lib.Name; name != "" {
|
||||||
instLabels = append(instLabels, attribute.String("instrumentation.name", name))
|
instLabels = append(instLabels, attribute.String("instrumentation.name", name))
|
||||||
if version := desc.InstrumentationVersion(); version != "" {
|
if version := lib.Version; version != "" {
|
||||||
instLabels = append(instLabels, attribute.String("instrumentation.version", version))
|
instLabels = append(instLabels, attribute.String("instrumentation.version", version))
|
||||||
}
|
}
|
||||||
|
if schema := lib.SchemaURL; schema != "" {
|
||||||
|
instLabels = append(instLabels, attribute.String("instrumentation.schema_url", schema))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
instSet := attribute.NewSet(instLabels...)
|
instSet := attribute.NewSet(instLabels...)
|
||||||
encodedInstLabels := instSet.Encoded(e.config.LabelEncoder)
|
encodedInstLabels := instSet.Encoded(e.config.LabelEncoder)
|
||||||
|
|
||||||
var expose line
|
return mr.ForEach(e, func(record exportmetric.Record) error {
|
||||||
|
desc := record.Descriptor()
|
||||||
|
agg := record.Aggregation()
|
||||||
|
kind := desc.NumberKind()
|
||||||
|
encodedResource := res.Encoded(e.config.LabelEncoder)
|
||||||
|
|
||||||
if sum, ok := agg.(aggregation.Sum); ok {
|
var expose line
|
||||||
value, err := sum.Sum()
|
|
||||||
if err != nil {
|
if sum, ok := agg.(aggregation.Sum); ok {
|
||||||
return err
|
value, err := sum.Sum()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
expose.Sum = value.AsInterface(kind)
|
||||||
}
|
}
|
||||||
expose.Sum = value.AsInterface(kind)
|
|
||||||
}
|
|
||||||
|
|
||||||
if mmsc, ok := agg.(aggregation.MinMaxSumCount); ok {
|
if mmsc, ok := agg.(aggregation.MinMaxSumCount); ok {
|
||||||
count, err := mmsc.Count()
|
count, err := mmsc.Count()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
expose.Count = count
|
||||||
|
|
||||||
|
max, err := mmsc.Max()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
expose.Max = max.AsInterface(kind)
|
||||||
|
|
||||||
|
min, err := mmsc.Min()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
expose.Min = min.AsInterface(kind)
|
||||||
|
} else if lv, ok := agg.(aggregation.LastValue); ok {
|
||||||
|
value, timestamp, err := lv.LastValue()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
expose.LastValue = value.AsInterface(kind)
|
||||||
|
|
||||||
|
if e.config.Timestamps {
|
||||||
|
expose.Timestamp = ×tamp
|
||||||
|
}
|
||||||
}
|
}
|
||||||
expose.Count = count
|
|
||||||
|
|
||||||
max, err := mmsc.Max()
|
var encodedLabels string
|
||||||
if err != nil {
|
iter := record.Labels().Iter()
|
||||||
return err
|
if iter.Len() > 0 {
|
||||||
|
encodedLabels = record.Labels().Encoded(e.config.LabelEncoder)
|
||||||
}
|
}
|
||||||
expose.Max = max.AsInterface(kind)
|
|
||||||
|
|
||||||
min, err := mmsc.Min()
|
var sb strings.Builder
|
||||||
if err != nil {
|
|
||||||
return err
|
sb.WriteString(desc.Name())
|
||||||
|
|
||||||
|
if len(encodedLabels) > 0 || len(encodedResource) > 0 || len(encodedInstLabels) > 0 {
|
||||||
|
sb.WriteRune('{')
|
||||||
|
sb.WriteString(encodedResource)
|
||||||
|
if len(encodedInstLabels) > 0 && len(encodedResource) > 0 {
|
||||||
|
sb.WriteRune(',')
|
||||||
|
}
|
||||||
|
sb.WriteString(encodedInstLabels)
|
||||||
|
if len(encodedLabels) > 0 && (len(encodedInstLabels) > 0 || len(encodedResource) > 0) {
|
||||||
|
sb.WriteRune(',')
|
||||||
|
}
|
||||||
|
sb.WriteString(encodedLabels)
|
||||||
|
sb.WriteRune('}')
|
||||||
}
|
}
|
||||||
expose.Min = min.AsInterface(kind)
|
|
||||||
} else if lv, ok := agg.(aggregation.LastValue); ok {
|
|
||||||
value, timestamp, err := lv.LastValue()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
expose.LastValue = value.AsInterface(kind)
|
|
||||||
|
|
||||||
if e.config.Timestamps {
|
expose.Name = sb.String()
|
||||||
expose.Timestamp = ×tamp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var encodedLabels string
|
batch = append(batch, expose)
|
||||||
iter := record.Labels().Iter()
|
return nil
|
||||||
if iter.Len() > 0 {
|
})
|
||||||
encodedLabels = record.Labels().Encoded(e.config.LabelEncoder)
|
|
||||||
}
|
|
||||||
|
|
||||||
var sb strings.Builder
|
|
||||||
|
|
||||||
sb.WriteString(desc.Name())
|
|
||||||
|
|
||||||
if len(encodedLabels) > 0 || len(encodedResource) > 0 || len(encodedInstLabels) > 0 {
|
|
||||||
sb.WriteRune('{')
|
|
||||||
sb.WriteString(encodedResource)
|
|
||||||
if len(encodedInstLabels) > 0 && len(encodedResource) > 0 {
|
|
||||||
sb.WriteRune(',')
|
|
||||||
}
|
|
||||||
sb.WriteString(encodedInstLabels)
|
|
||||||
if len(encodedLabels) > 0 && (len(encodedInstLabels) > 0 || len(encodedResource) > 0) {
|
|
||||||
sb.WriteRune(',')
|
|
||||||
}
|
|
||||||
sb.WriteString(encodedLabels)
|
|
||||||
sb.WriteRune('}')
|
|
||||||
}
|
|
||||||
|
|
||||||
expose.Name = sb.String()
|
|
||||||
|
|
||||||
batch = append(batch, expose)
|
|
||||||
return nil
|
|
||||||
})
|
})
|
||||||
if len(batch) == 0 {
|
if len(batch) == 0 {
|
||||||
return aggError
|
return aggError
|
||||||
|
@ -61,14 +61,14 @@ func newFixtureWithResource(t *testing.T, res *resource.Resource, opts ...stdout
|
|||||||
t.Fatal("Error building fixture: ", err)
|
t.Fatal("Error building fixture: ", err)
|
||||||
}
|
}
|
||||||
aggSel := processortest.AggregatorSelector()
|
aggSel := processortest.AggregatorSelector()
|
||||||
proc := processor.New(aggSel, export.StatelessExportKindSelector())
|
proc := processor.NewFactory(aggSel, export.StatelessExportKindSelector())
|
||||||
cont := controller.New(proc,
|
cont := controller.New(proc,
|
||||||
controller.WithExporter(exp),
|
controller.WithExporter(exp),
|
||||||
controller.WithResource(res),
|
controller.WithResource(res),
|
||||||
)
|
)
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
require.NoError(t, cont.Start(ctx))
|
require.NoError(t, cont.Start(ctx))
|
||||||
meter := cont.MeterProvider().Meter("test")
|
meter := cont.Meter("test")
|
||||||
|
|
||||||
return testFixture{
|
return testFixture{
|
||||||
t: t,
|
t: t,
|
||||||
@ -87,7 +87,7 @@ func (fix testFixture) Output() string {
|
|||||||
func TestStdoutTimestamp(t *testing.T) {
|
func TestStdoutTimestamp(t *testing.T) {
|
||||||
var buf bytes.Buffer
|
var buf bytes.Buffer
|
||||||
aggSel := processortest.AggregatorSelector()
|
aggSel := processortest.AggregatorSelector()
|
||||||
proc := processor.New(aggSel, export.CumulativeExportKindSelector())
|
proc := processor.NewFactory(aggSel, export.CumulativeExportKindSelector())
|
||||||
exporter, err := stdoutmetric.New(
|
exporter, err := stdoutmetric.New(
|
||||||
stdoutmetric.WithWriter(&buf),
|
stdoutmetric.WithWriter(&buf),
|
||||||
)
|
)
|
||||||
@ -101,7 +101,7 @@ func TestStdoutTimestamp(t *testing.T) {
|
|||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
|
|
||||||
require.NoError(t, cont.Start(ctx))
|
require.NoError(t, cont.Start(ctx))
|
||||||
meter := cont.MeterProvider().Meter("test")
|
meter := cont.Meter("test")
|
||||||
counter := metric.Must(meter).NewInt64Counter("name.lastvalue")
|
counter := metric.Must(meter).NewInt64Counter("name.lastvalue")
|
||||||
|
|
||||||
before := time.Now()
|
before := time.Now()
|
||||||
|
@ -21,9 +21,9 @@ import (
|
|||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel/attribute"
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
"go.opentelemetry.io/otel/metric/number"
|
"go.opentelemetry.io/otel/metric/number"
|
||||||
"go.opentelemetry.io/otel/metric/registry"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// This file contains the forwarding implementation of MeterProvider used as
|
// This file contains the forwarding implementation of MeterProvider used as
|
||||||
@ -48,7 +48,9 @@ import (
|
|||||||
// methods of the api/metric/registry package.
|
// methods of the api/metric/registry package.
|
||||||
|
|
||||||
type meterKey struct {
|
type meterKey struct {
|
||||||
Name, Version string
|
InstrumentationName string
|
||||||
|
InstrumentationVersion string
|
||||||
|
SchemaURL string
|
||||||
}
|
}
|
||||||
|
|
||||||
type meterProvider struct {
|
type meterProvider struct {
|
||||||
@ -138,7 +140,7 @@ func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
|
|||||||
|
|
||||||
p.delegate = provider
|
p.delegate = provider
|
||||||
for key, entry := range p.meters {
|
for key, entry := range p.meters {
|
||||||
entry.impl.setDelegate(key.Name, key.Version, provider)
|
entry.impl.setDelegate(key, provider)
|
||||||
}
|
}
|
||||||
p.meters = nil
|
p.meters = nil
|
||||||
}
|
}
|
||||||
@ -151,28 +153,38 @@ func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOp
|
|||||||
return p.delegate.Meter(instrumentationName, opts...)
|
return p.delegate.Meter(instrumentationName, opts...)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cfg := metric.NewMeterConfig(opts...)
|
||||||
key := meterKey{
|
key := meterKey{
|
||||||
Name: instrumentationName,
|
InstrumentationName: instrumentationName,
|
||||||
Version: metric.NewMeterConfig(opts...).InstrumentationVersion(),
|
InstrumentationVersion: cfg.InstrumentationVersion(),
|
||||||
|
SchemaURL: cfg.SchemaURL(),
|
||||||
}
|
}
|
||||||
entry, ok := p.meters[key]
|
entry, ok := p.meters[key]
|
||||||
if !ok {
|
if !ok {
|
||||||
entry = &meterEntry{}
|
entry = &meterEntry{}
|
||||||
|
// Note: This code implements its own MeterProvider
|
||||||
|
// name-uniqueness logic because there is
|
||||||
|
// synchronization required at the moment of
|
||||||
|
// delegation. We use the same instrument-uniqueness
|
||||||
|
// checking the real SDK uses here:
|
||||||
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
|
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
|
||||||
p.meters[key] = entry
|
p.meters[key] = entry
|
||||||
|
|
||||||
}
|
}
|
||||||
return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
|
return metric.WrapMeterImpl(entry.unique)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Meter interface and delegation
|
// Meter interface and delegation
|
||||||
|
|
||||||
func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
|
func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) {
|
||||||
m.lock.Lock()
|
m.lock.Lock()
|
||||||
defer m.lock.Unlock()
|
defer m.lock.Unlock()
|
||||||
|
|
||||||
d := new(metric.MeterImpl)
|
d := new(metric.MeterImpl)
|
||||||
*d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
|
*d = provider.Meter(
|
||||||
|
key.InstrumentationName,
|
||||||
|
metric.WithInstrumentationVersion(key.InstrumentationVersion),
|
||||||
|
metric.WithSchemaURL(key.SchemaURL),
|
||||||
|
).MeterImpl()
|
||||||
m.delegate = unsafe.Pointer(d)
|
m.delegate = unsafe.Pointer(d)
|
||||||
|
|
||||||
for _, inst := range m.syncInsts {
|
for _, inst := range m.syncInsts {
|
||||||
|
@ -39,7 +39,17 @@ func TestDirect(t *testing.T) {
|
|||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
meter1 := metricglobal.Meter("test1", metric.WithInstrumentationVersion("semver:v1.0.0"))
|
meter1 := metricglobal.Meter("test1", metric.WithInstrumentationVersion("semver:v1.0.0"))
|
||||||
meter2 := metricglobal.Meter("test2")
|
meter2 := metricglobal.Meter("test2", metric.WithSchemaURL("hello"))
|
||||||
|
|
||||||
|
library1 := metrictest.Library{
|
||||||
|
InstrumentationName: "test1",
|
||||||
|
InstrumentationVersion: "semver:v1.0.0",
|
||||||
|
}
|
||||||
|
library2 := metrictest.Library{
|
||||||
|
InstrumentationName: "test2",
|
||||||
|
SchemaURL: "hello",
|
||||||
|
}
|
||||||
|
|
||||||
labels1 := []attribute.KeyValue{attribute.String("A", "B")}
|
labels1 := []attribute.KeyValue{attribute.String("A", "B")}
|
||||||
labels2 := []attribute.KeyValue{attribute.String("C", "D")}
|
labels2 := []attribute.KeyValue{attribute.String("C", "D")}
|
||||||
labels3 := []attribute.KeyValue{attribute.String("E", "F")}
|
labels3 := []attribute.KeyValue{attribute.String("E", "F")}
|
||||||
@ -66,66 +76,60 @@ func TestDirect(t *testing.T) {
|
|||||||
second.Record(ctx, 1, labels3...)
|
second.Record(ctx, 1, labels3...)
|
||||||
second.Record(ctx, 2, labels3...)
|
second.Record(ctx, 2, labels3...)
|
||||||
|
|
||||||
mock, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
metricglobal.SetMeterProvider(provider)
|
metricglobal.SetMeterProvider(provider)
|
||||||
|
|
||||||
counter.Add(ctx, 1, labels1...)
|
counter.Add(ctx, 1, labels1...)
|
||||||
histogram.Record(ctx, 3, labels1...)
|
histogram.Record(ctx, 3, labels1...)
|
||||||
second.Record(ctx, 3, labels3...)
|
second.Record(ctx, 3, labels3...)
|
||||||
|
|
||||||
mock.RunAsyncInstruments()
|
provider.RunAsyncInstruments()
|
||||||
|
|
||||||
measurements := metrictest.AsStructs(mock.MeasurementBatches)
|
measurements := metrictest.AsStructs(provider.MeasurementBatches)
|
||||||
|
|
||||||
require.EqualValues(t,
|
require.EqualValues(t,
|
||||||
[]metrictest.Measured{
|
[]metrictest.Measured{
|
||||||
{
|
{
|
||||||
Name: "test.counter",
|
Name: "test.counter",
|
||||||
InstrumentationName: "test1",
|
Library: library1,
|
||||||
InstrumentationVersion: "semver:v1.0.0",
|
Labels: metrictest.LabelsToMap(labels1...),
|
||||||
Labels: metrictest.LabelsToMap(labels1...),
|
Number: asInt(1),
|
||||||
Number: asInt(1),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.histogram",
|
Name: "test.histogram",
|
||||||
InstrumentationName: "test1",
|
Library: library1,
|
||||||
InstrumentationVersion: "semver:v1.0.0",
|
Labels: metrictest.LabelsToMap(labels1...),
|
||||||
Labels: metrictest.LabelsToMap(labels1...),
|
Number: asFloat(3),
|
||||||
Number: asFloat(3),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.second",
|
Name: "test.second",
|
||||||
InstrumentationName: "test2",
|
Library: library2,
|
||||||
Labels: metrictest.LabelsToMap(labels3...),
|
Labels: metrictest.LabelsToMap(labels3...),
|
||||||
Number: asFloat(3),
|
Number: asFloat(3),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.gauge.float",
|
Name: "test.gauge.float",
|
||||||
InstrumentationName: "test1",
|
Library: library1,
|
||||||
InstrumentationVersion: "semver:v1.0.0",
|
Labels: metrictest.LabelsToMap(labels1...),
|
||||||
Labels: metrictest.LabelsToMap(labels1...),
|
Number: asFloat(1),
|
||||||
Number: asFloat(1),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.gauge.float",
|
Name: "test.gauge.float",
|
||||||
InstrumentationName: "test1",
|
Library: library1,
|
||||||
InstrumentationVersion: "semver:v1.0.0",
|
Labels: metrictest.LabelsToMap(labels2...),
|
||||||
Labels: metrictest.LabelsToMap(labels2...),
|
Number: asFloat(2),
|
||||||
Number: asFloat(2),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.gauge.int",
|
Name: "test.gauge.int",
|
||||||
InstrumentationName: "test1",
|
Library: library1,
|
||||||
InstrumentationVersion: "semver:v1.0.0",
|
Labels: metrictest.LabelsToMap(labels1...),
|
||||||
Labels: metrictest.LabelsToMap(labels1...),
|
Number: asInt(1),
|
||||||
Number: asInt(1),
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.gauge.int",
|
Name: "test.gauge.int",
|
||||||
InstrumentationName: "test1",
|
Library: library1,
|
||||||
InstrumentationVersion: "semver:v1.0.0",
|
Labels: metrictest.LabelsToMap(labels2...),
|
||||||
Labels: metrictest.LabelsToMap(labels2...),
|
Number: asInt(2),
|
||||||
Number: asInt(2),
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
measurements,
|
measurements,
|
||||||
@ -138,7 +142,11 @@ func TestBound(t *testing.T) {
|
|||||||
// Note: this test uses opposite Float64/Int64 number kinds
|
// Note: this test uses opposite Float64/Int64 number kinds
|
||||||
// vs. the above, to cover all the instruments.
|
// vs. the above, to cover all the instruments.
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
glob := metricglobal.Meter("test")
|
glob := metricglobal.Meter(
|
||||||
|
"test",
|
||||||
|
metric.WithInstrumentationVersion("semver:test-1.0"),
|
||||||
|
metric.WithSchemaURL("schema://url"),
|
||||||
|
)
|
||||||
labels1 := []attribute.KeyValue{attribute.String("A", "B")}
|
labels1 := []attribute.KeyValue{attribute.String("A", "B")}
|
||||||
|
|
||||||
counter := Must(glob).NewFloat64Counter("test.counter")
|
counter := Must(glob).NewFloat64Counter("test.counter")
|
||||||
@ -151,28 +159,34 @@ func TestBound(t *testing.T) {
|
|||||||
boundM.Record(ctx, 1)
|
boundM.Record(ctx, 1)
|
||||||
boundM.Record(ctx, 2)
|
boundM.Record(ctx, 2)
|
||||||
|
|
||||||
mock, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
metricglobal.SetMeterProvider(provider)
|
metricglobal.SetMeterProvider(provider)
|
||||||
|
|
||||||
boundC.Add(ctx, 1)
|
boundC.Add(ctx, 1)
|
||||||
boundM.Record(ctx, 3)
|
boundM.Record(ctx, 3)
|
||||||
|
|
||||||
|
library := metrictest.Library{
|
||||||
|
InstrumentationName: "test",
|
||||||
|
InstrumentationVersion: "semver:test-1.0",
|
||||||
|
SchemaURL: "schema://url",
|
||||||
|
}
|
||||||
|
|
||||||
require.EqualValues(t,
|
require.EqualValues(t,
|
||||||
[]metrictest.Measured{
|
[]metrictest.Measured{
|
||||||
{
|
{
|
||||||
Name: "test.counter",
|
Name: "test.counter",
|
||||||
InstrumentationName: "test",
|
Library: library,
|
||||||
Labels: metrictest.LabelsToMap(labels1...),
|
Labels: metrictest.LabelsToMap(labels1...),
|
||||||
Number: asFloat(1),
|
Number: asFloat(1),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "test.histogram",
|
Name: "test.histogram",
|
||||||
InstrumentationName: "test",
|
Library: library,
|
||||||
Labels: metrictest.LabelsToMap(labels1...),
|
Labels: metrictest.LabelsToMap(labels1...),
|
||||||
Number: asInt(3),
|
Number: asInt(3),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
metrictest.AsStructs(mock.MeasurementBatches))
|
metrictest.AsStructs(provider.MeasurementBatches))
|
||||||
|
|
||||||
boundC.Unbind()
|
boundC.Unbind()
|
||||||
boundM.Unbind()
|
boundM.Unbind()
|
||||||
@ -199,7 +213,7 @@ func TestUnbindThenRecordOne(t *testing.T) {
|
|||||||
global.ResetForTest()
|
global.ResetForTest()
|
||||||
|
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
mock, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
|
|
||||||
meter := metricglobal.Meter("test")
|
meter := metricglobal.Meter("test")
|
||||||
counter := Must(meter).NewInt64Counter("test.counter")
|
counter := Must(meter).NewInt64Counter("test.counter")
|
||||||
@ -210,7 +224,7 @@ func TestUnbindThenRecordOne(t *testing.T) {
|
|||||||
require.NotPanics(t, func() {
|
require.NotPanics(t, func() {
|
||||||
boundC.Add(ctx, 1)
|
boundC.Add(ctx, 1)
|
||||||
})
|
})
|
||||||
require.Equal(t, 0, len(mock.MeasurementBatches))
|
require.Equal(t, 0, len(provider.MeasurementBatches))
|
||||||
}
|
}
|
||||||
|
|
||||||
type meterProviderWithConstructorError struct {
|
type meterProviderWithConstructorError struct {
|
||||||
@ -222,7 +236,7 @@ type meterWithConstructorError struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *meterProviderWithConstructorError) Meter(iName string, opts ...metric.MeterOption) metric.Meter {
|
func (m *meterProviderWithConstructorError) Meter(iName string, opts ...metric.MeterOption) metric.Meter {
|
||||||
return metric.WrapMeterImpl(&meterWithConstructorError{m.MeterProvider.Meter(iName, opts...).MeterImpl()}, iName, opts...)
|
return metric.WrapMeterImpl(&meterWithConstructorError{m.MeterProvider.Meter(iName, opts...).MeterImpl()})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *meterWithConstructorError) NewSyncInstrument(_ metric.Descriptor) (metric.SyncImpl, error) {
|
func (m *meterWithConstructorError) NewSyncInstrument(_ metric.Descriptor) (metric.SyncImpl, error) {
|
||||||
@ -238,7 +252,7 @@ func TestErrorInDeferredConstructor(t *testing.T) {
|
|||||||
c1 := Must(meter).NewInt64Counter("test")
|
c1 := Must(meter).NewInt64Counter("test")
|
||||||
c2 := Must(meter).NewInt64Counter("test")
|
c2 := Must(meter).NewInt64Counter("test")
|
||||||
|
|
||||||
_, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
sdk := &meterProviderWithConstructorError{provider}
|
sdk := &meterProviderWithConstructorError{provider}
|
||||||
|
|
||||||
require.Panics(t, func() {
|
require.Panics(t, func() {
|
||||||
@ -280,7 +294,7 @@ func TestImplementationIndirection(t *testing.T) {
|
|||||||
require.False(t, ok)
|
require.False(t, ok)
|
||||||
|
|
||||||
// Register the SDK
|
// Register the SDK
|
||||||
_, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
metricglobal.SetMeterProvider(provider)
|
metricglobal.SetMeterProvider(provider)
|
||||||
|
|
||||||
// Repeat the above tests
|
// Repeat the above tests
|
||||||
@ -309,7 +323,7 @@ func TestRecordBatchMock(t *testing.T) {
|
|||||||
|
|
||||||
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
|
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
|
||||||
|
|
||||||
mock, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
metricglobal.SetMeterProvider(provider)
|
metricglobal.SetMeterProvider(provider)
|
||||||
|
|
||||||
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
|
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
|
||||||
@ -317,11 +331,13 @@ func TestRecordBatchMock(t *testing.T) {
|
|||||||
require.EqualValues(t,
|
require.EqualValues(t,
|
||||||
[]metrictest.Measured{
|
[]metrictest.Measured{
|
||||||
{
|
{
|
||||||
Name: "test.counter",
|
Name: "test.counter",
|
||||||
InstrumentationName: "builtin",
|
Library: metrictest.Library{
|
||||||
Labels: metrictest.LabelsToMap(),
|
InstrumentationName: "builtin",
|
||||||
Number: asInt(1),
|
},
|
||||||
|
Labels: metrictest.LabelsToMap(),
|
||||||
|
Number: asInt(1),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
metrictest.AsStructs(mock.MeasurementBatches))
|
metrictest.AsStructs(provider.MeasurementBatches))
|
||||||
}
|
}
|
||||||
|
@ -21,8 +21,9 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
"go.opentelemetry.io/otel/metric/registry"
|
"go.opentelemetry.io/otel/metric/metrictest"
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@ -78,6 +79,10 @@ func TestRegistrySameInstruments(t *testing.T) {
|
|||||||
require.NoError(t, err1)
|
require.NoError(t, err1)
|
||||||
require.NoError(t, err2)
|
require.NoError(t, err2)
|
||||||
require.Equal(t, inst1, inst2)
|
require.Equal(t, inst1, inst2)
|
||||||
|
|
||||||
|
SetMeterProvider(metrictest.NewMeterProvider())
|
||||||
|
|
||||||
|
require.Equal(t, inst1, inst2)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -89,6 +94,16 @@ func TestRegistryDifferentNamespace(t *testing.T) {
|
|||||||
|
|
||||||
require.NoError(t, err1)
|
require.NoError(t, err1)
|
||||||
require.NoError(t, err2)
|
require.NoError(t, err2)
|
||||||
|
|
||||||
|
if inst1.Descriptor().InstrumentKind().Synchronous() {
|
||||||
|
// They're equal because of a `nil` pointer at this point.
|
||||||
|
// (Only for synchronous instruments, which lack callacks.)
|
||||||
|
require.EqualValues(t, inst1, inst2)
|
||||||
|
}
|
||||||
|
|
||||||
|
SetMeterProvider(metrictest.NewMeterProvider())
|
||||||
|
|
||||||
|
// They're different after the deferred setup.
|
||||||
require.NotEqual(t, inst1, inst2)
|
require.NotEqual(t, inst1, inst2)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -109,7 +124,7 @@ func TestRegistryDiffInstruments(t *testing.T) {
|
|||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.NotNil(t, other)
|
require.NotNil(t, other)
|
||||||
require.True(t, errors.Is(err, registry.ErrMetricKindMismatch))
|
require.True(t, errors.Is(err, registry.ErrMetricKindMismatch))
|
||||||
require.Contains(t, err.Error(), "super")
|
require.Contains(t, err.Error(), "by this name with another kind or number type")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -21,4 +21,4 @@ This package is currently in a pre-GA phase. Backwards incompatible changes
|
|||||||
may be introduced in subsequent minor version releases as we work to track the
|
may be introduced in subsequent minor version releases as we work to track the
|
||||||
evolving OpenTelemetry specification and user feedback.
|
evolving OpenTelemetry specification and user feedback.
|
||||||
*/
|
*/
|
||||||
package registry // import "go.opentelemetry.io/otel/metric/registry"
|
package registry // import "go.opentelemetry.io/otel/internal/metric/registry"
|
@ -23,77 +23,46 @@ import (
|
|||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MeterProvider is a standard MeterProvider for wrapping `MeterImpl`
|
// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
|
||||||
type MeterProvider struct {
|
// uniqueness checking for instrument descriptors.
|
||||||
impl metric.MeterImpl
|
type UniqueInstrumentMeterImpl struct {
|
||||||
}
|
|
||||||
|
|
||||||
var _ metric.MeterProvider = (*MeterProvider)(nil)
|
|
||||||
|
|
||||||
// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
|
|
||||||
// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter
|
|
||||||
// to wrap an implementation with uniqueness checking.
|
|
||||||
type uniqueInstrumentMeterImpl struct {
|
|
||||||
lock sync.Mutex
|
lock sync.Mutex
|
||||||
impl metric.MeterImpl
|
impl metric.MeterImpl
|
||||||
state map[key]metric.InstrumentImpl
|
state map[string]metric.InstrumentImpl
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
|
var _ metric.MeterImpl = (*UniqueInstrumentMeterImpl)(nil)
|
||||||
|
|
||||||
type key struct {
|
|
||||||
instrumentName string
|
|
||||||
instrumentationName string
|
|
||||||
InstrumentationVersion string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMeterProvider returns a new provider that implements instrument
|
|
||||||
// name-uniqueness checking.
|
|
||||||
func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
|
|
||||||
return &MeterProvider{
|
|
||||||
impl: NewUniqueInstrumentMeterImpl(impl),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Meter implements MeterProvider.
|
|
||||||
func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
|
|
||||||
return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrMetricKindMismatch is the standard error for mismatched metric
|
// ErrMetricKindMismatch is the standard error for mismatched metric
|
||||||
// instrument definitions.
|
// instrument definitions.
|
||||||
var ErrMetricKindMismatch = fmt.Errorf(
|
var ErrMetricKindMismatch = fmt.Errorf(
|
||||||
"a metric was already registered by this name with another kind or number type")
|
"a metric was already registered by this name with another kind or number type")
|
||||||
|
|
||||||
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
|
// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl
|
||||||
// the addition of uniqueness checking.
|
// with the addition of instrument name uniqueness checking.
|
||||||
func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
|
func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) *UniqueInstrumentMeterImpl {
|
||||||
return &uniqueInstrumentMeterImpl{
|
return &UniqueInstrumentMeterImpl{
|
||||||
impl: impl,
|
impl: impl,
|
||||||
state: map[key]metric.InstrumentImpl{},
|
state: map[string]metric.InstrumentImpl{},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MeterImpl gives the caller access to the underlying MeterImpl
|
||||||
|
// used by this UniqueInstrumentMeterImpl.
|
||||||
|
func (u *UniqueInstrumentMeterImpl) MeterImpl() metric.MeterImpl {
|
||||||
|
return u.impl
|
||||||
|
}
|
||||||
|
|
||||||
// RecordBatch implements metric.MeterImpl.
|
// RecordBatch implements metric.MeterImpl.
|
||||||
func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...metric.Measurement) {
|
func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...metric.Measurement) {
|
||||||
u.impl.RecordBatch(ctx, labels, ms...)
|
u.impl.RecordBatch(ctx, labels, ms...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func keyOf(descriptor metric.Descriptor) key {
|
|
||||||
return key{
|
|
||||||
descriptor.Name(),
|
|
||||||
descriptor.InstrumentationName(),
|
|
||||||
descriptor.InstrumentationVersion(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMetricKindMismatchError formats an error that describes a
|
// NewMetricKindMismatchError formats an error that describes a
|
||||||
// mismatched metric instrument definition.
|
// mismatched metric instrument definition.
|
||||||
func NewMetricKindMismatchError(desc metric.Descriptor) error {
|
func NewMetricKindMismatchError(desc metric.Descriptor) error {
|
||||||
return fmt.Errorf("metric was %s (%s %s)registered as a %s %s: %w",
|
return fmt.Errorf("metric %s registered as %s %s: %w",
|
||||||
desc.Name(),
|
desc.Name(),
|
||||||
desc.InstrumentationName(),
|
|
||||||
desc.InstrumentationVersion(),
|
|
||||||
desc.NumberKind(),
|
desc.NumberKind(),
|
||||||
desc.InstrumentKind(),
|
desc.InstrumentKind(),
|
||||||
ErrMetricKindMismatch)
|
ErrMetricKindMismatch)
|
||||||
@ -111,8 +80,8 @@ func Compatible(candidate, existing metric.Descriptor) bool {
|
|||||||
// `descriptor` argument. If there is an existing compatible
|
// `descriptor` argument. If there is an existing compatible
|
||||||
// registration, this returns the already-registered instrument. If
|
// registration, this returns the already-registered instrument. If
|
||||||
// there is no conflict and no prior registration, returns (nil, nil).
|
// there is no conflict and no prior registration, returns (nil, nil).
|
||||||
func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
|
func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
|
||||||
impl, ok := u.state[keyOf(descriptor)]
|
impl, ok := u.state[descriptor.Name()]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
@ -125,7 +94,7 @@ func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewSyncInstrument implements metric.MeterImpl.
|
// NewSyncInstrument implements metric.MeterImpl.
|
||||||
func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
|
func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
|
||||||
u.lock.Lock()
|
u.lock.Lock()
|
||||||
defer u.lock.Unlock()
|
defer u.lock.Unlock()
|
||||||
|
|
||||||
@ -141,12 +110,12 @@ func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descript
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
u.state[keyOf(descriptor)] = syncInst
|
u.state[descriptor.Name()] = syncInst
|
||||||
return syncInst, nil
|
return syncInst, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAsyncInstrument implements metric.MeterImpl.
|
// NewAsyncInstrument implements metric.MeterImpl.
|
||||||
func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
|
func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument(
|
||||||
descriptor metric.Descriptor,
|
descriptor metric.Descriptor,
|
||||||
runner metric.AsyncRunner,
|
runner metric.AsyncRunner,
|
||||||
) (metric.AsyncImpl, error) {
|
) (metric.AsyncImpl, error) {
|
||||||
@ -165,6 +134,6 @@ func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
u.state[keyOf(descriptor)] = asyncInst
|
u.state[descriptor.Name()] = asyncInst
|
||||||
return asyncInst, nil
|
return asyncInst, nil
|
||||||
}
|
}
|
@ -21,9 +21,9 @@ import (
|
|||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/internal/metric/registry"
|
||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
"go.opentelemetry.io/otel/metric/metrictest"
|
"go.opentelemetry.io/otel/metric/metrictest"
|
||||||
"go.opentelemetry.io/otel/metric/registry"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type (
|
type (
|
||||||
@ -70,11 +70,17 @@ func unwrap(impl interface{}, err error) (metric.InstrumentImpl, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testMeterWithRegistry(name string) metric.Meter {
|
||||||
|
return metric.WrapMeterImpl(
|
||||||
|
registry.NewUniqueInstrumentMeterImpl(
|
||||||
|
metrictest.NewMeterProvider().Meter(name).MeterImpl(),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
func TestRegistrySameInstruments(t *testing.T) {
|
func TestRegistrySameInstruments(t *testing.T) {
|
||||||
for _, nf := range allNew {
|
for _, nf := range allNew {
|
||||||
_, provider := metrictest.NewMeterProvider()
|
meter := testMeterWithRegistry("meter")
|
||||||
|
|
||||||
meter := provider.Meter("meter")
|
|
||||||
inst1, err1 := nf(meter, "this")
|
inst1, err1 := nf(meter, "this")
|
||||||
inst2, err2 := nf(meter, "this")
|
inst2, err2 := nf(meter, "this")
|
||||||
|
|
||||||
@ -86,7 +92,7 @@ func TestRegistrySameInstruments(t *testing.T) {
|
|||||||
|
|
||||||
func TestRegistryDifferentNamespace(t *testing.T) {
|
func TestRegistryDifferentNamespace(t *testing.T) {
|
||||||
for _, nf := range allNew {
|
for _, nf := range allNew {
|
||||||
_, provider := metrictest.NewMeterProvider()
|
provider := metrictest.NewMeterProvider()
|
||||||
|
|
||||||
meter1 := provider.Meter("meter1")
|
meter1 := provider.Meter("meter1")
|
||||||
meter2 := provider.Meter("meter2")
|
meter2 := provider.Meter("meter2")
|
||||||
@ -101,8 +107,7 @@ func TestRegistryDifferentNamespace(t *testing.T) {
|
|||||||
|
|
||||||
func TestRegistryDiffInstruments(t *testing.T) {
|
func TestRegistryDiffInstruments(t *testing.T) {
|
||||||
for origName, origf := range allNew {
|
for origName, origf := range allNew {
|
||||||
_, provider := metrictest.NewMeterProvider()
|
meter := testMeterWithRegistry("meter")
|
||||||
meter := provider.Meter("meter")
|
|
||||||
|
|
||||||
_, err := origf(meter, "this")
|
_, err := origf(meter, "this")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@ -119,14 +124,3 @@ func TestRegistryDiffInstruments(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMeterProvider(t *testing.T) {
|
|
||||||
impl, _ := metrictest.NewMeter()
|
|
||||||
p := registry.NewMeterProvider(impl)
|
|
||||||
m1 := p.Meter("m1")
|
|
||||||
m1p := p.Meter("m1")
|
|
||||||
m2 := p.Meter("m2")
|
|
||||||
|
|
||||||
require.Equal(t, m1, m1p)
|
|
||||||
require.NotEqual(t, m1, m2)
|
|
||||||
}
|
|
@ -20,10 +20,8 @@ import (
|
|||||||
|
|
||||||
// InstrumentConfig contains options for metric instrument descriptors.
|
// InstrumentConfig contains options for metric instrument descriptors.
|
||||||
type InstrumentConfig struct {
|
type InstrumentConfig struct {
|
||||||
description string
|
description string
|
||||||
unit unit.Unit
|
unit unit.Unit
|
||||||
instrumentationName string
|
|
||||||
instrumentationVersion string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Description describes the instrument in human-readable terms.
|
// Description describes the instrument in human-readable terms.
|
||||||
@ -36,18 +34,6 @@ func (cfg InstrumentConfig) Unit() unit.Unit {
|
|||||||
return cfg.unit
|
return cfg.unit
|
||||||
}
|
}
|
||||||
|
|
||||||
// InstrumentationName is the name of the library providing
|
|
||||||
// instrumentation.
|
|
||||||
func (cfg InstrumentConfig) InstrumentationName() string {
|
|
||||||
return cfg.instrumentationName
|
|
||||||
}
|
|
||||||
|
|
||||||
-// InstrumentationVersion is the version of the library providing
-// instrumentation.
-func (cfg InstrumentConfig) InstrumentationVersion() string {
-return cfg.instrumentationVersion
-}

// InstrumentOption is an interface for applying metric instrument options.
type InstrumentOption interface {
// ApplyMeter is used to set a InstrumentOption value of a
@@ -85,16 +71,10 @@ func WithUnit(unit unit.Unit) InstrumentOption {
})
}

-// WithInstrumentationName sets the instrumentation name.
-func WithInstrumentationName(name string) InstrumentOption {
-return instrumentOptionFunc(func(cfg *InstrumentConfig) {
-cfg.instrumentationName = name
-})
-}

// MeterConfig contains options for Meters.
type MeterConfig struct {
instrumentationVersion string
+schemaURL string
}

// InstrumentationVersion is the version of the library providing instrumentation.
@@ -102,6 +82,11 @@ func (cfg MeterConfig) InstrumentationVersion() string {
return cfg.instrumentationVersion
}

+// SchemaURL is the schema_url of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+return cfg.schemaURL
+}

// MeterOption is an interface for applying Meter options.
type MeterOption interface {
// ApplyMeter is used to set a MeterOption value of a MeterConfig.
@@ -118,24 +103,22 @@ func NewMeterConfig(opts ...MeterOption) MeterConfig {
return config
}

-// InstrumentMeterOption are options that can be used as both an InstrumentOption
-// and MeterOption
-type InstrumentMeterOption interface {
-InstrumentOption
-MeterOption
+type meterOptionFunc func(*MeterConfig)
+func (fn meterOptionFunc) applyMeter(cfg *MeterConfig) {
+fn(cfg)
}

// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) InstrumentMeterOption {
-return instrumentationVersionOption(version)
+func WithInstrumentationVersion(version string) MeterOption {
+return meterOptionFunc(func(config *MeterConfig) {
+config.instrumentationVersion = version
+})
}

-type instrumentationVersionOption string
-func (i instrumentationVersionOption) applyMeter(config *MeterConfig) {
-config.instrumentationVersion = string(i)
-}
+// WithSchemaURL sets the schema URL.
+func WithSchemaURL(schemaURL string) MeterOption {
+return meterOptionFunc(func(config *MeterConfig) {
+config.schemaURL = schemaURL
+})

-func (i instrumentationVersionOption) applyInstrument(config *InstrumentConfig) {
-config.instrumentationVersion = string(i)
}
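For orientation, a small sketch of how the reworked options surface on MeterConfig; the version and schema URL values below are placeholders, not taken from this change.

package example

import "go.opentelemetry.io/otel/metric"

// configFromOptions shows where the meter-level options now land: both
// WithInstrumentationVersion and the new WithSchemaURL apply to MeterConfig
// rather than to individual instruments.
func configFromOptions() metric.MeterConfig {
	return metric.NewMeterConfig(
		metric.WithInstrumentationVersion("1.2.3"),              // placeholder version
		metric.WithSchemaURL("https://example.com/schema/1.0"),  // placeholder schema URL
	)
}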
@@ -38,8 +38,7 @@ type MeterProvider interface {
//
// An uninitialized Meter is a no-op implementation.
type Meter struct {
impl MeterImpl
-name, version string
}

// RecordBatch atomically records a batch of measurements.
@@ -285,9 +284,8 @@ func (m Meter) newAsync(
if m.impl == nil {
return NoopAsync{}, nil
}
-desc := NewDescriptor(name, mkind, nkind, opts...)
-desc.config.instrumentationName = m.name
-desc.config.instrumentationVersion = m.version
+cfg := NewInstrumentConfig(opts...)
+desc := NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit)
return m.impl.NewAsyncInstrument(desc, runner)
}

@@ -304,9 +302,8 @@ func (m Meter) newSync(
if m.impl == nil {
return NoopSync{}, nil
}
-desc := NewDescriptor(name, metricKind, numberKind, opts...)
-desc.config.instrumentationName = m.name
-desc.config.instrumentationVersion = m.version
+cfg := NewInstrumentConfig(opts...)
+desc := NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit)
return m.impl.NewSyncInstrument(desc)
}

@@ -524,16 +521,18 @@ type Descriptor struct {
name string
instrumentKind sdkapi.InstrumentKind
numberKind number.Kind
-config InstrumentConfig
+description string
+unit unit.Unit
}

// NewDescriptor returns a Descriptor with the given contents.
-func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...InstrumentOption) Descriptor {
+func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor {
return Descriptor{
name: name,
instrumentKind: ikind,
numberKind: nkind,
-config: NewInstrumentConfig(opts...),
+description: description,
+unit: unit,
}
}

@@ -550,13 +549,13 @@ func (d Descriptor) InstrumentKind() sdkapi.InstrumentKind {
// Description provides a human-readable description of the metric
// instrument.
func (d Descriptor) Description() string {
-return d.config.Description()
+return d.description
}

// Unit describes the units of the metric instrument. Unitless
// metrics return the empty string.
func (d Descriptor) Unit() unit.Unit {
-return d.config.Unit()
+return d.unit
}

// NumberKind returns whether this instrument is declared over int64,
@@ -564,15 +563,3 @@ func (d Descriptor) Unit() unit.Unit {
func (d Descriptor) NumberKind() number.Kind {
return d.numberKind
}

-// InstrumentationName returns the name of the library that provided
-// instrumentation for this instrument.
-func (d Descriptor) InstrumentationName() string {
-return d.config.InstrumentationName()
-}

-// InstrumentationVersion returns the version of the library that provided
-// instrumentation for this instrument.
-func (d Descriptor) InstrumentationVersion() string {
-return d.config.InstrumentationVersion()
-}
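A brief sketch of the new NewDescriptor signature, where description and unit are passed directly instead of as instrument options; the instrument name, description, and unit chosen here are illustrative only, and the unit package import path is assumed.

package example

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
	"go.opentelemetry.io/otel/unit"
)

// exampleDescriptor builds a Descriptor with the reworked constructor: the
// descriptor no longer carries instrumentation name or version, only the
// instrument identity plus its description and unit.
func exampleDescriptor() metric.Descriptor {
	return metric.NewDescriptor(
		"request.duration",             // placeholder instrument name
		sdkapi.HistogramInstrumentKind, // instrument kind
		number.Float64Kind,             // number kind
		"duration of handled requests", // description, formerly WithDescription
		unit.Milliseconds,              // unit, formerly WithUnit
	)
}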
@@ -86,10 +86,8 @@ type AsyncImpl interface {

// WrapMeterImpl constructs a `Meter` implementation from a
// `MeterImpl` implementation.
-func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
+func WrapMeterImpl(impl MeterImpl) Meter {
return Meter{
impl: impl,
-name: instrumentationName,
-version: NewMeterConfig(opts...).InstrumentationVersion(),
}
}
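A hedged migration sketch for callers of WrapMeterImpl; the impl value is assumed to satisfy metric.MeterImpl, and the commented-out "before" line reflects the removed signature.

package example

import "go.opentelemetry.io/otel/metric"

// newMeter shows the call-site change: the instrumentation name and meter
// options no longer go to the wrapper, they belong to the MeterProvider that
// owns the implementation.
func newMeter(impl metric.MeterImpl) metric.Meter {
	// Before: metric.WrapMeterImpl(impl, "mylib", metric.WithInstrumentationVersion("v1"))
	// After: the wrapper only binds the implementation.
	return metric.WrapMeterImpl(impl)
}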
@@ -120,16 +120,16 @@ func TestPrecomputedSum(t *testing.T) {
}
}

-func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyValue, mock *metrictest.MeterImpl, nkind number.Kind, mkind sdkapi.InstrumentKind, instrument metric.InstrumentImpl, expected ...float64) {
+func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, instrument metric.InstrumentImpl, expected ...float64) {
t.Helper()

-batchesCount := len(mock.MeasurementBatches)
-if len(mock.MeasurementBatches) != len(expected) {
-t.Errorf("Expected %d recorded measurement batches, got %d", batchesCount, len(mock.MeasurementBatches))
+batchesCount := len(provider.MeasurementBatches)
+if len(provider.MeasurementBatches) != len(expected) {
+t.Errorf("Expected %d recorded measurement batches, got %d", batchesCount, len(provider.MeasurementBatches))
}
-recorded := metrictest.AsStructs(mock.MeasurementBatches)
+recorded := metrictest.AsStructs(provider.MeasurementBatches)

-for i, batch := range mock.MeasurementBatches {
+for i, batch := range provider.MeasurementBatches {
if len(batch.Measurements) != 1 {
t.Errorf("Expected 1 measurement in batch %d, got %d", i, len(batch.Measurements))
}
@@ -138,10 +138,12 @@ func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyV
descriptor := measurement.Instrument.Descriptor()

expected := metrictest.Measured{
Name: descriptor.Name(),
-InstrumentationName: descriptor.InstrumentationName(),
-Labels: metrictest.LabelsToMap(labels...),
-Number: metrictest.ResolveNumberByKind(t, nkind, expected[i]),
+Library: metrictest.Library{
+InstrumentationName: "apitest",
+},
+Labels: metrictest.LabelsToMap(labels...),
+Number: metrictest.ResolveNumberByKind(t, nkind, expected[i]),
}
require.Equal(t, expected, recorded[i])
}
@@ -149,31 +151,25 @@ func checkSyncBatches(ctx context.Context, t *testing.T, labels []attribute.KeyV

func TestOptions(t *testing.T) {
type testcase struct {
name string
opts []metric.InstrumentOption
desc string
unit unit.Unit
-iName string
-iVer string
}
testcases := []testcase{
{
name: "no opts",
opts: nil,
desc: "",
unit: "",
-iName: "",
-iVer: "",
},
{
name: "description",
opts: []metric.InstrumentOption{
metric.WithDescription("stuff"),
},
desc: "stuff",
unit: "",
-iName: "",
-iVer: "",
},
{
name: "description override",
@@ -181,20 +177,16 @@ func TestOptions(t *testing.T) {
metric.WithDescription("stuff"),
metric.WithDescription("things"),
},
desc: "things",
unit: "",
-iName: "",
-iVer: "",
},
{
name: "unit",
opts: []metric.InstrumentOption{
metric.WithUnit("s"),
},
desc: "",
unit: "s",
-iName: "",
-iVer: "",
},
{
name: "description override",
@@ -202,20 +194,16 @@ func TestOptions(t *testing.T) {
metric.WithDescription("stuff"),
metric.WithDescription("things"),
},
desc: "things",
unit: "",
-iName: "",
-iVer: "",
},
{
name: "unit",
opts: []metric.InstrumentOption{
metric.WithUnit("s"),
},
desc: "",
unit: "s",
-iName: "",
-iVer: "",
},

{
@@ -224,67 +212,17 @@ func TestOptions(t *testing.T) {
metric.WithUnit("s"),
metric.WithUnit("h"),
},
desc: "",
unit: "h",
-iName: "",
-iVer: "",
-},
-{
-name: "name",
-opts: []metric.InstrumentOption{
-metric.WithInstrumentationName("n"),
-},
-desc: "",
-unit: "",
-iName: "n",
-iVer: "",
-},

-{
-name: "name override",
-opts: []metric.InstrumentOption{
-metric.WithInstrumentationName("n"),
-metric.WithInstrumentationName("o"),
-},
-desc: "",
-unit: "",
-iName: "o",
-iVer: "",
-},
-{
-name: "version",
-opts: []metric.InstrumentOption{
-metric.WithInstrumentationVersion("v"),
-},
-desc: "",
-unit: "",
-iName: "",
-iVer: "v",
-},

-{
-name: "version override",
-opts: []metric.InstrumentOption{
-metric.WithInstrumentationVersion("v"),
-metric.WithInstrumentationVersion("q"),
-},
-desc: "",
-unit: "",
-iName: "",
-iVer: "q",
},
{
name: "all",
opts: []metric.InstrumentOption{
metric.WithDescription("stuff"),
metric.WithUnit("s"),
-metric.WithInstrumentationName("n"),
-metric.WithInstrumentationVersion("v"),
},
desc: "stuff",
unit: "s",
-iName: "n",
-iVer: "v",
},
}
for idx, tt := range testcases {
@@ -296,20 +234,18 @@ func TestOptions(t *testing.T) {
if diff := cmp.Diff(cfg.Unit(), tt.unit); diff != "" {
t.Errorf("Compare Unit: -got +want %s", diff)
}
-if diff := cmp.Diff(cfg.InstrumentationName(), tt.iName); diff != "" {
-t.Errorf("Compare InstrumentationNam: -got +want %s", diff)
-}
-if diff := cmp.Diff(cfg.InstrumentationVersion(), tt.iVer); diff != "" {
-t.Errorf("Compare InstrumentationVersion: -got +want %s", diff)
-}
}
}
+func testPair() (*metrictest.MeterProvider, metric.Meter) {
+provider := metrictest.NewMeterProvider()
+return provider, provider.Meter("apitest")
+}

func TestCounter(t *testing.T) {
// N.B. the API does not check for negative
// values, that's the SDK's responsibility.
t.Run("float64 counter", func(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
c := Must(meter).NewFloat64Counter("test.counter.float")
ctx := context.Background()
labels := []attribute.KeyValue{attribute.String("A", "B")}
@@ -317,12 +253,12 @@ func TestCounter(t *testing.T) {
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, -742)
meter.RecordBatch(ctx, labels, c.Measurement(42))
-checkSyncBatches(ctx, t, labels, mockSDK, number.Float64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
+checkSyncBatches(ctx, t, labels, provider, number.Float64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
1994.1, -742, 42,
)
})
t.Run("int64 counter", func(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
c := Must(meter).NewInt64Counter("test.counter.int")
ctx := context.Background()
labels := []attribute.KeyValue{attribute.String("A", "B"), attribute.String("C", "D")}
@@ -330,13 +266,13 @@ func TestCounter(t *testing.T) {
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, 4200)
meter.RecordBatch(ctx, labels, c.Measurement(420000))
-checkSyncBatches(ctx, t, labels, mockSDK, number.Int64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
+checkSyncBatches(ctx, t, labels, provider, number.Int64Kind, sdkapi.CounterInstrumentKind, c.SyncImpl(),
42, 4200, 420000,
)

})
t.Run("int64 updowncounter", func(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
c := Must(meter).NewInt64UpDownCounter("test.updowncounter.int")
ctx := context.Background()
labels := []attribute.KeyValue{attribute.String("A", "B"), attribute.String("C", "D")}
@@ -344,12 +280,12 @@ func TestCounter(t *testing.T) {
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, -100)
meter.RecordBatch(ctx, labels, c.Measurement(42))
-checkSyncBatches(ctx, t, labels, mockSDK, number.Int64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
+checkSyncBatches(ctx, t, labels, provider, number.Int64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
100, -100, 42,
)
})
t.Run("float64 updowncounter", func(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
c := Must(meter).NewFloat64UpDownCounter("test.updowncounter.float")
ctx := context.Background()
labels := []attribute.KeyValue{attribute.String("A", "B"), attribute.String("C", "D")}
@@ -357,7 +293,7 @@ func TestCounter(t *testing.T) {
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, -76)
meter.RecordBatch(ctx, labels, c.Measurement(-100.1))
-checkSyncBatches(ctx, t, labels, mockSDK, number.Float64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
+checkSyncBatches(ctx, t, labels, provider, number.Float64Kind, sdkapi.UpDownCounterInstrumentKind, c.SyncImpl(),
100.1, -76, -100.1,
)
})
@@ -365,7 +301,7 @@ func TestCounter(t *testing.T) {

func TestHistogram(t *testing.T) {
t.Run("float64 histogram", func(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
m := Must(meter).NewFloat64Histogram("test.histogram.float")
ctx := context.Background()
labels := []attribute.KeyValue{}
@@ -373,12 +309,12 @@ func TestHistogram(t *testing.T) {
boundInstrument := m.Bind(labels...)
boundInstrument.Record(ctx, 0)
meter.RecordBatch(ctx, labels, m.Measurement(-100.5))
-checkSyncBatches(ctx, t, labels, mockSDK, number.Float64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
+checkSyncBatches(ctx, t, labels, provider, number.Float64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
42, 0, -100.5,
)
})
t.Run("int64 histogram", func(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
m := Must(meter).NewInt64Histogram("test.histogram.int")
ctx := context.Background()
labels := []attribute.KeyValue{attribute.Int("I", 1)}
@@ -386,7 +322,7 @@ func TestHistogram(t *testing.T) {
boundInstrument := m.Bind(labels...)
boundInstrument.Record(ctx, 80)
meter.RecordBatch(ctx, labels, m.Measurement(0))
-checkSyncBatches(ctx, t, labels, mockSDK, number.Int64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
+checkSyncBatches(ctx, t, labels, provider, number.Int64Kind, sdkapi.HistogramInstrumentKind, m.SyncImpl(),
173, 80, 0,
)
})
@@ -395,74 +331,74 @@ func TestHistogram(t *testing.T) {
func TestObserverInstruments(t *testing.T) {
t.Run("float gauge", func(t *testing.T) {
labels := []attribute.KeyValue{attribute.String("O", "P")}
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()
o := Must(meter).NewFloat64GaugeObserver("test.gauge.float", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(42.1, labels...)
})
-mockSDK.RunAsyncInstruments()
-checkObserverBatch(t, labels, mockSDK, number.Float64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
+provider.RunAsyncInstruments()
+checkObserverBatch(t, labels, provider, number.Float64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
42.1,
)
})
t.Run("int gauge", func(t *testing.T) {
labels := []attribute.KeyValue{}
-mockSDK, meter := metrictest.NewMeter()
-o := Must(meter).NewInt64GaugeObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
+provider, meter := testPair()
+o := Must(meter).NewInt64GaugeObserver("test.gauge.int", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-142, labels...)
})
-mockSDK.RunAsyncInstruments()
-checkObserverBatch(t, labels, mockSDK, number.Int64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
+provider.RunAsyncInstruments()
+checkObserverBatch(t, labels, provider, number.Int64Kind, sdkapi.GaugeObserverInstrumentKind, o.AsyncImpl(),
-142,
)
})
t.Run("float counterobserver", func(t *testing.T) {
labels := []attribute.KeyValue{attribute.String("O", "P")}
-mockSDK, meter := metrictest.NewMeter()
-o := Must(meter).NewFloat64CounterObserver("test.counterobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
+provider, meter := testPair()
+o := Must(meter).NewFloat64CounterObserver("test.counter.float", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(42.1, labels...)
})
-mockSDK.RunAsyncInstruments()
-checkObserverBatch(t, labels, mockSDK, number.Float64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
+provider.RunAsyncInstruments()
+checkObserverBatch(t, labels, provider, number.Float64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
42.1,
)
})
t.Run("int counterobserver", func(t *testing.T) {
labels := []attribute.KeyValue{}
-mockSDK, meter := metrictest.NewMeter()
-o := Must(meter).NewInt64CounterObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
+provider, meter := testPair()
+o := Must(meter).NewInt64CounterObserver("test.counter.int", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-142, labels...)
})
-mockSDK.RunAsyncInstruments()
-checkObserverBatch(t, labels, mockSDK, number.Int64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
+provider.RunAsyncInstruments()
+checkObserverBatch(t, labels, provider, number.Int64Kind, sdkapi.CounterObserverInstrumentKind, o.AsyncImpl(),
-142,
)
})
t.Run("float updowncounterobserver", func(t *testing.T) {
labels := []attribute.KeyValue{attribute.String("O", "P")}
-mockSDK, meter := metrictest.NewMeter()
-o := Must(meter).NewFloat64UpDownCounterObserver("test.updowncounterobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
+provider, meter := testPair()
+o := Must(meter).NewFloat64UpDownCounterObserver("test.updowncounter.float", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(42.1, labels...)
})
-mockSDK.RunAsyncInstruments()
-checkObserverBatch(t, labels, mockSDK, number.Float64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
+provider.RunAsyncInstruments()
+checkObserverBatch(t, labels, provider, number.Float64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
42.1,
)
})
t.Run("int updowncounterobserver", func(t *testing.T) {
labels := []attribute.KeyValue{}
-mockSDK, meter := metrictest.NewMeter()
-o := Must(meter).NewInt64UpDownCounterObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
+provider, meter := testPair()
+o := Must(meter).NewInt64UpDownCounterObserver("test..int", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-142, labels...)
})
-mockSDK.RunAsyncInstruments()
-checkObserverBatch(t, labels, mockSDK, number.Int64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
+provider.RunAsyncInstruments()
+checkObserverBatch(t, labels, provider, number.Int64Kind, sdkapi.UpDownCounterObserverInstrumentKind, o.AsyncImpl(),
-142,
)
})
}

func TestBatchObserverInstruments(t *testing.T) {
-mockSDK, meter := metrictest.NewMeter()
+provider, meter := testPair()

var obs1 metric.Int64GaugeObserver
var obs2 metric.Float64GaugeObserver
@@ -480,12 +416,12 @@ func TestBatchObserverInstruments(t *testing.T) {
)
},
)
-obs1 = cb.NewInt64GaugeObserver("test.observer.int")
-obs2 = cb.NewFloat64GaugeObserver("test.observer.float")
+obs1 = cb.NewInt64GaugeObserver("test.gauge.int")
+obs2 = cb.NewFloat64GaugeObserver("test.gauge.float")

-mockSDK.RunAsyncInstruments()
+provider.RunAsyncInstruments()

-require.Len(t, mockSDK.MeasurementBatches, 1)
+require.Len(t, provider.MeasurementBatches, 1)

impl1 := obs1.AsyncImpl().Implementation().(*metrictest.Async)
impl2 := obs2.AsyncImpl().Implementation().(*metrictest.Async)
@@ -493,7 +429,7 @@ func TestBatchObserverInstruments(t *testing.T) {
require.NotNil(t, impl1)
require.NotNil(t, impl2)

-got := mockSDK.MeasurementBatches[0]
+got := provider.MeasurementBatches[0]
require.Equal(t, labels, got.Labels)
require.Len(t, got.Measurements, 2)

@@ -506,17 +442,17 @@ func TestBatchObserverInstruments(t *testing.T) {
require.Equal(t, 0, m2.Number.CompareNumber(number.Float64Kind, metrictest.ResolveNumberByKind(t, number.Float64Kind, 42)))
}

-func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, mock *metrictest.MeterImpl, nkind number.Kind, mkind sdkapi.InstrumentKind, observer metric.AsyncImpl, expected float64) {
+func checkObserverBatch(t *testing.T, labels []attribute.KeyValue, provider *metrictest.MeterProvider, nkind number.Kind, mkind sdkapi.InstrumentKind, observer metric.AsyncImpl, expected float64) {
t.Helper()
-assert.Len(t, mock.MeasurementBatches, 1)
-if len(mock.MeasurementBatches) < 1 {
+assert.Len(t, provider.MeasurementBatches, 1)
+if len(provider.MeasurementBatches) < 1 {
return
}
o := observer.Implementation().(*metrictest.Async)
if !assert.NotNil(t, o) {
return
}
-got := mock.MeasurementBatches[0]
+got := provider.MeasurementBatches[0]
assert.Equal(t, labels, got.Labels)
assert.Len(t, got.Measurements, 1)
if len(got.Measurements) < 1 {
@@ -547,7 +483,7 @@ func (testWrappedMeter) NewAsyncInstrument(_ metric.Descriptor, _ metric.AsyncRu

func TestWrappedInstrumentError(t *testing.T) {
impl := &testWrappedMeter{}
-meter := metric.WrapMeterImpl(impl, "test")
+meter := metric.WrapMeterImpl(impl)

histogram, err := meter.NewInt64Histogram("test.histogram")

@@ -562,7 +498,7 @@ func TestWrappedInstrumentError(t *testing.T) {

func TestNilCallbackObserverNoop(t *testing.T) {
// Tests that a nil callback yields a no-op observer without error.
-_, meter := metrictest.NewMeter()
+_, meter := testPair()

observer := Must(meter).NewInt64GaugeObserver("test.observer", nil)

@@ -23,7 +23,7 @@ import (
internalmetric "go.opentelemetry.io/otel/internal/metric"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
-"go.opentelemetry.io/otel/metric/registry"
+"go.opentelemetry.io/otel/metric/sdkapi"
)

type (
@@ -32,21 +32,35 @@ type (
Labels []attribute.KeyValue
}

+// Library is the same as "sdk/instrumentation".Library but there is
+// a package cycle to use it.
+Library struct {
+InstrumentationName string
+InstrumentationVersion string
+SchemaURL string
+}

Batch struct {
// Measurement needs to be aligned for 64-bit atomic operations.
Measurements []Measurement
Ctx context.Context
Labels []attribute.KeyValue
-LibraryName string
+Library Library
}

// MeterImpl is an OpenTelemetry Meter implementation used for testing.
MeterImpl struct {
+library Library
+provider *MeterProvider
+asyncInstruments *internalmetric.AsyncInstrumentState
+}

+// MeterProvider is a collection of named MeterImpls used for testing.
+MeterProvider struct {
lock sync.Mutex

MeasurementBatches []Batch
-asyncInstruments *internalmetric.AsyncInstrumentState
+impls []*MeterImpl
}

Measurement struct {
@@ -78,6 +92,13 @@ var (
_ metric.AsyncImpl = &Async{}
)

+// NewDescriptor is a test helper for constructing test metric
+// descriptors using standard options.
+func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...metric.InstrumentOption) metric.Descriptor {
+cfg := metric.NewInstrumentConfig(opts...)
+return metric.NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit())
+}

func (i Instrument) Descriptor() metric.Descriptor {
return i.descriptor
}
@@ -115,22 +136,32 @@ func (m *MeterImpl) doRecordSingle(ctx context.Context, labels []attribute.KeyVa
}})
}

-func NewMeterProvider() (*MeterImpl, metric.MeterProvider) {
+// NewMeterProvider returns a MeterProvider suitable for testing.
+// When the test is complete, consult MeterProvider.MeasurementBatches.
+func NewMeterProvider() *MeterProvider {
+return &MeterProvider{}
+}

+// Meter implements metric.MeterProvider.
+func (p *MeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
+p.lock.Lock()
+defer p.lock.Unlock()
+cfg := metric.NewMeterConfig(opts...)
impl := &MeterImpl{
+library: Library{
+InstrumentationName: name,
+InstrumentationVersion: cfg.InstrumentationVersion(),
+SchemaURL: cfg.SchemaURL(),
+},
+provider: p,
asyncInstruments: internalmetric.NewAsyncInstrumentState(),
}
-return impl, registry.NewMeterProvider(impl)
-}
+p.impls = append(p.impls, impl)
+return metric.WrapMeterImpl(impl)

-func NewMeter() (*MeterImpl, metric.Meter) {
-impl, p := NewMeterProvider()
-return impl, p.Meter("mock")
}

+// NewSyncInstrument implements metric.MeterImpl.
func (m *MeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
-m.lock.Lock()
-defer m.lock.Unlock()

return &Sync{
Instrument{
descriptor: descriptor,
@@ -139,10 +170,8 @@ func (m *MeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.Sync
}, nil
}

+// NewAsyncInstrument implements metric.MeterImpl.
func (m *MeterImpl) NewAsyncInstrument(descriptor metric.Descriptor, runner metric.AsyncRunner) (metric.AsyncImpl, error) {
-m.lock.Lock()
-defer m.lock.Unlock()

a := &Async{
Instrument: Instrument{
descriptor: descriptor,
@@ -150,10 +179,11 @@ func (m *MeterImpl) NewAsyncInstrument(descriptor metric.Descriptor, runner metr
},
runner: runner,
}
-m.asyncInstruments.Register(a, runner)
+m.provider.registerAsyncInstrument(a, m, runner)
return a, nil
}

+// RecordBatch implements metric.MeterImpl.
func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...metric.Measurement) {
mm := make([]Measurement, len(measurements))
for i := 0; i < len(measurements); i++ {
@@ -166,6 +196,7 @@ func (m *MeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue
m.collect(ctx, labels, mm)
}

+// CollectAsync is called from asyncInstruments.Run() with the lock held.
func (m *MeterImpl) CollectAsync(labels []attribute.KeyValue, obs ...metric.Observation) {
mm := make([]Measurement, len(obs))
for i := 0; i < len(obs); i++ {
@@ -178,29 +209,55 @@ func (m *MeterImpl) CollectAsync(labels []attribute.KeyValue, obs ...metric.Obse
m.collect(context.Background(), labels, mm)
}

+// collect is called from CollectAsync() or RecordBatch() with the lock held.
func (m *MeterImpl) collect(ctx context.Context, labels []attribute.KeyValue, measurements []Measurement) {
-m.lock.Lock()
-defer m.lock.Unlock()
+m.provider.addMeasurement(Batch{

-m.MeasurementBatches = append(m.MeasurementBatches, Batch{
Ctx: ctx,
Labels: labels,
Measurements: measurements,
+Library: m.library,
})
}

-func (m *MeterImpl) RunAsyncInstruments() {
-m.asyncInstruments.Run(context.Background(), m)
+// registerAsyncInstrument locks the provider and registers the new Async instrument.
+func (p *MeterProvider) registerAsyncInstrument(a *Async, m *MeterImpl, runner metric.AsyncRunner) {
+p.lock.Lock()
+defer p.lock.Unlock()

+m.asyncInstruments.Register(a, runner)
+}

+// addMeasurement locks the provider and adds the new measurement batch.
+func (p *MeterProvider) addMeasurement(b Batch) {
+p.lock.Lock()
+defer p.lock.Unlock()
+p.MeasurementBatches = append(p.MeasurementBatches, b)
+}

+// copyImpls locks the provider and copies the current list of *MeterImpls.
+func (p *MeterProvider) copyImpls() []*MeterImpl {
+p.lock.Lock()
+defer p.lock.Unlock()
+cpy := make([]*MeterImpl, len(p.impls))
+copy(cpy, p.impls)
+return cpy
+}

+// RunAsyncInstruments is used in tests to trigger collection from
+// asynchronous instruments.
+func (p *MeterProvider) RunAsyncInstruments() {
+for _, impl := range p.copyImpls() {
+impl.asyncInstruments.Run(context.Background(), impl)
+}
}

// Measured is the helper struct which provides flat representation of recorded measurements
// to simplify testing
type Measured struct {
Name string
-InstrumentationName string
-InstrumentationVersion string
-Labels map[attribute.Key]attribute.Value
-Number number.Number
+Labels map[attribute.Key]attribute.Value
+Number number.Number
+Library Library
}

// LabelsToMap converts label set to keyValue map, to be easily used in tests
@@ -218,11 +275,10 @@ func AsStructs(batches []Batch) []Measured {
for _, batch := range batches {
for _, m := range batch.Measurements {
r = append(r, Measured{
Name: m.Instrument.Descriptor().Name(),
-InstrumentationName: m.Instrument.Descriptor().InstrumentationName(),
-InstrumentationVersion: m.Instrument.Descriptor().InstrumentationVersion(),
-Labels: LabelsToMap(batch.Labels...),
-Number: m.Number,
+Labels: LabelsToMap(batch.Labels...),
+Number: m.Number,
+Library: batch.Library,
})
}
}
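A sketch of the test pattern this enables, assuming the existing metric API helpers (metric.Must, Int64Counter.Add) behave as in the rest of the test suite; the meter name, version, schema URL, and attribute values are placeholders.

package example_test

import (
	"context"
	"testing"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/metrictest"
)

// TestLibraryRecorded checks that each recorded batch carries the
// instrumentation library of the meter that produced it, which is the new
// information the metrictest provider exposes.
func TestLibraryRecorded(t *testing.T) {
	provider := metrictest.NewMeterProvider()
	meter := provider.Meter("example.test",
		metric.WithInstrumentationVersion("v0.0.1"),
		metric.WithSchemaURL("https://example.com/schema"))

	counter := metric.Must(meter).NewInt64Counter("example.counter")
	counter.Add(context.Background(), 1, attribute.String("k", "v"))

	got := provider.MeasurementBatches[0].Library
	if got.InstrumentationName != "example.test" || got.InstrumentationVersion != "v0.0.1" {
		t.Errorf("unexpected library: %+v", got)
	}
}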
@@ -19,7 +19,7 @@ import (

"github.com/stretchr/testify/require"

-"go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
@@ -60,7 +60,7 @@ func TestExportKindSelectors(t *testing.T) {
seks := StatelessExportKindSelector()

for _, ikind := range append(deltaMemoryKinds, cumulativeMemoryKinds...) {
-desc := metric.NewDescriptor("instrument", ikind, number.Int64Kind)
+desc := metrictest.NewDescriptor("instrument", ikind, number.Int64Kind)

var akind aggregation.Kind
if ikind.Adding() {
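A short sketch of why test code moves to metrictest.NewDescriptor: the helper still accepts InstrumentOptions, while metric.NewDescriptor now takes the description and unit positionally. The instrument name and description are placeholders.

package example

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/metrictest"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/metric/sdkapi"
)

// testDescriptor builds a descriptor the way the updated tests do, with the
// option-based convenience kept only in the test helper package.
func testDescriptor() metric.Descriptor {
	return metrictest.NewDescriptor(
		"instrument",
		sdkapi.CounterInstrumentKind,
		number.Int64Kind,
		metric.WithDescription("test-only descriptor"),
	)
}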
@@ -26,6 +26,7 @@ import (
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
)

@@ -111,13 +112,13 @@ type Checkpointer interface {
// any time.
Processor

-// CheckpointSet returns the current data set. This may be
+// Reader returns the current data set. This may be
// called before and after collection. The
// implementation is required to return the same value
-// throughout its lifetime, since CheckpointSet exposes a
+// throughout its lifetime, since Reader exposes a
// sync.Locker interface. The caller is responsible for
-// locking the CheckpointSet before initiating collection.
-CheckpointSet() CheckpointSet
+// locking the Reader before initiating collection.
+Reader() Reader

// StartCollection begins a collection interval.
StartCollection()
@@ -126,6 +127,12 @@ type Checkpointer interface {
FinishCollection() error
}

+// CheckpointerFactory is an interface for producing configured
+// Checkpointer instances.
+type CheckpointerFactory interface {
+NewCheckpointer() Checkpointer
+}

// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Counter
// instruments commonly use a simple Sum aggregator, but for the
@@ -209,9 +216,9 @@ type Exporter interface {
// The Context comes from the controller that initiated
// collection.
//
-// The CheckpointSet interface refers to the Processor that just
-// completed collection.
-Export(ctx context.Context, resource *resource.Resource, checkpointSet CheckpointSet) error
+// The InstrumentationLibraryReader interface refers to the
+// Processor that just completed collection.
+Export(ctx context.Context, resource *resource.Resource, reader InstrumentationLibraryReader) error

// ExportKindSelector is an interface used by the Processor
// in deciding whether to compute Delta or Cumulative
@@ -229,11 +236,20 @@ type ExportKindSelector interface {
ExportKindFor(descriptor *metric.Descriptor, aggregatorKind aggregation.Kind) ExportKind
}

-// CheckpointSet allows a controller to access a complete checkpoint of
-// aggregated metrics from the Processor. This is passed to the
-// Exporter which may then use ForEach to iterate over the collection
-// of aggregated metrics.
-type CheckpointSet interface {
+// InstrumentationLibraryReader is an interface for exporters to iterate
+// over one instrumentation library of metric data at a time.
+type InstrumentationLibraryReader interface {
+// ForEach calls the passed function once per instrumentation library,
+// allowing the caller to emit metrics grouped by the library that
+// produced them.
+ForEach(readerFunc func(instrumentation.Library, Reader) error) error
+}

+// Reader allows a controller to access a complete checkpoint of
+// aggregated metrics from the Processor for a single library of
+// metric data. This is passed to the Exporter which may then use
+// ForEach to iterate over the collection of aggregated metrics.
+type Reader interface {
// ForEach iterates over aggregated checkpoints for all
// metrics that were updated during the last collection
// period. Each aggregated checkpoint returned by the
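A minimal sketch of an export path consuming the new interfaces; it mirrors the reworked Export signature, shows only the outer per-library iteration, and deliberately elides the per-record loop inside each Reader.

package example

import (
	"context"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/resource"
)

// exportOneCycle has the shape of the new Export call: the reader yields one
// (Library, Reader) pair per instrumentation library, so output can be
// grouped by the library that produced it.
func exportOneCycle(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error {
	return ilr.ForEach(func(lib instrumentation.Library, reader export.Reader) error {
		_ = lib.Name // group output by library name, version, and schema URL
		// Iterate records via reader here, using the same per-record loop
		// that CheckpointSet-based exporters already used.
		return nil
	})
}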
@@ -22,6 +22,7 @@ import (
"github.com/stretchr/testify/require"

"go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
@@ -74,7 +75,7 @@ func TestRangeTest(t *testing.T) {
// Only Counters implement a range test.
for _, nkind := range []number.Kind{number.Float64Kind, number.Int64Kind} {
t.Run(nkind.String(), func(t *testing.T) {
-desc := metric.NewDescriptor(
+desc := metrictest.NewDescriptor(
"name",
sdkapi.CounterInstrumentKind,
nkind,
@@ -92,7 +93,7 @@ func TestNaNTest(t *testing.T) {
sdkapi.HistogramInstrumentKind,
sdkapi.GaugeObserverInstrumentKind,
} {
-desc := metric.NewDescriptor(
+desc := metrictest.NewDescriptor(
"name",
mkind,
nkind,
@@ -27,6 +27,7 @@ import (

ottest "go.opentelemetry.io/otel/internal/internaltest"
"go.opentelemetry.io/otel/metric"
+"go.opentelemetry.io/otel/metric/metrictest"
"go.opentelemetry.io/otel/metric/number"
"go.opentelemetry.io/otel/metric/sdkapi"
export "go.opentelemetry.io/otel/sdk/export/metric"
@@ -66,7 +67,7 @@ func newProfiles() []Profile {
}

func NewAggregatorTest(mkind sdkapi.InstrumentKind, nkind number.Kind) *metric.Descriptor {
-desc := metric.NewDescriptor("test.name", mkind, nkind)
+desc := metrictest.NewDescriptor("test.name", mkind, nkind)
return &desc
}

@@ -43,7 +43,7 @@ func newFixture(b *testing.B) *benchFixture {
}

bf.accumulator = sdk.NewAccumulator(bf)
-bf.meter = metric.WrapMeterImpl(bf.accumulator, "benchmarks")
+bf.meter = metric.WrapMeterImpl(bf.accumulator)
return bf
}

@@ -21,9 +21,10 @@ import (
"time"

"go.opentelemetry.io/otel"
+"go.opentelemetry.io/otel/internal/metric/registry"
"go.opentelemetry.io/otel/metric"
-"go.opentelemetry.io/otel/metric/registry"
export "go.opentelemetry.io/otel/sdk/export/metric"
+"go.opentelemetry.io/otel/sdk/instrumentation"
sdk "go.opentelemetry.io/otel/sdk/metric"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
"go.opentelemetry.io/otel/sdk/resource"
@@ -52,19 +53,25 @@ var ErrControllerStarted = fmt.Errorf("controller already started")
// collection
//
// The controller supports mixing push and pull access to metric data
-// using the export.CheckpointSet RWLock interface. Collection will
+// using the export.Reader RWLock interface. Collection will
// be blocked by a pull request in the basic controller.
type Controller struct {
-lock sync.Mutex
-accumulator *sdk.Accumulator
-provider *registry.MeterProvider
-checkpointer export.Checkpointer
-resource *resource.Resource
-exporter export.Exporter
-wg sync.WaitGroup
-stopCh chan struct{}
-clock controllerTime.Clock
-ticker controllerTime.Ticker
+// lock protects libraries and synchronizes Start() and Stop().
+lock sync.Mutex
+// TODO: libraries is synchronized by lock, but could be
+// accomplished using a sync.Map. The SDK specification will
+// probably require this, as the draft already states that
+// Stop() and MeterProvider.Meter() should not block each
+// other.
+libraries map[instrumentation.Library]*registry.UniqueInstrumentMeterImpl
+checkpointerFactory export.CheckpointerFactory
+resource *resource.Resource
+exporter export.Exporter
+wg sync.WaitGroup
+stopCh chan struct{}
+clock controllerTime.Clock
+ticker controllerTime.Ticker

collectPeriod time.Duration
collectTimeout time.Duration
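A hypothetical CheckpointerFactory, shown only to illustrate the shape the controller now expects; newCheckpointer stands in for whatever Processor or Checkpointer constructor a caller already has.

package example

import (
	export "go.opentelemetry.io/otel/sdk/export/metric"
)

// checkpointerFactory satisfies export.CheckpointerFactory by delegating to
// a caller-supplied constructor. The controller calls NewCheckpointer once
// per instrumentation library instead of sharing a single Checkpointer.
type checkpointerFactory struct {
	newCheckpointer func() export.Checkpointer
}

func (f checkpointerFactory) NewCheckpointer() export.Checkpointer {
	return f.newCheckpointer()
}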
@@ -75,10 +82,44 @@ type Controller struct {
collectedTime time.Time
}

-// New constructs a Controller using the provided checkpointer and
-// options (including optional exporter) to configure a metric
+var _ export.InstrumentationLibraryReader = &Controller{}
+var _ metric.MeterProvider = &Controller{}

+func (c *Controller) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
+cfg := metric.NewMeterConfig(opts...)
+library := instrumentation.Library{
+Name: instrumentationName,
+Version: cfg.InstrumentationVersion(),
+SchemaURL: cfg.SchemaURL(),
+}

+c.lock.Lock()
+defer c.lock.Unlock()
+m, ok := c.libraries[library]
+if !ok {
+checkpointer := c.checkpointerFactory.NewCheckpointer()
+accumulator := sdk.NewAccumulator(checkpointer)
+m = registry.NewUniqueInstrumentMeterImpl(&accumulatorCheckpointer{
+Accumulator: accumulator,
+checkpointer: checkpointer,
+library: library,
+})

+c.libraries[library] = m
+}
+return metric.WrapMeterImpl(m)
+}

+type accumulatorCheckpointer struct {
+*sdk.Accumulator
+checkpointer export.Checkpointer
+library instrumentation.Library
+}

+// New constructs a Controller using the provided checkpointer factory
+// and options (including optional exporter) to configure a metric
// export pipeline.
-func New(checkpointer export.Checkpointer, opts ...Option) *Controller {
+func New(checkpointerFactory export.CheckpointerFactory, opts ...Option) *Controller {
c := &config{
CollectPeriod: DefaultPeriod,
CollectTimeout: DefaultPeriod,
@@ -96,15 +137,13 @@ func New(checkpointer export.Checkpointer, opts ...Option) *Controller {
otel.Handle(err)
}
}
-impl := sdk.NewAccumulator(checkpointer)
return &Controller{
-provider: registry.NewMeterProvider(impl),
-accumulator: impl,
-checkpointer: checkpointer,
+libraries: map[instrumentation.Library]*registry.UniqueInstrumentMeterImpl{},
+checkpointerFactory: checkpointerFactory,
+exporter: c.Exporter,
resource: c.Resource,
-exporter: c.Exporter,
-stopCh: nil,
-clock: controllerTime.RealClock{},
+stopCh: nil,
+clock: controllerTime.RealClock{},

collectPeriod: c.CollectPeriod,
collectTimeout: c.CollectTimeout,
@ -120,11 +159,6 @@ func (c *Controller) SetClock(clock controllerTime.Clock) {
|
|||||||
c.clock = clock
|
c.clock = clock
|
||||||
}
|
}
|
||||||
|
|
||||||
// MeterProvider returns a MeterProvider instance for this controller.
|
|
||||||
func (c *Controller) MeterProvider() metric.MeterProvider {
|
|
||||||
return c.provider
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resource returns the *resource.Resource associated with this
|
// Resource returns the *resource.Resource associated with this
|
||||||
// controller.
|
// controller.
|
||||||
func (c *Controller) Resource() *resource.Resource {
|
func (c *Controller) Resource() *resource.Resource {
|
||||||
@ -165,19 +199,23 @@ func (c *Controller) Start(ctx context.Context) error {
|
|||||||
//
|
//
|
||||||
// Note that Stop() will not cancel an ongoing collection or export.
|
// Note that Stop() will not cancel an ongoing collection or export.
|
||||||
func (c *Controller) Stop(ctx context.Context) error {
|
func (c *Controller) Stop(ctx context.Context) error {
|
||||||
c.lock.Lock()
|
if lastCollection := func() bool {
|
||||||
defer c.lock.Unlock()
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
if c.stopCh == nil {
|
if c.stopCh == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
close(c.stopCh)
|
||||||
|
c.stopCh = nil
|
||||||
|
c.wg.Wait()
|
||||||
|
c.ticker.Stop()
|
||||||
|
c.ticker = nil
|
||||||
|
return true
|
||||||
|
}(); !lastCollection {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
close(c.stopCh)
|
|
||||||
c.stopCh = nil
|
|
||||||
c.wg.Wait()
|
|
||||||
c.ticker.Stop()
|
|
||||||
c.ticker = nil
|
|
||||||
|
|
||||||
return c.collect(ctx)
|
return c.collect(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -198,9 +236,7 @@ func (c *Controller) runTicker(ctx context.Context, stopCh chan struct{}) {
|
|||||||
|
|
||||||
// collect computes a checkpoint and optionally exports it.
|
// collect computes a checkpoint and optionally exports it.
|
||||||
func (c *Controller) collect(ctx context.Context) error {
|
func (c *Controller) collect(ctx context.Context) error {
|
||||||
if err := c.checkpoint(ctx, func() bool {
|
if err := c.checkpoint(ctx); err != nil {
|
||||||
return true
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if c.exporter == nil {
|
if c.exporter == nil {
|
||||||
@ -212,19 +248,45 @@ func (c *Controller) collect(ctx context.Context) error {
|
|||||||
return c.export(ctx)
|
return c.export(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// accumulatorList returns a snapshot of current accumulators
|
||||||
|
// registered to this controller. This briefly locks the controller.
|
||||||
|
func (c *Controller) accumulatorList() []*accumulatorCheckpointer {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
var r []*accumulatorCheckpointer
|
||||||
|
for _, entry := range c.libraries {
|
||||||
|
acc, ok := entry.MeterImpl().(*accumulatorCheckpointer)
|
||||||
|
if ok {
|
||||||
|
r = append(r, acc)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
// checkpoint calls the Accumulator and Checkpointer interfaces to
|
// checkpoint calls the Accumulator and Checkpointer interfaces to
|
||||||
// compute the CheckpointSet. This applies the configured collection
|
// compute the Reader. This applies the configured collection
|
||||||
// timeout. Note that this does not try to cancel a Collect or Export
|
// timeout. Note that this does not try to cancel a Collect or Export
|
||||||
// when Stop() is called.
|
// when Stop() is called.
|
||||||
func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
func (c *Controller) checkpoint(ctx context.Context) error {
|
||||||
ckpt := c.checkpointer.CheckpointSet()
|
for _, impl := range c.accumulatorList() {
|
||||||
|
if err := c.checkpointSingleAccumulator(ctx, impl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkpointSingleAccumulator checkpoints a single instrumentation
|
||||||
|
// library's accumulator, which involves calling
|
||||||
|
// checkpointer.StartCollection, accumulator.Collect, and
|
||||||
|
// checkpointer.FinishCollection in sequence.
|
||||||
|
func (c *Controller) checkpointSingleAccumulator(ctx context.Context, ac *accumulatorCheckpointer) error {
|
||||||
|
ckpt := ac.checkpointer.Reader()
|
||||||
ckpt.Lock()
|
ckpt.Lock()
|
||||||
defer ckpt.Unlock()
|
defer ckpt.Unlock()
|
||||||
|
|
||||||
if !cond() {
|
ac.checkpointer.StartCollection()
|
||||||
return nil
|
|
||||||
}
|
|
||||||
c.checkpointer.StartCollection()
|
|
||||||
|
|
||||||
if c.collectTimeout > 0 {
|
if c.collectTimeout > 0 {
|
||||||
var cancel context.CancelFunc
|
var cancel context.CancelFunc
|
||||||
@ -232,7 +294,7 @@ func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
|||||||
defer cancel()
|
defer cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
_ = c.accumulator.Collect(ctx)
|
_ = ac.Accumulator.Collect(ctx)
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
select {
|
select {
|
||||||
@ -243,7 +305,7 @@ func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Finish the checkpoint whether the accumulator timed out or not.
|
// Finish the checkpoint whether the accumulator timed out or not.
|
||||||
if cerr := c.checkpointer.FinishCollection(); cerr != nil {
|
if cerr := ac.checkpointer.FinishCollection(); cerr != nil {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = cerr
|
err = cerr
|
||||||
} else {
|
} else {
|
||||||
@ -254,34 +316,36 @@ func (c *Controller) checkpoint(ctx context.Context, cond func() bool) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// export calls the exporter with a read lock on the CheckpointSet,
|
// export calls the exporter with a read lock on the Reader,
|
||||||
// applying the configured export timeout.
|
// applying the configured export timeout.
|
||||||
func (c *Controller) export(ctx context.Context) error {
|
func (c *Controller) export(ctx context.Context) error {
|
||||||
ckpt := c.checkpointer.CheckpointSet()
|
|
||||||
ckpt.RLock()
|
|
||||||
defer ckpt.RUnlock()
|
|
||||||
|
|
||||||
if c.pushTimeout > 0 {
|
if c.pushTimeout > 0 {
|
||||||
var cancel context.CancelFunc
|
var cancel context.CancelFunc
|
||||||
ctx, cancel = context.WithTimeout(ctx, c.pushTimeout)
|
ctx, cancel = context.WithTimeout(ctx, c.pushTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
}
|
}
|
||||||
|
|
||||||
return c.exporter.Export(ctx, c.resource, ckpt)
|
return c.exporter.Export(ctx, c.resource, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForEach gives the caller read-locked access to the current
|
// ForEach implements export.InstrumentationLibraryReader.
|
||||||
// export.CheckpointSet.
|
func (c *Controller) ForEach(readerFunc func(l instrumentation.Library, r export.Reader) error) error {
|
||||||
func (c *Controller) ForEach(ks export.ExportKindSelector, f func(export.Record) error) error {
|
for _, acPair := range c.accumulatorList() {
|
||||||
ckpt := c.checkpointer.CheckpointSet()
|
reader := acPair.checkpointer.Reader()
|
||||||
ckpt.RLock()
|
// TODO: We should not fail fast; instead accumulate errors.
|
||||||
defer ckpt.RUnlock()
|
if err := func() error {
|
||||||
|
reader.RLock()
|
||||||
return ckpt.ForEach(ks, f)
|
defer reader.RUnlock()
|
||||||
|
return readerFunc(acPair.library, reader)
|
||||||
|
}(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsRunning returns true if the controller was started via Start(),
|
// IsRunning returns true if the controller was started via Start(),
|
||||||
// indicating that the current export.CheckpointSet is being kept
|
// indicating that the current export.Reader is being kept
|
||||||
// up-to-date.
|
// up-to-date.
|
||||||
func (c *Controller) IsRunning() bool {
|
func (c *Controller) IsRunning() bool {
|
||||||
c.lock.Lock()
|
c.lock.Lock()
|
||||||
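For orientation (not part of the commit): the controller now satisfies `export.InstrumentationLibraryReader`, so consumers iterate per-library readers first and per-record second. A minimal sketch of that two-level walk, using only identifiers that appear in this diff:

```go
// Sketch only: flattening the (library, record) iteration the controller exposes.
package example

import (
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
)

// countRecords walks every instrumentation library's Reader and counts records.
func countRecords(cont *controller.Controller) (int, error) {
	n := 0
	err := cont.ForEach(func(_ instrumentation.Library, reader export.Reader) error {
		// The controller read-locks each Reader before invoking this callback.
		return reader.ForEach(export.CumulativeExportKindSelector(), func(_ export.Record) error {
			n++
			return nil
		})
	})
	return n, err
}
```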
@@ -298,16 +362,20 @@ func (c *Controller) Collect(ctx context.Context) error {
 		// computing checkpoints with the collection period.
 		return ErrControllerStarted
 	}
+	if !c.shouldCollect() {
+		return nil
+	}

-	return c.checkpoint(ctx, c.shouldCollect)
+	return c.checkpoint(ctx)
 }

 // shouldCollect returns true if the collector should collect now,
 // based on the timestamp, the last collection time, and the
 // configured period.
 func (c *Controller) shouldCollect() bool {
-	// This is called with the CheckpointSet exclusive
-	// lock held.
+	c.lock.Lock()
+	defer c.lock.Unlock()

 	if c.collectPeriod == 0 {
 		return true
 	}
@@ -28,6 +28,7 @@ import (
 	"go.opentelemetry.io/otel/metric"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
 	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
 	"go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
 	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
@@ -41,11 +42,14 @@ func getMap(t *testing.T, cont *controller.Controller) map[string]float64 {
 	out := processortest.NewOutput(attribute.DefaultEncoder())

 	require.NoError(t, cont.ForEach(
-		export.CumulativeExportKindSelector(),
-		func(record export.Record) error {
-			return out.AddRecord(record)
-		},
-	))
+		func(_ instrumentation.Library, reader export.Reader) error {
+			return reader.ForEach(
+				export.CumulativeExportKindSelector(),
+				func(record export.Record) error {
+					return out.AddRecord(record)
+				},
+			)
+		}))
 	return out.Map()
 }

@@ -113,7 +117,7 @@ func TestControllerUsesResource(t *testing.T) {
 	sel := export.CumulativeExportKindSelector()
 	exp := processortest.New(sel, attribute.DefaultEncoder())
 	cont := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			exp,
 		),
@@ -121,9 +125,8 @@ func TestControllerUsesResource(t *testing.T) {
 	)
 	ctx := context.Background()
 	require.NoError(t, cont.Start(ctx))
-	prov := cont.MeterProvider()

-	ctr := metric.Must(prov.Meter("named")).NewFloat64Counter("calls.sum")
+	ctr := metric.Must(cont.Meter("named")).NewFloat64Counter("calls.sum")
 	ctr.Add(context.Background(), 1.)

 	// Collect once
@@ -139,7 +142,7 @@ func TestControllerUsesResource(t *testing.T) {

 func TestStartNoExporter(t *testing.T) {
 	cont := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			export.CumulativeExportKindSelector(),
 		),
@@ -149,10 +152,9 @@ func TestStartNoExporter(t *testing.T) {
 	mock := controllertest.NewMockClock()
 	cont.SetClock(mock)

-	prov := cont.MeterProvider()
 	calls := int64(0)

-	_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("calls.lastvalue",
+	_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("calls.lastvalue",
 		func(ctx context.Context, result metric.Int64ObserverResult) {
 			calls++
 			checkTestContext(t, ctx)
@@ -209,7 +211,7 @@ func TestStartNoExporter(t *testing.T) {

 func TestObserverCanceled(t *testing.T) {
 	cont := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			export.CumulativeExportKindSelector(),
 		),
@@ -218,10 +220,9 @@ func TestObserverCanceled(t *testing.T) {
 		controller.WithResource(resource.Empty()),
 	)

-	prov := cont.MeterProvider()
 	calls := int64(0)

-	_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("done.lastvalue",
+	_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("done.lastvalue",
 		func(ctx context.Context, result metric.Int64ObserverResult) {
 			<-ctx.Done()
 			calls++
@@ -242,7 +243,7 @@ func TestObserverCanceled(t *testing.T) {

 func TestObserverContext(t *testing.T) {
 	cont := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			export.CumulativeExportKindSelector(),
 		),
@@ -250,9 +251,7 @@ func TestObserverContext(t *testing.T) {
 		controller.WithResource(resource.Empty()),
 	)

-	prov := cont.MeterProvider()
-
-	_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("done.lastvalue",
+	_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("done.lastvalue",
 		func(ctx context.Context, result metric.Int64ObserverResult) {
 			time.Sleep(10 * time.Millisecond)
 			checkTestContext(t, ctx)
@@ -284,7 +283,7 @@ func newBlockingExporter() *blockingExporter {
 	}
 }

-func (b *blockingExporter) Export(ctx context.Context, res *resource.Resource, output export.CheckpointSet) error {
+func (b *blockingExporter) Export(ctx context.Context, res *resource.Resource, output export.InstrumentationLibraryReader) error {
 	var err error
 	_ = b.exporter.Export(ctx, res, output)
 	if b.calls == 0 {
@@ -306,7 +305,7 @@ func (*blockingExporter) ExportKindFor(
 func TestExportTimeout(t *testing.T) {
 	exporter := newBlockingExporter()
 	cont := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			export.CumulativeExportKindSelector(),
 		),
@@ -318,10 +317,8 @@ func TestExportTimeout(t *testing.T) {
 	mock := controllertest.NewMockClock()
 	cont.SetClock(mock)

-	prov := cont.MeterProvider()
-
 	calls := int64(0)
-	_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("one.lastvalue",
+	_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("one.lastvalue",
 		func(ctx context.Context, result metric.Int64ObserverResult) {
 			calls++
 			result.Observe(calls)
@@ -363,7 +360,7 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) {
 		attribute.DefaultEncoder(),
 	)
 	cont := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			exp,
 		),
@@ -374,10 +371,8 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) {
 	mock := controllertest.NewMockClock()
 	cont.SetClock(mock)

-	prov := cont.MeterProvider()
-
 	calls := 0
-	_ = metric.Must(prov.Meter("named")).NewInt64CounterObserver("one.lastvalue",
+	_ = metric.Must(cont.Meter("named")).NewInt64CounterObserver("one.lastvalue",
 		func(ctx context.Context, result metric.Int64ObserverResult) {
 			calls++
 			result.Observe(int64(calls))
@@ -437,3 +432,46 @@ func TestCollectAfterStopThenStartAgain(t *testing.T) {
 		"one.lastvalue//": 6,
 	}, exp.Values())
 }
+
+func TestRegistryFunction(t *testing.T) {
+	exp := processortest.New(
+		export.CumulativeExportKindSelector(),
+		attribute.DefaultEncoder(),
+	)
+	cont := controller.New(
+		processor.NewFactory(
+			processortest.AggregatorSelector(),
+			exp,
+		),
+		controller.WithCollectPeriod(time.Second),
+		controller.WithExporter(exp),
+		controller.WithResource(resource.Empty()),
+	)
+
+	m1 := cont.Meter("test")
+	m2 := cont.Meter("test")
+
+	require.NotNil(t, m1)
+	require.Equal(t, m1, m2)
+
+	c1, err := m1.NewInt64Counter("counter.sum")
+	require.NoError(t, err)
+
+	c2, err := m1.NewInt64Counter("counter.sum")
+	require.NoError(t, err)
+
+	require.Equal(t, c1, c2)
+
+	ctx := context.Background()
+
+	require.NoError(t, cont.Start(ctx))
+
+	c1.Add(ctx, 10)
+	c2.Add(ctx, 10)
+
+	require.NoError(t, cont.Stop(ctx))
+
+	require.EqualValues(t, map[string]float64{
+		"counter.sum//": 20,
+	}, exp.Values())
+}
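For orientation (not part of the commit): custom exporters such as `blockingExporter` above now receive an `export.InstrumentationLibraryReader` instead of a `CheckpointSet`. A hedged sketch of adapting an exporter to the new signature; the `ExportKind` constant and the exporter acting as its own `ExportKindSelector` are assumptions modeled on the test exporters in this diff:

```go
// Sketch only: a counting exporter adapted to the new Export signature.
package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/resource"
)

type countingExporter struct {
	records int
}

// Export walks the two-level reader the controller now passes in.
func (e *countingExporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error {
	return ilr.ForEach(func(_ instrumentation.Library, reader export.Reader) error {
		return reader.ForEach(e, func(export.Record) error {
			e.records++
			return nil
		})
	})
}

// ExportKindFor lets the exporter serve as its own ExportKindSelector,
// as the blockingExporter/bogusExporter in this diff do (assumed constant).
func (*countingExporter) ExportKindFor(*metric.Descriptor, aggregation.Kind) export.ExportKind {
	return export.CumulativeExportKind
}
```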
@@ -34,7 +34,7 @@ import (

 func TestPullNoCollect(t *testing.T) {
 	puller := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			export.CumulativeExportKindSelector(),
 			processor.WithMemory(true),
@@ -44,14 +44,14 @@ func TestPullNoCollect(t *testing.T) {
 	)

 	ctx := context.Background()
-	meter := puller.MeterProvider().Meter("nocache")
+	meter := puller.Meter("nocache")
 	counter := metric.Must(meter).NewInt64Counter("counter.sum")

 	counter.Add(ctx, 10, attribute.String("A", "B"))

 	require.NoError(t, puller.Collect(ctx))
 	records := processortest.NewOutput(attribute.DefaultEncoder())
-	require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
+	require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))

 	require.EqualValues(t, map[string]float64{
 		"counter.sum/A=B/": 10,
@@ -61,7 +61,7 @@ func TestPullNoCollect(t *testing.T) {

 	require.NoError(t, puller.Collect(ctx))
 	records = processortest.NewOutput(attribute.DefaultEncoder())
-	require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
+	require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))

 	require.EqualValues(t, map[string]float64{
 		"counter.sum/A=B/": 20,
@@ -70,7 +70,7 @@ func TestPullNoCollect(t *testing.T) {

 func TestPullWithCollect(t *testing.T) {
 	puller := controller.New(
-		processor.New(
+		processor.NewFactory(
 			processortest.AggregatorSelector(),
 			export.CumulativeExportKindSelector(),
 			processor.WithMemory(true),
@@ -82,14 +82,14 @@ func TestPullWithCollect(t *testing.T) {
 	puller.SetClock(mock)

 	ctx := context.Background()
-	meter := puller.MeterProvider().Meter("nocache")
+	meter := puller.Meter("nocache")
 	counter := metric.Must(meter).NewInt64Counter("counter.sum")

 	counter.Add(ctx, 10, attribute.String("A", "B"))

 	require.NoError(t, puller.Collect(ctx))
 	records := processortest.NewOutput(attribute.DefaultEncoder())
-	require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
+	require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))

 	require.EqualValues(t, map[string]float64{
 		"counter.sum/A=B/": 10,
@@ -100,7 +100,7 @@ func TestPullWithCollect(t *testing.T) {
 	// Cached value!
 	require.NoError(t, puller.Collect(ctx))
 	records = processortest.NewOutput(attribute.DefaultEncoder())
-	require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
+	require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))

 	require.EqualValues(t, map[string]float64{
 		"counter.sum/A=B/": 10,
@@ -112,7 +112,7 @@ func TestPullWithCollect(t *testing.T) {
 	// Re-computed value!
 	require.NoError(t, puller.Collect(ctx))
 	records = processortest.NewOutput(attribute.DefaultEncoder())
-	require.NoError(t, puller.ForEach(export.CumulativeExportKindSelector(), records.AddRecord))
+	require.NoError(t, controllertest.ReadAll(puller, export.CumulativeExportKindSelector(), records.AddInstrumentationLibraryRecord))

 	require.EqualValues(t, map[string]float64{
 		"counter.sum/A=B/": 20,
@@ -72,19 +72,17 @@ func newExporter() *processortest.Exporter {
 	)
 }

-func newCheckpointer() export.Checkpointer {
-	return processortest.Checkpointer(
-		processortest.NewProcessor(
-			processortest.AggregatorSelector(),
-			attribute.DefaultEncoder(),
-		),
+func newCheckpointerFactory() export.CheckpointerFactory {
+	return processortest.NewCheckpointerFactory(
+		processortest.AggregatorSelector(),
+		attribute.DefaultEncoder(),
 	)
 }

 func TestPushDoubleStop(t *testing.T) {
 	ctx := context.Background()
 	exporter := newExporter()
-	checkpointer := newCheckpointer()
+	checkpointer := newCheckpointerFactory()
 	p := controller.New(checkpointer, controller.WithExporter(exporter))
 	require.NoError(t, p.Start(ctx))
 	require.NoError(t, p.Stop(ctx))
@@ -94,7 +92,7 @@ func TestPushDoubleStop(t *testing.T) {
 func TestPushDoubleStart(t *testing.T) {
 	ctx := context.Background()
 	exporter := newExporter()
-	checkpointer := newCheckpointer()
+	checkpointer := newCheckpointerFactory()
 	p := controller.New(checkpointer, controller.WithExporter(exporter))
 	require.NoError(t, p.Start(ctx))
 	err := p.Start(ctx)
@@ -105,14 +103,14 @@ func TestPushDoubleStart(t *testing.T) {

 func TestPushTicker(t *testing.T) {
 	exporter := newExporter()
-	checkpointer := newCheckpointer()
+	checkpointer := newCheckpointerFactory()
 	p := controller.New(
 		checkpointer,
 		controller.WithExporter(exporter),
 		controller.WithCollectPeriod(time.Second),
 		controller.WithResource(testResource),
 	)
-	meter := p.MeterProvider().Meter("name")
+	meter := p.Meter("name")

 	mock := controllertest.NewMockClock()
 	p.SetClock(mock)
@@ -185,7 +183,7 @@ func TestPushExportError(t *testing.T) {
 	// This test validates the error handling
 	// behavior of the basic Processor is honored
 	// by the push processor.
-	checkpointer := processor.New(processortest.AggregatorSelector(), exporter)
+	checkpointer := processor.NewFactory(processortest.AggregatorSelector(), exporter)
 	p := controller.New(
 		checkpointer,
 		controller.WithExporter(exporter),
@@ -198,7 +196,7 @@ func TestPushExportError(t *testing.T) {

 	ctx := context.Background()

-	meter := p.MeterProvider().Meter("name")
+	meter := p.Meter("name")
 	counter1 := metric.Must(meter).NewInt64Counter("counter1.sum")
 	counter2 := metric.Must(meter).NewInt64Counter("counter2.sum")
@@ -19,6 +19,8 @@ import (

 	"github.com/benbjohnson/clock"

+	export "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
 	controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
 )

@@ -56,3 +58,18 @@ func (t MockTicker) Stop() {
 func (t MockTicker) C() <-chan time.Time {
 	return t.ticker.C
 }
+
+// ReadAll is a helper for tests that want a flat iterator over all
+// metrics instead of a two-level iterator (instrumentation library,
+// metric).
+func ReadAll(
+	reader export.InstrumentationLibraryReader,
+	kind export.ExportKindSelector,
+	apply func(instrumentation.Library, export.Record) error,
+) error {
+	return reader.ForEach(func(library instrumentation.Library, reader export.Reader) error {
+		return reader.ForEach(kind, func(record export.Record) error {
+			return apply(library, record)
+		})
+	})
+}
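Usage note (not part of the commit): `ReadAll` flattens the (library, record) iteration so existing per-record test helpers keep working. A sketch mirroring the pull-controller tests above:

```go
// Sketch only: collecting controller output through ReadAll in a test.
package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/otel/attribute"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
	"go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

func readValues(t *testing.T, cont *controller.Controller) map[string]float64 {
	records := processortest.NewOutput(attribute.DefaultEncoder())
	require.NoError(t, controllertest.ReadAll(
		cont, // any export.InstrumentationLibraryReader, e.g. the controller
		export.CumulativeExportKindSelector(),
		records.AddInstrumentationLibraryRecord, // per-(library, record) callback
	))
	return records.Map()
}
```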
@@ -86,7 +86,7 @@ func newSDK(t *testing.T) (metric.Meter, *metricsdk.Accumulator, *testSelector,
 	accum := metricsdk.NewAccumulator(
 		processor,
 	)
-	meter := metric.WrapMeterImpl(accum, "test")
+	meter := metric.WrapMeterImpl(accum)
 	return meter, accum, testSelector, processor
 }

@@ -102,7 +102,7 @@ record has an associated aggregator.
 Processor is an interface which sits between the SDK and an exporter.
 The Processor embeds an AggregatorSelector, used by the SDK to assign
 new Aggregators. The Processor supports a Process() API for submitting
-checkpointed aggregators to the processor, and a CheckpointSet() API
+checkpointed aggregators to the processor, and a Reader() API
 for producing a complete checkpoint for the exporter. Two default
 Processor implementations are provided, the "defaultkeys" Processor groups
 aggregate metrics by their recommended Descriptor.Keys(), the
@@ -113,9 +113,9 @@ provide the serialization logic for labels. This allows avoiding
 duplicate serialization of labels, once as a unique key in the SDK (or
 Processor) and once in the exporter.

-CheckpointSet is an interface between the Processor and the Exporter.
-After completing a collection pass, the Processor.CheckpointSet() method
-returns a CheckpointSet, which the Exporter uses to iterate over all
+Reader is an interface between the Processor and the Exporter.
+After completing a collection pass, the Processor.Reader() method
+returns a Reader, which the Exporter uses to iterate over all
 the updated metrics.

 Record is a struct containing the state of an individual exported
@@ -126,7 +126,7 @@ Labels is a struct containing an ordered set of labels, the
 corresponding unique encoding, and the encoder that produced it.

 Exporter is the final stage of an export pipeline. It is called with
-a CheckpointSet capable of enumerating all the updated metrics.
+a Reader capable of enumerating all the updated metrics.

 Controller is not an export interface per se, but it orchestrates the
 export pipeline. For example, a "push" controller will establish a
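To make the renamed pipeline pieces concrete (sketch only, not part of the commit): one hand-driven collection pass through Accumulator, Processor, and Reader, using only calls that appear elsewhere in this diff.

```go
// Sketch only: a single collection pass, assuming the basic processor
// and the processortest selector shown in this diff.
package example

import (
	"context"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	sdk "go.opentelemetry.io/otel/sdk/metric"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

func onePass(ctx context.Context) error {
	// Processor: selects aggregators and checkpoints them into a Reader.
	proc := processor.New(
		processortest.AggregatorSelector(),
		export.CumulativeExportKindSelector(),
	)
	// Accumulator: the SDK core that records measurements.
	accum := sdk.NewAccumulator(proc)
	reader := proc.Reader()

	// One collection interval; the controller in this diff holds the
	// Reader's write lock around start/collect/finish.
	reader.Lock()
	proc.StartCollection()
	accum.Collect(ctx)
	err := proc.FinishCollection()
	reader.Unlock()
	if err != nil {
		return err
	}

	// Export side: read-lock the Reader and iterate the records.
	reader.RLock()
	defer reader.RUnlock()
	return reader.ForEach(export.CumulativeExportKindSelector(), func(export.Record) error {
		return nil
	})
}
```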
@@ -22,14 +22,14 @@ import (

 	"github.com/stretchr/testify/require"

-	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/metrictest"
 	"go.opentelemetry.io/otel/metric/number"
 	"go.opentelemetry.io/otel/metric/sdkapi"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
 )

 func TestStressInt64Histogram(t *testing.T) {
-	desc := metric.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
+	desc := metrictest.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)

 	alloc := histogram.New(2, &desc, histogram.WithExplicitBoundaries([]float64{25, 50, 75}))
 	h, ckpt := &alloc[0], &alloc[1]
@@ -20,14 +20,14 @@ import (
 	"testing"
 	"time"

-	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/metrictest"
 	"go.opentelemetry.io/otel/metric/number"
 	"go.opentelemetry.io/otel/metric/sdkapi"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
 )

 func TestStressInt64MinMaxSumCount(t *testing.T) {
-	desc := metric.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
+	desc := metrictest.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind)
 	alloc := minmaxsumcount.New(2, &desc)
 	mmsc, ckpt := &alloc[0], &alloc[1]

@@ -90,7 +90,7 @@ type (
 	state struct {
 		config config

-		// RWMutex implements locking for the `CheckpointSet` interface.
+		// RWMutex implements locking for the `Reader` interface.
 		sync.RWMutex
 		values map[stateKey]*stateValue

@@ -113,7 +113,7 @@ type (

 var _ export.Processor = &Processor{}
 var _ export.Checkpointer = &Processor{}
-var _ export.CheckpointSet = &state{}
+var _ export.Reader = &state{}

 // ErrInconsistentState is returned when the sequence of collection's starts and finishes are incorrectly balanced.
 var ErrInconsistentState = fmt.Errorf("inconsistent processor state")
@@ -127,20 +127,43 @@ var ErrInvalidExportKind = fmt.Errorf("invalid export kind")
 // data, so that this Processor can prepare to compute Delta or
 // Cumulative Aggregations as needed.
 func New(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) *Processor {
+	return NewFactory(aselector, eselector, opts...).NewCheckpointer().(*Processor)
+}
+
+type factory struct {
+	aselector export.AggregatorSelector
+	eselector export.ExportKindSelector
+	config    config
+}
+
+func NewFactory(aselector export.AggregatorSelector, eselector export.ExportKindSelector, opts ...Option) export.CheckpointerFactory {
+	var config config
+	for _, opt := range opts {
+		opt.applyProcessor(&config)
+	}
+	return factory{
+		aselector: aselector,
+		eselector: eselector,
+		config:    config,
+	}
+}
+
+var _ export.CheckpointerFactory = factory{}
+
+func (f factory) NewCheckpointer() export.Checkpointer {
 	now := time.Now()
 	p := &Processor{
-		AggregatorSelector: aselector,
-		ExportKindSelector: eselector,
+		AggregatorSelector: f.aselector,
+		ExportKindSelector: f.eselector,
 		state: state{
 			values:        map[stateKey]*stateValue{},
 			processStart:  now,
 			intervalStart: now,
+			config:        f.config,
 		},
 	}
-	for _, opt := range opts {
-		opt.applyProcessor(&p.config)
-	}
 	return p
 }

 // Process implements export.Processor.
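For orientation (not part of the commit): `New()` is kept for single-checkpointer callers and now delegates to `NewFactory()`, which the controller calls once per instrumentation library. A small sketch of both construction paths; `WithMemory` is taken from the pull-controller tests in this diff:

```go
// Sketch only: the two construction paths after this change.
package example

import (
	export "go.opentelemetry.io/otel/sdk/export/metric"
	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

func construct() (export.Checkpointer, export.CheckpointerFactory) {
	aggSel := processortest.AggregatorSelector()
	kindSel := export.CumulativeExportKindSelector()

	// Old-style: a single Processor, still available via New().
	single := processor.New(aggSel, kindSel, processor.WithMemory(true))

	// New-style: a factory the controller invokes per instrumentation library.
	factory := processor.NewFactory(aggSel, kindSel, processor.WithMemory(true))

	return single, factory
}
```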
@@ -241,11 +264,11 @@ func (b *Processor) Process(accum export.Accumulation) error {
 	return value.current.Merge(agg, desc)
 }

-// CheckpointSet returns the associated CheckpointSet. Use the
-// CheckpointSet Locker interface to synchronize access to this
-// object. The CheckpointSet.ForEach() method cannot be called
+// Reader returns the associated Reader. Use the
+// Reader Locker interface to synchronize access to this
+// object. The Reader.ForEach() method cannot be called
 // concurrently with Process().
-func (b *Processor) CheckpointSet() export.CheckpointSet {
+func (b *Processor) Reader() export.Reader {
 	return &b.state
 }

@@ -260,7 +283,7 @@ func (b *Processor) StartCollection() {

 // FinishCollection signals to the Processor that a complete
 // collection has finished and that ForEach will be called to access
-// the CheckpointSet.
+// the Reader.
 func (b *Processor) FinishCollection() error {
 	b.intervalEnd = time.Now()
 	if b.startedCollection != b.finishedCollection+1 {
@@ -314,7 +337,7 @@ func (b *Processor) FinishCollection() error {
 	return nil
 }

-// ForEach iterates through the CheckpointSet, passing an
+// ForEach iterates through the Reader, passing an
 // export.Record with the appropriate Cumulative or Delta aggregation
 // to an exporter.
 func (b *state) ForEach(exporter export.ExportKindSelector, f func(export.Record) error) error {
@@ -26,10 +26,12 @@ import (

 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/metrictest"
 	"go.opentelemetry.io/otel/metric/number"
 	"go.opentelemetry.io/otel/metric/sdkapi"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
 	sdk "go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest"
 	"go.opentelemetry.io/otel/sdk/metric/processor/basic"
@@ -136,8 +138,8 @@ func testProcessor(

 	instSuffix := fmt.Sprint(".", strings.ToLower(akind.String()))

-	desc1 := metric.NewDescriptor(fmt.Sprint("inst1", instSuffix), mkind, nkind)
-	desc2 := metric.NewDescriptor(fmt.Sprint("inst2", instSuffix), mkind, nkind)
+	desc1 := metrictest.NewDescriptor(fmt.Sprint("inst1", instSuffix), mkind, nkind)
+	desc2 := metrictest.NewDescriptor(fmt.Sprint("inst2", instSuffix), mkind, nkind)

 	for nc := 0; nc < nCheckpoint; nc++ {

@@ -174,7 +176,7 @@ func testProcessor(
 			continue
 		}

-		checkpointSet := processor.CheckpointSet()
+		reader := processor.Reader()

 		for _, repetitionAfterEmptyInterval := range []bool{false, true} {
 			if repetitionAfterEmptyInterval {
@@ -188,7 +190,7 @@ func testProcessor(

 			// Test the final checkpoint state.
 			records1 := processorTest.NewOutput(attribute.DefaultEncoder())
-			err = checkpointSet.ForEach(export.ConstantExportKindSelector(ekind), records1.AddRecord)
+			err = reader.ForEach(export.ConstantExportKindSelector(ekind), records1.AddRecord)

 			// Test for an allowed error:
 			if err != nil && err != aggregation.ErrNoSubtraction {
@@ -267,7 +269,7 @@ func (bogusExporter) ExportKindFor(*metric.Descriptor, aggregation.Kind) export.
 	return 1000000
 }

-func (bogusExporter) Export(context.Context, export.CheckpointSet) error {
+func (bogusExporter) Export(context.Context, export.Reader) error {
 	panic("Not called")
 }

@@ -300,7 +302,7 @@ func TestBasicInconsistent(t *testing.T) {
 	// Test no start
 	b = basic.New(processorTest.AggregatorSelector(), export.StatelessExportKindSelector())

-	desc := metric.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
+	desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
 	accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{})
 	require.Equal(t, basic.ErrInconsistentState, b.Process(accum))

@@ -325,7 +327,7 @@ func TestBasicTimestamps(t *testing.T) {
 	time.Sleep(time.Nanosecond)
 	afterNew := time.Now()

-	desc := metric.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
+	desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
 	accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{})

 	b.StartCollection()
@@ -370,11 +372,11 @@ func TestBasicTimestamps(t *testing.T) {
 func TestStatefulNoMemoryCumulative(t *testing.T) {
 	ekindSel := export.CumulativeExportKindSelector()

-	desc := metric.NewDescriptor("inst.sum", sdkapi.CounterInstrumentKind, number.Int64Kind)
+	desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterInstrumentKind, number.Int64Kind)
 	selector := processorTest.AggregatorSelector()

 	processor := basic.New(selector, ekindSel, basic.WithMemory(false))
-	checkpointSet := processor.CheckpointSet()
+	reader := processor.Reader()

 	for i := 1; i < 3; i++ {
 		// Empty interval
@@ -383,7 +385,7 @@ func TestStatefulNoMemoryCumulative(t *testing.T) {

 		// Verify zero elements
 		records := processorTest.NewOutput(attribute.DefaultEncoder())
-		require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
+		require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
 		require.EqualValues(t, map[string]float64{}, records.Map())

 		// Add 10
@@ -393,7 +395,7 @@ func TestStatefulNoMemoryCumulative(t *testing.T) {

 		// Verify one element
 		records = processorTest.NewOutput(attribute.DefaultEncoder())
-		require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
+		require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
 		require.EqualValues(t, map[string]float64{
 			"inst.sum/A=B/": float64(i * 10),
 		}, records.Map())
@@ -403,11 +405,11 @@ func TestStatefulNoMemoryCumulative(t *testing.T) {
 func TestStatefulNoMemoryDelta(t *testing.T) {
 	ekindSel := export.DeltaExportKindSelector()

-	desc := metric.NewDescriptor("inst.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
+	desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
 	selector := processorTest.AggregatorSelector()

 	processor := basic.New(selector, ekindSel, basic.WithMemory(false))
-	checkpointSet := processor.CheckpointSet()
+	reader := processor.Reader()

 	for i := 1; i < 3; i++ {
 		// Empty interval
@@ -416,7 +418,7 @@ func TestStatefulNoMemoryDelta(t *testing.T) {

 		// Verify zero elements
 		records := processorTest.NewOutput(attribute.DefaultEncoder())
-		require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
+		require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
 		require.EqualValues(t, map[string]float64{}, records.Map())

 		// Add 10
@@ -426,7 +428,7 @@ func TestStatefulNoMemoryDelta(t *testing.T) {

 		// Verify one element
 		records = processorTest.NewOutput(attribute.DefaultEncoder())
-		require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
+		require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
 		require.EqualValues(t, map[string]float64{
 			"inst.sum/A=B/": 10,
 		}, records.Map())
@@ -439,11 +441,11 @@ func TestMultiObserverSum(t *testing.T) {
 		export.DeltaExportKindSelector(),
 	} {

-		desc := metric.NewDescriptor("observe.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
+		desc := metrictest.NewDescriptor("observe.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
 		selector := processorTest.AggregatorSelector()

 		processor := basic.New(selector, ekindSel, basic.WithMemory(false))
-		checkpointSet := processor.CheckpointSet()
+		reader := processor.Reader()

 		for i := 1; i < 3; i++ {
 			// Add i*10*3 times
@@ -461,7 +463,7 @@ func TestMultiObserverSum(t *testing.T) {

 			// Verify one element
 			records := processorTest.NewOutput(attribute.DefaultEncoder())
-			require.NoError(t, checkpointSet.ForEach(ekindSel, records.AddRecord))
+			require.NoError(t, reader.ForEach(ekindSel, records.AddRecord))
 			require.EqualValues(t, map[string]float64{
 				"observe.sum/A=B/": float64(3 * 10 * multiplier),
 			}, records.Map())
@@ -477,7 +479,7 @@ func TestCounterObserverEndToEnd(t *testing.T) {
 		eselector,
 	)
 	accum := sdk.NewAccumulator(proc)
-	meter := metric.WrapMeterImpl(accum, "testing")
+	meter := metric.WrapMeterImpl(accum)

 	var calls int64
 	metric.Must(meter).NewInt64CounterObserver("observer.sum",
@@ -486,19 +488,23 @@ func TestCounterObserverEndToEnd(t *testing.T) {
 			result.Observe(calls)
 		},
 	)
-	data := proc.CheckpointSet()
+	reader := proc.Reader()

 	var startTime [3]time.Time
 	var endTime [3]time.Time

 	for i := range startTime {
+		data := proc.Reader()
 		data.Lock()
 		proc.StartCollection()
 		accum.Collect(ctx)
 		require.NoError(t, proc.FinishCollection())

 		exporter := processortest.New(eselector, attribute.DefaultEncoder())
-		require.NoError(t, exporter.Export(ctx, resource.Empty(), data))
+		require.NoError(t, exporter.Export(ctx, resource.Empty(), processortest.OneInstrumentationLibraryReader(
+			instrumentation.Library{
+				Name: "test",
+			}, reader)))

 		require.EqualValues(t, map[string]float64{
 			"observer.sum//": float64(i + 1),
@@ -18,7 +18,7 @@ package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic"
 type config struct {
 	// Memory controls whether the processor remembers metric
 	// instruments and label sets that were previously reported.
-	// When Memory is true, CheckpointSet.ForEach() will visit
+	// When Memory is true, Reader.ForEach() will visit
 	// metrics that were not updated in the most recent interval.
 	Memory bool
 }
@@ -26,6 +26,7 @@ import (
 	"go.opentelemetry.io/otel/metric/number"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
 	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
 	"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
@@ -45,14 +46,14 @@ type (
 	}
 
 	// mapValue is value stored in a processor used to produce a
-	// CheckpointSet.
+	// Reader.
 	mapValue struct {
 		labels     *attribute.Set
 		resource   *resource.Resource
 		aggregator export.Aggregator
 	}
 
-	// Output implements export.CheckpointSet.
+	// Output implements export.Reader.
 	Output struct {
 		m            map[mapKey]mapValue
 		labelEncoder attribute.Encoder
@@ -92,6 +93,28 @@ type (
 	}
 )
 
+type testFactory struct {
+	selector export.AggregatorSelector
+	encoder  attribute.Encoder
+}
+
+func NewCheckpointerFactory(selector export.AggregatorSelector, encoder attribute.Encoder) export.CheckpointerFactory {
+	return testFactory{
+		selector: selector,
+		encoder:  encoder,
+	}
+}
+
+func NewCheckpointer(p *Processor) export.Checkpointer {
+	return &testCheckpointer{
+		Processor: p,
+	}
+}
+
+func (f testFactory) NewCheckpointer() export.Checkpointer {
+	return NewCheckpointer(NewProcessor(f.selector, f.encoder))
+}
+
 // NewProcessor returns a new testing Processor implementation.
 // Verify expected outputs using Values(), e.g.:
 //
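The testFactory/NewCheckpointerFactory additions just above give tests a way to mint independent checkpointers, each backed by a fresh Processor. A short usage sketch built only from calls that appear in this diff (the helper name newCheckpointers and the package name are illustrative):

package example

import (
	"go.opentelemetry.io/otel/attribute"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
)

// newCheckpointers builds a factory once and asks it for n independent
// checkpointers, each wrapping its own testing Processor.
func newCheckpointers(n int) []export.Checkpointer {
	factory := processortest.NewCheckpointerFactory(
		processortest.AggregatorSelector(),
		attribute.DefaultEncoder(),
	)
	out := make([]export.Checkpointer, n)
	for i := range out {
		out[i] = factory.NewCheckpointer()
	}
	return out
}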
@@ -126,14 +149,6 @@ func (p *Processor) Reset() {
 	p.output.Reset()
 }
 
-// Checkpointer returns a checkpointer that computes a single
-// interval.
-func Checkpointer(p *Processor) export.Checkpointer {
-	return &testCheckpointer{
-		Processor: p,
-	}
-}
-
 // StartCollection implements export.Checkpointer.
 func (c *testCheckpointer) StartCollection() {
 	if c.started != c.finished {
@@ -153,8 +168,8 @@ func (c *testCheckpointer) FinishCollection() error {
 	return nil
 }
 
-// CheckpointSet implements export.Checkpointer.
-func (c *testCheckpointer) CheckpointSet() export.CheckpointSet {
+// Reader implements export.Checkpointer.
+func (c *testCheckpointer) Reader() export.Reader {
 	return c.Processor.output
 }
 
@@ -214,7 +229,7 @@ func NewOutput(labelEncoder attribute.Encoder) *Output {
 	}
 }
 
-// ForEach implements export.CheckpointSet.
+// ForEach implements export.Reader.
 func (o *Output) ForEach(_ export.ExportKindSelector, ff func(export.Record) error) error {
 	for key, value := range o.m {
 		if err := ff(export.NewRecord(
@@ -238,6 +253,10 @@ func (o *Output) AddRecord(rec export.Record) error {
 	return o.AddRecordWithResource(rec, resource.Empty())
 }
 
+func (o *Output) AddInstrumentationLibraryRecord(_ instrumentation.Library, rec export.Record) error {
+	return o.AddRecordWithResource(rec, resource.Empty())
+}
+
 func (o *Output) AddRecordWithResource(rec export.Record, res *resource.Resource) error {
 	key := mapKey{
 		desc: rec.Descriptor(),
@@ -332,17 +351,19 @@ func New(selector export.ExportKindSelector, encoder attribute.Encoder) *Exporter {
 	}
 }
 
-func (e *Exporter) Export(_ context.Context, res *resource.Resource, ckpt export.CheckpointSet) error {
+func (e *Exporter) Export(_ context.Context, res *resource.Resource, ckpt export.InstrumentationLibraryReader) error {
 	e.output.Lock()
 	defer e.output.Unlock()
 	e.exportCount++
-	return ckpt.ForEach(e.ExportKindSelector, func(r export.Record) error {
-		if e.InjectErr != nil {
-			if err := e.InjectErr(r); err != nil {
-				return err
+	return ckpt.ForEach(func(library instrumentation.Library, mr export.Reader) error {
+		return mr.ForEach(e.ExportKindSelector, func(r export.Record) error {
+			if e.InjectErr != nil {
+				if err := e.InjectErr(r); err != nil {
+					return err
+				}
 			}
-		}
-		return e.output.AddRecordWithResource(r, res)
+			return e.output.AddRecordWithResource(r, res)
+		})
 	})
 }
 
|
|||||||
e.output.Reset()
|
e.output.Reset()
|
||||||
e.exportCount = 0
|
e.exportCount = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func OneInstrumentationLibraryReader(l instrumentation.Library, r export.Reader) export.InstrumentationLibraryReader {
|
||||||
|
return oneLibraryReader{l, r}
|
||||||
|
}
|
||||||
|
|
||||||
|
type oneLibraryReader struct {
|
||||||
|
library instrumentation.Library
|
||||||
|
reader export.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o oneLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error {
|
||||||
|
return readerFunc(o.library, o.reader)
|
||||||
|
}
|
||||||
|
|
||||||
|
func MultiInstrumentationLibraryReader(records map[instrumentation.Library][]export.Record) export.InstrumentationLibraryReader {
|
||||||
|
return instrumentationLibraryReader{records: records}
|
||||||
|
}
|
||||||
|
|
||||||
|
type instrumentationLibraryReader struct {
|
||||||
|
records map[instrumentation.Library][]export.Record
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ export.InstrumentationLibraryReader = instrumentationLibraryReader{}
|
||||||
|
|
||||||
|
func (m instrumentationLibraryReader) ForEach(fn func(instrumentation.Library, export.Reader) error) error {
|
||||||
|
for library, records := range m.records {
|
||||||
|
if err := fn(library, &metricReader{records: records}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type metricReader struct {
|
||||||
|
sync.RWMutex
|
||||||
|
records []export.Record
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ export.Reader = &metricReader{}
|
||||||
|
|
||||||
|
func (m *metricReader) ForEach(_ export.ExportKindSelector, fn func(export.Record) error) error {
|
||||||
|
for _, record := range m.records {
|
||||||
|
if err := fn(record); err != nil && err != aggregation.ErrNoData {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
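OneInstrumentationLibraryReader and MultiInstrumentationLibraryReader exist so tests that hold a single Reader (or just a map of records) can still satisfy the new Export signature. A hedged usage sketch, much as the updated tests below do; the helper name exportOneLibrary and the choice of export-kind selector are illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
	"go.opentelemetry.io/otel/sdk/resource"
)

// exportOneLibrary wraps a single Reader in an InstrumentationLibraryReader
// before handing it to the test exporter.
func exportOneLibrary(ctx context.Context, r export.Reader) error {
	exporter := processortest.New(export.CumulativeExportKindSelector(), attribute.DefaultEncoder())
	return exporter.Export(ctx, resource.Empty(), processortest.OneInstrumentationLibraryReader(
		instrumentation.Library{Name: "test"},
		r,
	))
}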
@@ -23,7 +23,9 @@ import (
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
 	export "go.opentelemetry.io/otel/sdk/export/metric"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
 	metricsdk "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
 	processorTest "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
 	"go.opentelemetry.io/otel/sdk/resource"
 )
@@ -31,7 +33,7 @@ import (
 func generateTestData(proc export.Processor) {
 	ctx := context.Background()
 	accum := metricsdk.NewAccumulator(proc)
-	meter := metric.WrapMeterImpl(accum, "testing")
+	meter := metric.WrapMeterImpl(accum)
 
 	counter := metric.Must(meter).NewFloat64Counter("counter.sum")
 
|
|||||||
func TestProcessorTesting(t *testing.T) {
|
func TestProcessorTesting(t *testing.T) {
|
||||||
// Test the Processor test helper using a real Accumulator to
|
// Test the Processor test helper using a real Accumulator to
|
||||||
// generate Accumulations.
|
// generate Accumulations.
|
||||||
testProc := processorTest.NewProcessor(
|
checkpointer := processorTest.NewCheckpointer(
|
||||||
processorTest.AggregatorSelector(),
|
processorTest.NewProcessor(
|
||||||
attribute.DefaultEncoder(),
|
processorTest.AggregatorSelector(),
|
||||||
|
attribute.DefaultEncoder(),
|
||||||
|
),
|
||||||
)
|
)
|
||||||
checkpointer := processorTest.Checkpointer(testProc)
|
|
||||||
|
|
||||||
generateTestData(checkpointer)
|
generateTestData(checkpointer)
|
||||||
|
|
||||||
res := resource.NewSchemaless(attribute.String("R", "V"))
|
res := resource.NewSchemaless(attribute.String("R", "V"))
|
||||||
@ -73,7 +75,12 @@ func TestProcessorTesting(t *testing.T) {
|
|||||||
attribute.DefaultEncoder(),
|
attribute.DefaultEncoder(),
|
||||||
)
|
)
|
||||||
|
|
||||||
err := exporter.Export(context.Background(), res, checkpointer.CheckpointSet())
|
err := exporter.Export(context.Background(), res, processortest.OneInstrumentationLibraryReader(
|
||||||
|
instrumentation.Library{
|
||||||
|
Name: "test",
|
||||||
|
},
|
||||||
|
checkpointer.Reader(),
|
||||||
|
))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.EqualValues(t, expect, exporter.Values())
|
require.EqualValues(t, expect, exporter.Values())
|
||||||
}
|
}
|
||||||
|
@ -23,8 +23,10 @@ import (
|
|||||||
"go.opentelemetry.io/otel/attribute"
|
"go.opentelemetry.io/otel/attribute"
|
||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
metricsdk "go.opentelemetry.io/otel/sdk/metric"
|
metricsdk "go.opentelemetry.io/otel/sdk/metric"
|
||||||
"go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
"go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
||||||
processorTest "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
processorTest "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
||||||
"go.opentelemetry.io/otel/sdk/metric/processor/reducer"
|
"go.opentelemetry.io/otel/sdk/metric/processor/reducer"
|
||||||
"go.opentelemetry.io/otel/sdk/resource"
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
@ -53,7 +55,7 @@ func (testFilter) LabelFilterFor(_ *metric.Descriptor) attribute.Filter {
|
|||||||
|
|
||||||
func generateData(impl metric.MeterImpl) {
|
func generateData(impl metric.MeterImpl) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
meter := metric.WrapMeterImpl(impl, "testing")
|
meter := metric.WrapMeterImpl(impl)
|
||||||
|
|
||||||
counter := metric.Must(meter).NewFloat64Counter("counter.sum")
|
counter := metric.Must(meter).NewFloat64Counter("counter.sum")
|
||||||
|
|
||||||
@ -74,7 +76,7 @@ func TestFilterProcessor(t *testing.T) {
|
|||||||
attribute.DefaultEncoder(),
|
attribute.DefaultEncoder(),
|
||||||
)
|
)
|
||||||
accum := metricsdk.NewAccumulator(
|
accum := metricsdk.NewAccumulator(
|
||||||
reducer.New(testFilter{}, processorTest.Checkpointer(testProc)),
|
reducer.New(testFilter{}, processorTest.NewCheckpointer(testProc)),
|
||||||
)
|
)
|
||||||
generateData(accum)
|
generateData(accum)
|
||||||
|
|
||||||
@ -103,7 +105,9 @@ func TestFilterBasicProcessor(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
res := resource.NewSchemaless(attribute.String("R", "V"))
|
res := resource.NewSchemaless(attribute.String("R", "V"))
|
||||||
require.NoError(t, exporter.Export(context.Background(), res, basicProc.CheckpointSet()))
|
require.NoError(t, exporter.Export(context.Background(), res, processortest.OneInstrumentationLibraryReader(instrumentation.Library{
|
||||||
|
Name: "test",
|
||||||
|
}, basicProc.Reader())))
|
||||||
|
|
||||||
require.EqualValues(t, map[string]float64{
|
require.EqualValues(t, map[string]float64{
|
||||||
"counter.sum/A=1,C=3/R=V": 200,
|
"counter.sum/A=1,C=3/R=V": 200,
|
||||||
|
@ -20,6 +20,7 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel/metric"
|
"go.opentelemetry.io/otel/metric"
|
||||||
|
"go.opentelemetry.io/otel/metric/metrictest"
|
||||||
"go.opentelemetry.io/otel/metric/number"
|
"go.opentelemetry.io/otel/metric/number"
|
||||||
"go.opentelemetry.io/otel/metric/sdkapi"
|
"go.opentelemetry.io/otel/metric/sdkapi"
|
||||||
export "go.opentelemetry.io/otel/sdk/export/metric"
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
||||||
@ -32,12 +33,12 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
testCounterDesc = metric.NewDescriptor("counter", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
testCounterDesc = metrictest.NewDescriptor("counter", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||||
testUpDownCounterDesc = metric.NewDescriptor("updowncounter", sdkapi.UpDownCounterInstrumentKind, number.Int64Kind)
|
testUpDownCounterDesc = metrictest.NewDescriptor("updowncounter", sdkapi.UpDownCounterInstrumentKind, number.Int64Kind)
|
||||||
testCounterObserverDesc = metric.NewDescriptor("counterobserver", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
|
testCounterObserverDesc = metrictest.NewDescriptor("counterobserver", sdkapi.CounterObserverInstrumentKind, number.Int64Kind)
|
||||||
testUpDownCounterObserverDesc = metric.NewDescriptor("updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind)
|
testUpDownCounterObserverDesc = metrictest.NewDescriptor("updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind)
|
||||||
testHistogramDesc = metric.NewDescriptor("histogram", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
testHistogramDesc = metrictest.NewDescriptor("histogram", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||||
testGaugeObserverDesc = metric.NewDescriptor("gauge", sdkapi.GaugeObserverInstrumentKind, number.Int64Kind)
|
testGaugeObserverDesc = metrictest.NewDescriptor("gauge", sdkapi.GaugeObserverInstrumentKind, number.Int64Kind)
|
||||||
)
|
)
|
||||||
|
|
||||||
func oneAgg(sel export.AggregatorSelector, desc *metric.Descriptor) export.Aggregator {
|
func oneAgg(sel export.AggregatorSelector, desc *metric.Descriptor) export.Aggregator {
|
||||||
|
@ -246,7 +246,7 @@ func (f *testFixture) preCollect() {
|
|||||||
f.dupCheck = map[testKey]int{}
|
f.dupCheck = map[testKey]int{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*testFixture) CheckpointSet() export.CheckpointSet {
|
func (*testFixture) Reader() export.Reader {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -296,7 +296,7 @@ func stressTest(t *testing.T, impl testImpl) {
|
|||||||
cc := concurrency()
|
cc := concurrency()
|
||||||
|
|
||||||
sdk := NewAccumulator(fixture)
|
sdk := NewAccumulator(fixture)
|
||||||
meter := metric.WrapMeterImpl(sdk, "stress_test")
|
meter := metric.WrapMeterImpl(sdk)
|
||||||
fixture.wg.Add(cc + 1)
|
fixture.wg.Add(cc + 1)
|
||||||
|
|
||||||
for i := 0; i < cc; i++ {
|
for i := 0; i < cc; i++ {
|
||||||