
Enable exporting Histogram aggregation to OTLP metric (#1209)

* Add Count() to Histogram interface

* Fix copy/pasted comment for Count() interface

* Implement Histogram conversion to OTLP protobuf

* Add CHANGELOG message

* Filled in PR # for Changelog

Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
ET 2020-09-28 17:58:15 -07:00 committed by GitHub
parent a69f8fbe7a
commit 04297f4d04
4 changed files with 92 additions and 29 deletions


@ -8,6 +8,10 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
### Added
- OTLP Metric exporter supports Histogram aggregation. (#1209)
## [0.12.0] - 2020-09-24
### Added


@ -257,6 +257,13 @@ func Record(r export.Record) (*metricpb.Metric, error) {
        }
        return minMaxSumCount(r, mmsc)

    case aggregation.HistogramKind:
        h, ok := agg.(aggregation.Histogram)
        if !ok {
            return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
        }
        return histogram(r, h)

    case aggregation.SumKind:
        s, ok := agg.(aggregation.Sum)
        if !ok {
@ -327,7 +334,7 @@ func scalar(record export.Record, num metric.Number, start, end time.Time) (*met
}

// minMaxSumCountValue returns the values of the MinMaxSumCount Aggregator
// as discret values.
// as discrete values.
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum metric.Number, count int64, err error) {
    if min, err = a.Min(); err != nil {
        return
@ -383,6 +390,67 @@ func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metric
    }, nil
}

func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []float64, err error) {
    var buckets aggregation.Buckets
    if buckets, err = a.Histogram(); err != nil {
        return
    }
    boundaries, counts = buckets.Boundaries, buckets.Counts
    if len(counts) != len(boundaries)+1 {
        err = ErrTransforming
        return
    }
    return
}

// histogram transforms a Histogram Aggregator into an OTLP Metric.
func histogram(record export.Record, a aggregation.Histogram) (*metricpb.Metric, error) {
    desc := record.Descriptor()
    labels := record.Labels()
    boundaries, counts, err := histogramValues(a)
    if err != nil {
        return nil, err
    }

    count, err := a.Count()
    if err != nil {
        return nil, err
    }

    sum, err := a.Sum()
    if err != nil {
        return nil, err
    }

    buckets := make([]*metricpb.HistogramDataPoint_Bucket, len(counts))
    for i := 0; i < len(counts); i++ {
        buckets[i] = &metricpb.HistogramDataPoint_Bucket{
            Count: uint64(counts[i]),
        }
    }

    numKind := desc.NumberKind()
    return &metricpb.Metric{
        MetricDescriptor: &metricpb.MetricDescriptor{
            Name: desc.Name(),
            Description: desc.Description(),
            Unit: string(desc.Unit()),
            Type: metricpb.MetricDescriptor_HISTOGRAM,
        },
        HistogramDataPoints: []*metricpb.HistogramDataPoint{
            {
                Labels: stringKeyValues(labels.Iter()),
                StartTimeUnixNano: toNanos(record.StartTime()),
                TimeUnixNano: toNanos(record.EndTime()),
                Count: uint64(count),
                Sum: sum.CoerceToFloat64(numKind),
                Buckets: buckets,
                ExplicitBounds: boundaries,
            },
        },
    }, nil
}

// stringKeyValues transforms a label iterator into an OTLP StringKeyValues.
func stringKeyValues(iter label.Iterator) []*commonpb.StringKeyValue {
    l := iter.Len()

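For example, for the "valuerecorder" instrument exercised in the updated tests below (boundaries {2, 4, 8}, two recorded measurements summing to 11), the new transform produces roughly the following Metric. This is a sketch mirroring the expected values in the test; labels, timestamps, and the empty description/unit are elided here:

&metricpb.Metric{
    MetricDescriptor: &metricpb.MetricDescriptor{
        Name: "valuerecorder",
        Type: metricpb.MetricDescriptor_HISTOGRAM,
    },
    HistogramDataPoints: []*metricpb.HistogramDataPoint{
        {
            Count:          2,
            Sum:            11,
            ExplicitBounds: []float64{2, 4, 8},
            Buckets: []*metricpb.HistogramDataPoint_Bucket{
                {Count: 1}, {Count: 0}, {Count: 0}, {Count: 1},
            },
        },
    },
}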

@ -33,7 +33,7 @@ import (
    metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
    "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
    "go.opentelemetry.io/otel/sdk/export/metric/metrictest"
    "go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
    "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
    "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
    "go.opentelemetry.io/otel/sdk/resource"
@ -107,6 +107,8 @@ var (
    testInstA = resource.New(label.String("instance", "tester-a"))
    testInstB = resource.New(label.String("instance", "tester-b"))

    testHistogramBoundaries = []float64{2.0, 4.0, 8.0}

    md = &metricpb.MetricDescriptor{
        Name: "int64-count",
        Type: metricpb.MetricDescriptor_INT64,
@ -229,9 +231,9 @@ func TestValuerecorderMetricGroupingExport(t *testing.T) {
    {
        MetricDescriptor: &metricpb.MetricDescriptor{
            Name: "valuerecorder",
            Type: metricpb.MetricDescriptor_SUMMARY,
            Type: metricpb.MetricDescriptor_HISTOGRAM,
        },
        SummaryDataPoints: []*metricpb.SummaryDataPoint{
        HistogramDataPoints: []*metricpb.HistogramDataPoint{
            {
                Labels: []*commonpb.StringKeyValue{
                    {
@ -243,20 +245,14 @@ func TestValuerecorderMetricGroupingExport(t *testing.T) {
                        Value: "test.com",
                    },
                },
                Count: 2,
                Sum: 11,
                PercentileValues: []*metricpb.SummaryDataPoint_ValueAtPercentile{
                    {
                        Percentile: 0.0,
                        Value: 1.0,
                    },
                    {
                        Percentile: 100.0,
                        Value: 10.0,
                    },
                },
                StartTimeUnixNano: startTime(),
                TimeUnixNano: pointTime(),
                Count: 2,
                Sum: 11,
                ExplicitBounds: testHistogramBoundaries,
                Buckets: []*metricpb.HistogramDataPoint_Bucket{
                    {Count: 1}, {Count: 0}, {Count: 0}, {Count: 1},
                },
            },
            {
                Labels: []*commonpb.StringKeyValue{
@ -269,17 +265,11 @@ func TestValuerecorderMetricGroupingExport(t *testing.T) {
                        Value: "test.com",
                    },
                },
                Count: 2,
                Sum: 11,
                PercentileValues: []*metricpb.SummaryDataPoint_ValueAtPercentile{
                    {
                        Percentile: 0.0,
                        Value: 1.0,
                    },
                    {
                        Percentile: 100.0,
                        Value: 10.0,
                    },
                },
                Count: 2,
                Sum: 11,
                ExplicitBounds: testHistogramBoundaries,
                Buckets: []*metricpb.HistogramDataPoint_Bucket{
                    {Count: 1}, {Count: 0}, {Count: 0}, {Count: 1},
                },
                StartTimeUnixNano: startTime(),
                TimeUnixNano: pointTime(),
@ -690,7 +680,7 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me
        case metric.CounterKind:
            agg, ckpt = metrictest.Unslice2(sum.New(2))
        default:
            agg, ckpt = metrictest.Unslice2(minmaxsumcount.New(2, &desc))
            agg, ckpt = metrictest.Unslice2(histogram.New(2, &desc, testHistogramBoundaries))
        }

        ctx := context.Background()

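As a quick worked check of the expected data points above, here is a hypothetical re-derivation of the bucket counts. The two recorded values, 1 and 10, are inferred from Sum: 11 and from the 0th/100th percentile expectations this commit removes; the boundary inclusiveness in the inner loop is an assumption of this sketch, not taken from the SDK aggregator, and does not affect these particular values.

package main

import "fmt"

func main() {
    boundaries := []float64{2.0, 4.0, 8.0} // testHistogramBoundaries
    values := []float64{1, 10}             // assumed recorded measurements

    counts := make([]uint64, len(boundaries)+1) // one extra overflow bucket
    var count uint64
    var sum float64
    for _, v := range values {
        i := 0
        for i < len(boundaries) && v >= boundaries[i] {
            i++
        }
        counts[i]++ // 1 stays below 2.0; 10 lands in the overflow bucket
        count++
        sum += v
    }

    fmt.Println(counts, count, sum) // [1 0 0 1] 2 11, matching both expected HistogramDataPoints
}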

@ -40,7 +40,7 @@ type (
        Sum() (metric.Number, error)
    }

    // Sum returns the number of values that were aggregated.
    // Count returns the number of values that were aggregated.
    Count interface {
        Aggregation
        Count() (int64, error)
@ -95,6 +95,7 @@ type (
    // Histogram returns the count of events in pre-determined buckets.
    Histogram interface {
        Aggregation
        Count() (int64, error)
        Sum() (metric.Number, error)
        Histogram() (Buckets, error)
    }
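With Count() added alongside Sum() and Histogram(), an exporter can read everything an OTLP histogram needs from this single interface. Below is a minimal consumer sketch mirroring the transform code in this commit; the helper name is illustrative, and the metric/aggregation package aliases are assumed to match the files in this diff.

// histogramAsValues is an illustrative helper, not part of this commit.
func histogramAsValues(agg aggregation.Aggregation, numKind metric.NumberKind) (count int64, sum float64, counts []float64, err error) {
    h, ok := agg.(aggregation.Histogram)
    if !ok {
        return 0, 0, nil, fmt.Errorf("not a histogram aggregation: %T", agg)
    }
    if count, err = h.Count(); err != nil {
        return
    }
    var s metric.Number
    if s, err = h.Sum(); err != nil {
        return
    }
    sum = s.CoerceToFloat64(numKind)
    var b aggregation.Buckets
    if b, err = h.Histogram(); err != nil {
        return
    }
    counts = b.Counts // always one more entry than b.Boundaries
    return count, sum, counts, nil
}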