
Merge pull request #808 from jmacd/jmacd/agg_refactor

Add aggregation.Kind and rename sdk/export/metric/aggregator to aggregation
Tyler Yahn 2020-06-10 10:20:58 -07:00 committed by GitHub
commit 3b22e73328
26 changed files with 249 additions and 138 deletions
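
For exporters the rename is mechanical: every type assertion against the old aggregator interfaces now targets the aggregation package, and each built-in Aggregator reports a descriptive Kind. A minimal sketch of post-rename consumer code, assuming a hypothetical processRecord helper (not part of this change):

```go
package example

import (
	"fmt"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// processRecord is a hypothetical helper showing the renamed import path:
// assert the record's Aggregator against aggregation interfaces (formerly
// aggregator.*) and fall back to the new Kind for diagnostics.
func processRecord(r export.Record) error {
	agg := r.Aggregator()
	if sum, ok := agg.(aggregation.Sum); ok {
		v, err := sum.Sum()
		if err != nil {
			return fmt.Errorf("exporting sum: %w", err)
		}
		fmt.Println("sum:", v)
		return nil
	}
	if lv, ok := agg.(aggregation.LastValue); ok {
		v, ts, err := lv.LastValue()
		if err != nil {
			return fmt.Errorf("exporting last value: %w", err)
		}
		fmt.Println("last value:", v, "at", ts)
		return nil
	}
	// Anything implementing aggregation.Aggregation can report its Kind.
	if a, ok := agg.(aggregation.Aggregation); ok {
		return fmt.Errorf("unsupported aggregation kind: %s", a.Kind())
	}
	return fmt.Errorf("unsupported aggregator: %T", agg)
}
```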

View File

@ -27,7 +27,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/controller/pull"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
@ -216,11 +216,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
desc := c.toDesc(record, labelKeys)
if hist, ok := agg.(aggregator.Histogram); ok {
if hist, ok := agg.(aggregation.Histogram); ok {
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting histogram: %w", err)
}
} else if dist, ok := agg.(aggregator.Distribution); ok {
} else if dist, ok := agg.(aggregation.Distribution); ok {
// TODO: summary values are never reset.
// As measurements are recorded, new records start to have less impact on these summaries.
// We should implement a solution that is similar to the Prometheus Clients
@ -232,11 +232,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
if err := c.exportSummary(ch, dist, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting summary: %w", err)
}
} else if sum, ok := agg.(aggregator.Sum); ok {
} else if sum, ok := agg.(aggregation.Sum); ok {
if err := c.exportCounter(ch, sum, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting counter: %w", err)
}
} else if lastValue, ok := agg.(aggregator.LastValue); ok {
} else if lastValue, ok := agg.(aggregation.LastValue); ok {
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting last value: %w", err)
}
@ -248,7 +248,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
}
}
func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregator.LastValue, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregation.LastValue, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
lv, _, err := lvagg.LastValue()
if err != nil {
return fmt.Errorf("error retrieving last value: %w", err)
@ -263,7 +263,7 @@ func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregato
return nil
}
func (c *collector) exportCounter(ch chan<- prometheus.Metric, sum aggregator.Sum, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
func (c *collector) exportCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
v, err := sum.Sum()
if err != nil {
return fmt.Errorf("error retrieving counter: %w", err)
@ -278,7 +278,7 @@ func (c *collector) exportCounter(ch chan<- prometheus.Metric, sum aggregator.Su
return nil
}
func (c *collector) exportSummary(ch chan<- prometheus.Metric, dist aggregator.Distribution, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
func (c *collector) exportSummary(ch chan<- prometheus.Metric, dist aggregation.Distribution, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
count, err := dist.Count()
if err != nil {
return fmt.Errorf("error retrieving count: %w", err)
@ -305,7 +305,7 @@ func (c *collector) exportSummary(ch chan<- prometheus.Metric, dist aggregator.D
return nil
}
func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregator.Histogram, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregation.Histogram, kind metric.NumberKind, desc *prometheus.Desc, labels []string) error {
buckets, err := hist.Histogram()
if err != nil {
return fmt.Errorf("error retrieving histogram: %w", err)

View File

@ -27,7 +27,7 @@ import (
"go.opentelemetry.io/otel/api/label"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
@ -98,7 +98,7 @@ func NewRawExporter(config Config) (*Exporter, error) {
} else {
for _, q := range config.Quantiles {
if q < 0 || q > 1 {
return nil, aggregator.ErrInvalidQuantile
return nil, aggregation.ErrInvalidQuantile
}
}
}
@ -164,7 +164,7 @@ func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet)
var expose expoLine
if sum, ok := agg.(aggregator.Sum); ok {
if sum, ok := agg.(aggregation.Sum); ok {
value, err := sum.Sum()
if err != nil {
return err
@ -172,7 +172,7 @@ func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet)
expose.Sum = value.AsInterface(kind)
}
if mmsc, ok := agg.(aggregator.MinMaxSumCount); ok {
if mmsc, ok := agg.(aggregation.MinMaxSumCount); ok {
count, err := mmsc.Count()
if err != nil {
return err
@ -191,7 +191,7 @@ func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet)
}
expose.Min = min.AsInterface(kind)
if dist, ok := agg.(aggregator.Distribution); ok && len(e.config.Quantiles) != 0 {
if dist, ok := agg.(aggregation.Distribution); ok && len(e.config.Quantiles) != 0 {
summary := make([]expoQuantile, len(e.config.Quantiles))
expose.Quantiles = summary
@ -208,7 +208,7 @@ func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet)
}
}
}
} else if lv, ok := agg.(aggregator.LastValue); ok {
} else if lv, ok := agg.(aggregation.LastValue); ok {
value, timestamp, err := lv.LastValue()
if err != nil {
return err

View File

@ -29,7 +29,7 @@ import (
"go.opentelemetry.io/otel/exporters/metric/stdout"
"go.opentelemetry.io/otel/exporters/metric/test"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
@ -80,7 +80,7 @@ func TestStdoutInvalidQuantile(t *testing.T) {
Quantiles: []float64{1.1, 0.9},
})
require.Error(t, err, "Invalid quantile error expected")
require.Equal(t, aggregator.ErrInvalidQuantile, err)
require.Equal(t, aggregation.ErrInvalidQuantile, err)
}
func TestStdoutTimestamp(t *testing.T) {

View File

@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
@ -117,7 +117,7 @@ func (p *CheckpointSet) updateAggregator(desc *metric.Descriptor, newAgg export.
func (p *CheckpointSet) ForEach(f func(export.Record) error) error {
for _, r := range p.updates {
if err := f(r); err != nil && !errors.Is(err, aggregator.ErrNoData) {
if err := f(r); err != nil && !errors.Is(err, aggregation.ErrNoData) {
return err
}
}

View File

@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/resource"
)
@ -230,9 +230,9 @@ func Record(r export.Record) (*metricpb.Metric, error) {
d := r.Descriptor()
l := r.Labels()
switch a := r.Aggregator().(type) {
case aggregator.MinMaxSumCount:
case aggregation.MinMaxSumCount:
return minMaxSumCount(d, l, a)
case aggregator.Sum:
case aggregation.Sum:
return sum(d, l, a)
default:
return nil, fmt.Errorf("%w: %v", ErrUnimplementedAgg, a)
@ -240,7 +240,7 @@ func Record(r export.Record) (*metricpb.Metric, error) {
}
// sum transforms a Sum Aggregator into an OTLP Metric.
func sum(desc *metric.Descriptor, labels *label.Set, a aggregator.Sum) (*metricpb.Metric, error) {
func sum(desc *metric.Descriptor, labels *label.Set, a aggregation.Sum) (*metricpb.Metric, error) {
sum, err := a.Sum()
if err != nil {
return nil, err
@ -275,7 +275,7 @@ func sum(desc *metric.Descriptor, labels *label.Set, a aggregator.Sum) (*metricp
// minMaxSumCountValues returns the values of the MinMaxSumCount Aggregator
// as discrete values.
func minMaxSumCountValues(a aggregator.MinMaxSumCount) (min, max, sum metric.Number, count int64, err error) {
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum metric.Number, count int64, err error) {
if min, err = a.Min(); err != nil {
return
}
@ -292,7 +292,7 @@ func minMaxSumCountValues(a aggregator.MinMaxSumCount) (min, max, sum metric.Num
}
// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
func minMaxSumCount(desc *metric.Descriptor, labels *label.Set, a aggregator.MinMaxSumCount) (*metricpb.Metric, error) {
func minMaxSumCount(desc *metric.Descriptor, labels *label.Set, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
min, max, sum, count, err := minMaxSumCountValues(a)
if err != nil {
return nil, err

View File

@ -27,7 +27,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/unit"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
sumAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
)
@ -86,7 +86,7 @@ func TestMinMaxSumCountValue(t *testing.T) {
// Prior to checkpointing ErrNoData should be returned.
_, _, _, _, err := minMaxSumCountValues(mmsc)
assert.EqualError(t, err, aggregator.ErrNoData.Error())
assert.EqualError(t, err, aggregation.ErrNoData.Error())
// Checkpoint to set non-zero values
mmsc.Checkpoint(&metric.Descriptor{})
@ -198,7 +198,7 @@ func TestMinMaxSumCountPropagatesErrors(t *testing.T) {
mmsc := minmaxsumcount.New(&metric.Descriptor{})
_, _, _, _, err := minMaxSumCountValues(mmsc)
assert.Error(t, err)
assert.Equal(t, aggregator.ErrNoData, err)
assert.Equal(t, aggregation.ErrNoData, err)
}
func TestSumMetricDescriptor(t *testing.T) {

View File

@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/resource"
@ -67,7 +67,7 @@ type checkpointSet struct {
func (m *checkpointSet) ForEach(fn func(metricsdk.Record) error) error {
for _, r := range m.records {
if err := fn(r); err != nil && err != aggregator.ErrNoData {
if err := fn(r); err != nil && err != aggregation.ErrNoData {
return err
}
}

View File

@ -12,54 +12,68 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregator // import "go.opentelemetry.io/otel/sdk/export/metric/aggregator"
package aggregation // import "go.opentelemetry.io/otel/sdk/export/metric/aggregation"
import (
"fmt"
"math"
"time"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
)
// These interfaces describe the various ways to access state from an
// Aggregator.
// Aggregation.
type (
// Aggregation is an interface returned by the Aggregator
// containing an interval of metric data.
Aggregation interface {
// Kind returns a short string identifying the
// Aggregator that was used to produce the
// Aggregation (e.g., "Sum").
Kind() Kind
}
// Sum returns an aggregated sum.
Sum interface {
Aggregation
Sum() (metric.Number, error)
}
// Count returns the number of values that were aggregated.
Count interface {
Aggregation
Count() (int64, error)
}
// Min returns the minimum value over the set of values that were aggregated.
Min interface {
Aggregation
Min() (metric.Number, error)
}
// Max returns the maximum value over the set of values that were aggregated.
Max interface {
Aggregation
Max() (metric.Number, error)
}
// Quantile returns an exact or estimated quantile over the
// set of values that were aggregated.
Quantile interface {
Aggregation
Quantile(float64) (metric.Number, error)
}
// LastValue returns the latest value that was aggregated.
LastValue interface {
Aggregation
LastValue() (metric.Number, time.Time, error)
}
// Points returns the raw set of values that were aggregated.
Points interface {
Aggregation
Points() ([]metric.Number, error)
}
@ -80,26 +94,58 @@ type (
// Histogram returns the count of events in pre-determined buckets.
Histogram interface {
Sum
Aggregation
Sum() (metric.Number, error)
Histogram() (Buckets, error)
}
// MinMaxSumCount supports the Min, Max, Sum, and Count interfaces.
MinMaxSumCount interface {
Min
Max
Sum
Count
Aggregation
Min() (metric.Number, error)
Max() (metric.Number, error)
Sum() (metric.Number, error)
Count() (int64, error)
}
// Distribution supports the Min, Max, Sum, Count, and Quantile
// interfaces.
Distribution interface {
MinMaxSumCount
Quantile
Aggregation
Min() (metric.Number, error)
Max() (metric.Number, error)
Sum() (metric.Number, error)
Count() (int64, error)
Quantile(float64) (metric.Number, error)
}
)
type (
// Kind is a short name for the Aggregator that produces an
// Aggregation, used for descriptive purpose only. Kind is a
// string to allow user-defined Aggregators.
//
// When deciding how to expose metric data, Exporters are
// encouraged to dispatch on conversion to the above
// interfaces, ordered by strength, rather than on the Kind
// value. This enables user-supplied Aggregators to replace
// builtin Aggregators.
//
// For example, test for a Distribution before testing for a
// MinMaxSumCount, test for a Histogram before testing for a
// Sum, and so on.
Kind string
)
const (
SumKind Kind = "Sum"
MinMaxSumCountKind Kind = "MinMaxSumCount"
HistogramKind Kind = "Histogram"
LastValueKind Kind = "Lastvalue"
SketchKind Kind = "Sketch"
ExactKind Kind = "Exact"
)
var (
ErrInvalidQuantile = fmt.Errorf("the requested quantile is out of range")
ErrNegativeInput = fmt.Errorf("negative value is out of range for this instrument")
@ -112,29 +158,7 @@ var (
ErrNoData = fmt.Errorf("no data collected by this aggregator")
)
// NewInconsistentMergeError formats an error describing an attempt to
// merge different-type aggregators. The result can be unwrapped as
// an ErrInconsistentType.
func NewInconsistentMergeError(a1, a2 export.Aggregator) error {
return fmt.Errorf("cannot merge %T with %T: %w", a1, a2, ErrInconsistentType)
}
// RangeTest is a common routine for testing for valid input values.
// This rejects NaN values. This rejects negative values when the
// metric instrument does not support negative values, including
// monotonic counter metrics and absolute ValueRecorder metrics.
func RangeTest(number metric.Number, descriptor *metric.Descriptor) error {
numberKind := descriptor.NumberKind()
if numberKind == metric.Float64NumberKind && math.IsNaN(number.AsFloat64()) {
return ErrNaNInput
}
switch descriptor.MetricKind() {
case metric.CounterKind, metric.SumObserverKind:
if number.IsNegative(numberKind) {
return ErrNegativeInput
}
}
return nil
// String returns the string value of Kind.
func (k Kind) String() string {
return string(k)
}
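
The exporter guidance in the Kind comment above (convert to the strongest interface first rather than switching on the Kind string) maps directly onto an ordered type switch. A sketch under that assumption; the exportXxx helpers below are placeholders, not functions from this change:

```go
package example

import (
	"fmt"

	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// selectExport tests richer interfaces (Histogram, Distribution) before the
// weaker ones they also satisfy (Sum, MinMaxSumCount), so a user-supplied
// Aggregator is exported at the highest fidelity it supports.
func selectExport(agg export.Aggregator) error {
	switch a := agg.(type) {
	case aggregation.Histogram:
		return exportHistogram(a) // a Histogram is also a Sum; test it first
	case aggregation.Distribution:
		return exportSummary(a) // a Distribution is also a MinMaxSumCount
	case aggregation.MinMaxSumCount:
		return exportMinMaxSumCount(a)
	case aggregation.Sum:
		return exportCounter(a)
	case aggregation.LastValue:
		return exportGauge(a)
	default:
		return fmt.Errorf("unsupported aggregation: %T", agg)
	}
}

// Placeholder sinks; a real exporter would write to its backend here.
func exportHistogram(aggregation.Histogram) error           { return nil }
func exportSummary(aggregation.Distribution) error          { return nil }
func exportMinMaxSumCount(aggregation.MinMaxSumCount) error { return nil }
func exportCounter(aggregation.Sum) error                   { return nil }
func exportGauge(aggregation.LastValue) error               { return nil }
```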

View File

@ -0,0 +1,51 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
import (
"fmt"
"math"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)
// NewInconsistentMergeError formats an error describing an attempt to
// merge different-type aggregators. The result can be unwrapped as
// an ErrInconsistentType.
func NewInconsistentMergeError(a1, a2 export.Aggregator) error {
return fmt.Errorf("cannot merge %T with %T: %w", a1, a2, aggregation.ErrInconsistentType)
}
// RangeTest is a common routine for testing for valid input values.
// This rejects NaN values. This rejects negative values when the
// metric instrument does not support negative values, including
// monotonic counter metrics and absolute ValueRecorder metrics.
func RangeTest(number metric.Number, descriptor *metric.Descriptor) error {
numberKind := descriptor.NumberKind()
if numberKind == metric.Float64NumberKind && math.IsNaN(number.AsFloat64()) {
return aggregation.ErrNaNInput
}
switch descriptor.MetricKind() {
case metric.CounterKind, metric.SumObserverKind:
if number.IsNegative(numberKind) {
return aggregation.ErrNegativeInput
}
}
return nil
}
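
These helpers keep their previous behavior; only the sentinel errors they return now live in the aggregation package. A minimal sketch of guarding an aggregator update with the relocated RangeTest, assuming the export.Aggregator Update signature at this version; the guardedUpdate wrapper is hypothetical:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
)

// guardedUpdate rejects out-of-range input (NaN, or a negative value for a
// monotonic instrument) before handing the number to the Aggregator. On
// rejection RangeTest returns aggregation.ErrNaNInput or
// aggregation.ErrNegativeInput.
func guardedUpdate(ctx context.Context, agg export.Aggregator, n metric.Number, desc *metric.Descriptor) error {
	if err := aggregator.RangeTest(n, desc); err != nil {
		return err
	}
	return agg.Update(ctx, n, desc)
}
```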

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregator_test // import "go.opentelemetry.io/otel/sdk/export/metric/aggregator"
package aggregator_test // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
import (
"errors"
@ -22,7 +22,8 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
)
@ -34,7 +35,7 @@ func TestInconsistentMergeErr(t *testing.T) {
"cannot merge *sum.Aggregator with *lastvalue.Aggregator: inconsistent aggregator types",
err.Error(),
)
require.True(t, errors.Is(err, aggregator.ErrInconsistentType))
require.True(t, errors.Is(err, aggregation.ErrInconsistentType))
}
func testRangeNaN(t *testing.T, desc *metric.Descriptor) {
@ -43,7 +44,7 @@ func testRangeNaN(t *testing.T, desc *metric.Descriptor) {
err := aggregator.RangeTest(nan, desc)
if desc.NumberKind() == metric.Float64NumberKind {
require.Equal(t, aggregator.ErrNaNInput, err)
require.Equal(t, aggregation.ErrNaNInput, err)
} else {
require.Nil(t, err)
}
@ -64,7 +65,7 @@ func testRangeNegative(t *testing.T, desc *metric.Descriptor) {
negErr := aggregator.RangeTest(neg, desc)
require.Nil(t, posErr)
require.Equal(t, negErr, aggregator.ErrNegativeInput)
require.Equal(t, negErr, aggregation.ErrNegativeInput)
}
func TestRangeTest(t *testing.T) {

View File

@ -23,7 +23,8 @@ import (
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
type (
@ -41,9 +42,9 @@ type (
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MinMaxSumCount = &Aggregator{}
var _ aggregator.Distribution = &Aggregator{}
var _ aggregator.Points = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
var _ aggregation.Distribution = &Aggregator{}
var _ aggregation.Points = &Aggregator{}
// New returns a new array aggregator, which aggregates recorded
// measurements by storing them in an array. This type uses a mutex
@ -52,6 +53,11 @@ func New() *Aggregator {
return &Aggregator{}
}
// Kind returns aggregation.ExactKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.ExactKind
}
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (metric.Number, error) {
return c.ckptSum, nil
@ -179,11 +185,11 @@ func (p *points) Swap(i, j int) {
// of a quantile.
func (p *points) Quantile(q float64) (metric.Number, error) {
if len(*p) == 0 {
return metric.Number(0), aggregator.ErrNoData
return metric.Number(0), aggregation.ErrNoData
}
if q < 0 || q > 1 {
return metric.Number(0), aggregator.ErrInvalidQuantile
return metric.Number(0), aggregation.ErrInvalidQuantile
}
if q == 0 || len(*p) == 1 {

View File

@ -25,7 +25,7 @@ import (
"go.opentelemetry.io/otel/api/metric"
ottest "go.opentelemetry.io/otel/internal/testing"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/test"
)
@ -199,15 +199,15 @@ func TestArrayErrors(t *testing.T) {
_, err := agg.Max()
require.Error(t, err)
require.Equal(t, err, aggregator.ErrNoData)
require.Equal(t, err, aggregation.ErrNoData)
_, err = agg.Min()
require.Error(t, err)
require.Equal(t, err, aggregator.ErrNoData)
require.Equal(t, err, aggregation.ErrNoData)
_, err = agg.Quantile(0.1)
require.Error(t, err)
require.Equal(t, err, aggregator.ErrNoData)
require.Equal(t, err, aggregation.ErrNoData)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
@ -228,11 +228,11 @@ func TestArrayErrors(t *testing.T) {
_, err = agg.Quantile(-0.0001)
require.Error(t, err)
require.Equal(t, err, aggregator.ErrInvalidQuantile)
require.Equal(t, err, aggregation.ErrInvalidQuantile)
_, err = agg.Quantile(1.0001)
require.Error(t, err)
require.Equal(t, err, aggregator.ErrInvalidQuantile)
require.Equal(t, err, aggregation.ErrInvalidQuantile)
})
}

View File

@ -21,9 +21,9 @@ import (
sdk "github.com/DataDog/sketches-go/ddsketch"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
// Config is an alias for the underlying DDSketch config object.
@ -39,8 +39,8 @@ type Aggregator struct {
}
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MinMaxSumCount = &Aggregator{}
var _ aggregator.Distribution = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
var _ aggregation.Distribution = &Aggregator{}
// New returns a new DDSketch aggregator.
func New(desc *metric.Descriptor, cfg *Config) *Aggregator {
@ -52,6 +52,11 @@ func New(desc *metric.Descriptor, cfg *Config) *Aggregator {
}
}
// Kind returns aggregation.SketchKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.SketchKind
}
// NewDefaultConfig returns a new, default DDSketch config.
//
// TODO: Should the Config constructor set minValue to -Inf to
@ -85,11 +90,11 @@ func (c *Aggregator) Min() (metric.Number, error) {
// It is an error if `q` is less than 0 or greater than 1.
func (c *Aggregator) Quantile(q float64) (metric.Number, error) {
if c.checkpoint.Count() == 0 {
return metric.Number(0), aggregator.ErrNoData
return metric.Number(0), aggregation.ErrNoData
}
f := c.checkpoint.Quantile(q)
if math.IsNaN(f) {
return metric.Number(0), aggregator.ErrInvalidQuantile
return metric.Number(0), aggregation.ErrInvalidQuantile
}
return c.toNumber(f), nil
}

View File

@ -21,7 +21,8 @@ import (
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
// Note: This code uses a Mutex to govern access to the exclusive
@ -51,9 +52,9 @@ type (
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Sum = &Aggregator{}
var _ aggregator.Count = &Aggregator{}
var _ aggregator.Histogram = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
var _ aggregation.Count = &Aggregator{}
var _ aggregation.Histogram = &Aggregator{}
// New returns a new aggregator for computing Histograms.
//
@ -79,6 +80,11 @@ func New(desc *metric.Descriptor, boundaries []float64) *Aggregator {
}
}
// Kind returns aggregation.HistogramKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.HistogramKind
}
// Sum returns the sum of all values in the checkpoint.
func (c *Aggregator) Sum() (metric.Number, error) {
c.lock.Lock()
@ -94,10 +100,10 @@ func (c *Aggregator) Count() (int64, error) {
}
// Histogram returns the count of events in pre-determined buckets.
func (c *Aggregator) Histogram() (aggregator.Buckets, error) {
func (c *Aggregator) Histogram() (aggregation.Buckets, error) {
c.lock.Lock()
defer c.lock.Unlock()
return aggregator.Buckets{
return aggregation.Buckets{
Boundaries: c.boundaries,
Counts: c.checkpoint.bucketCounts,
}, nil

View File

@ -22,7 +22,8 @@ import (
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
type (
@ -53,7 +54,7 @@ type (
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.LastValue = &Aggregator{}
var _ aggregation.LastValue = &Aggregator{}
// An unset lastValue has zero timestamp and zero value.
var unsetLastValue = &lastValueData{}
@ -67,14 +68,19 @@ func New() *Aggregator {
}
}
// Kind returns aggregation.LastValueKind.
func (g *Aggregator) Kind() aggregation.Kind {
return aggregation.LastValueKind
}
// LastValue returns the last-recorded lastValue value and the
// corresponding timestamp. The error value aggregator.ErrNoData
// corresponding timestamp. The error value aggregation.ErrNoData
// will be returned if (due to a race condition) the checkpoint was
// computed before the first value was set.
func (g *Aggregator) LastValue() (metric.Number, time.Time, error) {
gd := (*lastValueData)(g.checkpoint)
if gd == unsetLastValue {
return metric.Number(0), time.Time{}, aggregator.ErrNoData
return metric.Number(0), time.Time{}, aggregation.ErrNoData
}
return gd.value.AsNumber(), gd.timestamp, nil
}

View File

@ -25,7 +25,7 @@ import (
"go.opentelemetry.io/otel/api/metric"
ottest "go.opentelemetry.io/otel/internal/testing"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/test"
)
@ -108,7 +108,7 @@ func TestLastValueNotSet(t *testing.T) {
g.Checkpoint(descriptor)
value, timestamp, err := g.LastValue()
require.Equal(t, aggregator.ErrNoData, err)
require.Equal(t, aggregation.ErrNoData, err)
require.True(t, timestamp.IsZero())
require.Equal(t, metric.Number(0), value)
}

View File

@ -20,7 +20,8 @@ import (
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
type (
@ -42,7 +43,7 @@ type (
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MinMaxSumCount = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
// New returns a new aggregator for computing the min, max, sum, and
// count. It does not compute quantile information other than Min and
@ -62,6 +63,11 @@ func New(desc *metric.Descriptor) *Aggregator {
}
}
// Kind returns aggregation.MinMaxSumCountKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.MinMaxSumCountKind
}
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (metric.Number, error) {
c.lock.Lock()
@ -77,25 +83,25 @@ func (c *Aggregator) Count() (int64, error) {
}
// Min returns the minimum value in the checkpoint.
// The error value aggregator.ErrNoData will be returned
// The error value aggregation.ErrNoData will be returned
// if there were no measurements recorded during the checkpoint.
func (c *Aggregator) Min() (metric.Number, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.checkpoint.count.IsZero(metric.Uint64NumberKind) {
return c.kind.Zero(), aggregator.ErrNoData
return c.kind.Zero(), aggregation.ErrNoData
}
return c.checkpoint.min, nil
}
// Max returns the maximum value in the checkpoint.
// The error value aggregator.ErrNoData will be returned
// The error value aggregation.ErrNoData will be returned
// if there were no measurements recorded during the checkpoint.
func (c *Aggregator) Max() (metric.Number, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.checkpoint.count.IsZero(metric.Uint64NumberKind) {
return c.kind.Zero(), aggregator.ErrNoData
return c.kind.Zero(), aggregation.ErrNoData
}
return c.checkpoint.max, nil
}

View File

@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/test"
)
@ -193,7 +193,7 @@ func TestMaxSumCountNotSet(t *testing.T) {
require.Nil(t, err)
max, err := agg.Max()
require.Equal(t, aggregator.ErrNoData, err)
require.Equal(t, aggregation.ErrNoData, err)
require.Equal(t, metric.Number(0), max)
})
}

View File

@ -19,7 +19,8 @@ import (
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
// Aggregator aggregates counter events.
@ -34,15 +35,20 @@ type Aggregator struct {
}
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Sum = &Aggregator{}
var _ aggregation.Sum = &Aggregator{}
// New returns a new counter aggregator implemented by atomic
// operations. This aggregator implements the aggregator.Sum
// operations. This aggregator implements the aggregation.Sum
// export interface.
func New() *Aggregator {
return &Aggregator{}
}
// Kind returns aggregation.SumKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.SumKind
}
// Sum returns the last-checkpointed sum. This will never return an
// error.
func (c *Aggregator) Sum() (metric.Number, error) {

View File

@ -25,7 +25,7 @@ import (
"go.opentelemetry.io/otel/api/metric"
ottest "go.opentelemetry.io/otel/internal/testing"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
const Magnitude = 1000

View File

@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/exporters/metric/test"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
controllerTest "go.opentelemetry.io/otel/sdk/metric/controller/test"
@ -175,7 +175,7 @@ func TestPushTicker(t *testing.T) {
require.Equal(t, "counter", records[0].Descriptor().Name())
require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder()))
sum, err := records[0].Aggregator().(aggregator.Sum).Sum()
sum, err := records[0].Aggregator().(aggregation.Sum).Sum()
require.Equal(t, int64(3), sum.AsInt64())
require.Nil(t, err)
@ -192,7 +192,7 @@ func TestPushTicker(t *testing.T) {
require.Equal(t, "counter", records[0].Descriptor().Name())
require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder()))
sum, err = records[0].Aggregator().(aggregator.Sum).Sum()
sum, err = records[0].Aggregator().(aggregation.Sum).Sum()
require.Equal(t, int64(7), sum.AsInt64())
require.Nil(t, err)
@ -216,7 +216,7 @@ func TestPushExportError(t *testing.T) {
expectedError error
}{
{"errNone", nil, []string{"counter1{R=V,X=Y}", "counter2{R=V,}"}, nil},
{"errNoData", aggregator.ErrNoData, []string{"counter2{R=V,}"}, nil},
{"errNoData", aggregation.ErrNoData, []string{"counter2{R=V,}"}, nil},
{"errUnexpected", errAggregator, []string{}, errAggregator},
}
for _, tt := range tests {

View File

@ -30,7 +30,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
metricsdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
@ -131,7 +131,7 @@ func TestInputRangeCounter(t *testing.T) {
counter := Must(meter).NewInt64Counter("name.counter")
counter.Add(ctx, -1)
require.Equal(t, aggregator.ErrNegativeInput, testHandler.Flush())
require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush())
checkpointed := sdk.Collect(ctx)
require.Equal(t, 0, checkpointed)
@ -139,7 +139,7 @@ func TestInputRangeCounter(t *testing.T) {
integrator.records = nil
counter.Add(ctx, 1)
checkpointed = sdk.Collect(ctx)
sum, err := integrator.records[0].Aggregator().(aggregator.Sum).Sum()
sum, err := integrator.records[0].Aggregator().(aggregation.Sum).Sum()
require.Equal(t, int64(1), sum.AsInt64())
require.Equal(t, 1, checkpointed)
require.Nil(t, err)
@ -158,7 +158,7 @@ func TestInputRangeUpDownCounter(t *testing.T) {
counter.Add(ctx, 1)
checkpointed := sdk.Collect(ctx)
sum, err := integrator.records[0].Aggregator().(aggregator.Sum).Sum()
sum, err := integrator.records[0].Aggregator().(aggregation.Sum).Sum()
require.Equal(t, int64(1), sum.AsInt64())
require.Equal(t, 1, checkpointed)
require.Nil(t, err)
@ -172,7 +172,7 @@ func TestInputRangeValueRecorder(t *testing.T) {
valuerecorder := Must(meter).NewFloat64ValueRecorder("name.valuerecorder")
valuerecorder.Record(ctx, math.NaN())
require.Equal(t, aggregator.ErrNaNInput, testHandler.Flush())
require.Equal(t, aggregation.ErrNaNInput, testHandler.Flush())
checkpointed := sdk.Collect(ctx)
require.Equal(t, 0, checkpointed)
@ -183,7 +183,7 @@ func TestInputRangeValueRecorder(t *testing.T) {
integrator.records = nil
checkpointed = sdk.Collect(ctx)
count, err := integrator.records[0].Aggregator().(aggregator.Distribution).Count()
count, err := integrator.records[0].Aggregator().(aggregation.Distribution).Count()
require.Equal(t, int64(2), count)
require.Equal(t, 1, checkpointed)
require.Nil(t, testHandler.Flush())
@ -269,7 +269,7 @@ func TestSDKLabelsDeduplication(t *testing.T) {
var actual [][]kv.KeyValue
for _, rec := range integrator.records {
sum, _ := rec.Aggregator().(aggregator.Sum).Sum()
sum, _ := rec.Aggregator().(aggregation.Sum).Sum()
require.Equal(t, sum, metric.NewInt64Number(2))
kvs := rec.Labels().ToSlice()
@ -392,15 +392,15 @@ func TestSumObserverInputRange(t *testing.T) {
_ = Must(meter).NewFloat64SumObserver("float.sumobserver", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(-2, kv.String("A", "B"))
require.Equal(t, aggregator.ErrNegativeInput, testHandler.Flush())
require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush())
result.Observe(-1, kv.String("C", "D"))
require.Equal(t, aggregator.ErrNegativeInput, testHandler.Flush())
require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush())
})
_ = Must(meter).NewInt64SumObserver("int.sumobserver", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-1, kv.String("A", "B"))
require.Equal(t, aggregator.ErrNegativeInput, testHandler.Flush())
require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush())
result.Observe(-1)
require.Equal(t, aggregator.ErrNegativeInput, testHandler.Flush())
require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush())
})
collected := sdk.Collect(ctx)

View File

@ -21,7 +21,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/resource"
)
@ -119,7 +119,7 @@ func (b *batch) ForEach(f func(export.Record) error) error {
value.labels,
value.resource,
value.aggregator,
)); err != nil && !errors.Is(err, aggregator.ErrNoData) {
)); err != nil && !errors.Is(err, aggregation.ErrNoData) {
return err
}
}

View File

@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/resource"
@ -162,10 +162,10 @@ func (o Output) AddTo(rec export.Record) error {
key := fmt.Sprint(rec.Descriptor().Name(), "/", encoded, "/", rencoded)
var value float64
if s, ok := rec.Aggregator().(aggregator.Sum); ok {
if s, ok := rec.Aggregator().(aggregation.Sum); ok {
sum, _ := s.Sum()
value = sum.CoerceToFloat64(rec.Descriptor().NumberKind())
} else if l, ok := rec.Aggregator().(aggregator.LastValue); ok {
} else if l, ok := rec.Aggregator().(aggregation.LastValue); ok {
last, _, _ := l.LastValue()
value = last.CoerceToFloat64(rec.Descriptor().NumberKind())
} else {

View File

@ -28,7 +28,7 @@ import (
api "go.opentelemetry.io/otel/api/metric"
internal "go.opentelemetry.io/otel/internal/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/resource"
)

View File

@ -35,7 +35,7 @@ import (
"go.opentelemetry.io/otel/api/metric"
api "go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
)
@ -280,14 +280,14 @@ func (f *testFixture) Process(record export.Record) error {
agg := record.Aggregator()
switch record.Descriptor().MetricKind() {
case metric.CounterKind:
sum, err := agg.(aggregator.Sum).Sum()
sum, err := agg.(aggregation.Sum).Sum()
if err != nil {
f.T.Fatal("Sum error: ", err)
}
f.impl.storeCollect(actual, sum, time.Time{})
case metric.ValueRecorderKind:
lv, ts, err := agg.(aggregator.LastValue).LastValue()
if err != nil && err != aggregator.ErrNoData {
lv, ts, err := agg.(aggregation.LastValue).LastValue()
if err != nil && err != aggregation.ErrNoData {
f.T.Fatal("Last value error: ", err)
}
f.impl.storeCollect(actual, lv, ts)