1
0
mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2024-12-24 20:14:40 +02:00

Metrics stdout export pipeline (#265)

* Add MetricAggregator.Merge() implementations

* Update from feedback

* Type

* Ckpt

* Ckpt

* Add push controller

* Ckpt

* Add aggregator interfaces, stdout encoder

* Modify basic main.go

* Main is working

* Batch stdout output

* Sum update

* Rename stdout

* Add stateless/stateful Batcher options

* Undo a for-loop in the example, remove a done TODO

* Update imports

* Add note

* Rename defaultkeys

* Support variable label encoder to speed OpenMetrics/Statsd export

* Lint

* Doc

* Precommit/lint

* Simplify Aggregator API

* Record->Identifier

* Remove export.Record a.k.a. Identifier

* Checkpoint

* Propagate errors to the SDK, remove a bunch of 'TODO warn'

* Checkpoint

* Introduce export.Labels

* Comments in export/metric.go

* Comment

* More merge

* More doc

* Complete example

* Lint fixes

* Add a testable example

* Lint

* Let Export return an error

* add a basic stdout exporter test

* Add measure test; fix aggregator APIs

* Use JSON numbers, not strings

* Test stdout exporter error

* Add a test for the call to RangeTest

* Add error handler API to improve correctness test; return errors from RecordOne

* Undo the previous -- do not expose errors

* Add simple selector variations, test

* Repair examples

* Test push controller error handling

* Add SDK label encoder tests

* Add a defaultkeys batcher test

* Add an ungrouped batcher test

* Lint new tests

* Respond to krnowak's feedback

* Undo comment

* Use concrete receivers for export records and labels, since the constructors return structs not pointers

* Bug fix for stateful batchers; clone an aggregator for long term storage

* Remove TODO addressed in #318

* Add errors to all aggregator interfaces

* Handle ErrNoLastValue case in stdout exporter

* Move aggregator API into sdk/export/metric/aggregator

* Update all aggregator exported-method comments

* Document the aggregator APIs

* More aggregator comments

* Add multiple updates to the ungrouped test

* Fixes for feedback from Gustavo and Liz

* Producer->CheckpointSet; add FinishedCollection

* Process takes an export.Record

* ReadCheckpoint->CheckpointSet

* EncodeLabels->Encode

* Format a better inconsistent type error; add more aggregator API tests

* More RangeTest test coverage

* Make benbjohnson/clock a test-only dependency

* Handle ErrNoLastValue in stress_test
This commit is contained in:
Joshua MacDonald 2019-11-15 13:01:20 -08:00 committed by rghetia
parent c3d5b7b16d
commit 9878f3b700
48 changed files with 3312 additions and 491 deletions

View File

@ -18,6 +18,7 @@ package core
import (
"fmt"
"math"
"sync/atomic"
)
@ -569,6 +570,21 @@ func (n Number) Emit(kind NumberKind) string {
}
}
// AsInterface converts the Number into an interface{} holding the
// kind-appropriate concrete Go value (int64, float64, or uint64),
// typically used for NumberKind-correct JSON conversion. An
// unrecognized kind yields math.NaN() so the mismatch is visible in
// the encoded output rather than silently misinterpreted.
func (n Number) AsInterface(kind NumberKind) interface{} {
	if kind == Int64NumberKind {
		return n.AsInt64()
	}
	if kind == Float64NumberKind {
		return n.AsFloat64()
	}
	if kind == Uint64NumberKind {
		return n.AsUint64()
	}
	return math.NaN()
}
// - private stuff
func (n Number) compareWithZero(kind NumberKind) int {

View File

@ -17,6 +17,8 @@ package core
import (
"testing"
"unsafe"
"github.com/stretchr/testify/require"
)
func TestNumber(t *testing.T) {
@ -157,3 +159,9 @@ func TestNumberZero(t *testing.T) {
t.Errorf("Invalid zero representations")
}
}
func TestNumberAsInterface(t *testing.T) {
require.Equal(t, int64(10), NewInt64Number(10).AsInterface(Int64NumberKind).(int64))
require.Equal(t, float64(11.11), NewFloat64Number(11.11).AsInterface(Float64NumberKind).(float64))
require.Equal(t, uint64(100), NewUint64Number(100).AsInterface(Uint64NumberKind).(uint64))
}

View File

@ -1,5 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7 h1:qELHH0AWCvf98Yf+CNIJx9vOZOfHFDDzgDRYsnNk/vs=
github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
@ -7,6 +8,8 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0 h1:78Jk/r6m4wCi6sndMpty7A//t4dw/RW5fV4ZgDVfX1w=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
@ -21,6 +24,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@ -82,6 +86,7 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@ -107,9 +112,11 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
@ -136,6 +143,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@ -177,6 +185,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -274,6 +283,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@ -282,6 +292,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -17,13 +17,19 @@ package main
import (
"context"
"log"
"time"
"go.opentelemetry.io/otel/api/distributedcontext"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/exporter/trace/stdout"
metricstdout "go.opentelemetry.io/otel/exporter/metric/stdout"
tracestdout "go.opentelemetry.io/otel/exporter/trace/stdout"
"go.opentelemetry.io/otel/global"
metricsdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
@ -37,23 +43,44 @@ var (
// initTracer creates and registers trace provider instance.
func initTracer() {
var err error
exp, err := stdout.NewExporter(stdout.Options{PrettyPrint: false})
exp, err := tracestdout.NewExporter(tracestdout.Options{PrettyPrint: false})
if err != nil {
log.Panicf("failed to initialize stdout exporter %v\n", err)
log.Panicf("failed to initialize trace stdout exporter %v", err)
return
}
tp, err := sdktrace.NewProvider(sdktrace.WithSyncer(exp),
sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}))
if err != nil {
log.Panicf("failed to initialize trace provider %v\n", err)
log.Panicf("failed to initialize trace provider %v", err)
}
global.SetTraceProvider(tp)
}
// initMeter wires up the metrics stdout export pipeline: a simple
// selector, a defaultkeys batcher, and a push controller that flushes
// once per second. The controller is registered as the global meter
// provider and returned so the caller can Stop() it on shutdown.
func initMeter() *push.Controller {
	exp, err := metricstdout.New(metricstdout.Options{
		Quantiles:   []float64{0.5, 0.9, 0.99},
		PrettyPrint: false,
	})
	if err != nil {
		log.Panicf("failed to initialize metric stdout exporter %v", err)
	}
	sel := simple.NewWithExactMeasure()
	// NOTE(review): the trailing boolean configures the batcher's
	// stateful/stateless mode — confirm against defaultkeys.New.
	b := defaultkeys.New(sel, metricsdk.DefaultLabelEncoder(), true)
	ctrl := push.New(b, exp, time.Second)
	ctrl.Start()
	global.SetMeterProvider(ctrl)
	return ctrl
}
func main() {
defer initMeter().Stop()
initTracer()
// Note: Have to get the meter and tracer after the global is
// initialized. See OTEP 0005.
tracer := global.TraceProvider().GetTracer("ex.com/basic")
// TODO: Meter doesn't work yet, check if resources to be shared afterwards.
meter := global.MeterProvider().GetMeter("ex.com/basic")
oneMetric := meter.NewFloat64Gauge("ex.com.one",
@ -70,7 +97,7 @@ func main() {
barKey.String("bar1"),
)
commonLabels := meter.Labels(lemonsKey.Int(10))
commonLabels := meter.Labels(lemonsKey.Int(10), key.String("A", "1"), key.String("B", "2"), key.String("C", "3"))
gauge := oneMetric.AcquireHandle(commonLabels)
defer gauge.Release()

View File

@ -22,6 +22,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=

View File

@ -7,6 +7,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
@ -21,6 +22,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@ -141,6 +143,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@ -182,6 +185,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -283,6 +287,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@ -294,6 +299,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -11,6 +11,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=

View File

@ -7,6 +7,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
@ -21,6 +22,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@ -107,9 +109,11 @@ github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
@ -136,6 +140,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@ -177,6 +182,7 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -274,6 +280,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@ -282,6 +289,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -0,0 +1,214 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stdout // import "go.opentelemetry.io/otel/exporter/metric/stdout"
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"time"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
// Exporter is a demonstration metric exporter that marshals each
// checkpoint set to JSON and writes it to the configured io.Writer
// (os.Stdout by default). Construct it with New.
type Exporter struct {
	options Options // configuration captured (with defaults applied) by New
}
// Compile-time assertion that Exporter satisfies export.Exporter.
var _ export.Exporter = &Exporter{}
// Options are the options to be used when initializing a stdout export.
type Options struct {
	// File is the destination for the JSON output. If not set,
	// os.Stdout is used.
	File io.Writer
	// PrettyPrint indents the JSON output for human readability.
	// Default is false (compact, single-line JSON).
	PrettyPrint bool
	// DoNotPrintTime suppresses timestamp printing. This is
	// useful to create deterministic test conditions.
	DoNotPrintTime bool
	// Quantiles are the desired aggregation quantiles for measure
	// metric data, used when the configured aggregator supports
	// quantiles. Each value must lie in [0, 1]; New rejects
	// out-of-range values with aggregator.ErrInvalidQuantile.
	//
	// Note: this exporter is meant as a demonstration; a real
	// exporter may wish to configure quantiles on a per-metric
	// basis.
	Quantiles []float64
}
// expoBatch is the top-level JSON document emitted once per Export
// call: an optional batch timestamp plus one expoLine per record.
type expoBatch struct {
	Timestamp *time.Time `json:"time,omitempty"`
	Updates []expoLine `json:"updates"`
}
// expoLine is the JSON representation of a single metric record.
// Which fields are populated depends on which aggregator interfaces
// (Sum, MaxSumCount, Distribution, LastValue) the record's aggregator
// implements; unset fields are omitted from the output.
type expoLine struct {
	Name string `json:"name"`
	Max interface{} `json:"max,omitempty"`
	Sum interface{} `json:"sum,omitempty"`
	Count interface{} `json:"count,omitempty"`
	LastValue interface{} `json:"last,omitempty"`
	Quantiles interface{} `json:"quantiles,omitempty"`
	// Note: this is a pointer because omitempty doesn't work when time.IsZero()
	Timestamp *time.Time `json:"time,omitempty"`
}
// expoQuantile is one (quantile, value) pair within an expoLine's
// quantile summary.
type expoQuantile struct {
	Q interface{} `json:"q"`
	V interface{} `json:"v"`
}
// New builds a stdout Exporter from the given Options, filling in
// defaults: os.Stdout when no File is set, and quantiles
// {0.5, 0.9, 0.99} when none are supplied. Explicitly supplied
// quantiles must each lie in [0, 1]; otherwise
// aggregator.ErrInvalidQuantile is returned.
func New(options Options) (*Exporter, error) {
	if options.File == nil {
		options.File = os.Stdout
	}
	if options.Quantiles == nil {
		options.Quantiles = []float64{0.5, 0.9, 0.99}
		return &Exporter{options: options}, nil
	}
	for _, quantile := range options.Quantiles {
		if quantile < 0 || quantile > 1 {
			return nil, aggregator.ErrInvalidQuantile
		}
	}
	return &Exporter{options: options}, nil
}
// Export marshals every record in checkpointSet to a single JSON
// document (an expoBatch) and writes it, newline-terminated, to the
// configured output. Each record contributes one expoLine whose fields
// depend on which aggregator interfaces its aggregator implements.
// Aggregator errors do not abort the export: the affected field is set
// to the string "NaN" and one representative error is returned after
// the batch is written. A JSON marshaling error, by contrast, aborts
// the write and is returned immediately.
func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
	// N.B. Only return one aggError, if any occur. They're likely
	// to be duplicates of the same error.
	var aggError error
	var batch expoBatch
	if !e.options.DoNotPrintTime {
		// Batch-level timestamp: taken once, before iterating records.
		ts := time.Now()
		batch.Timestamp = &ts
	}
	checkpointSet.ForEach(func(record export.Record) {
		desc := record.Descriptor()
		agg := record.Aggregator()
		kind := desc.NumberKind()
		var expose expoLine
		// Sum is checked independently of the interfaces below.
		if sum, ok := agg.(aggregator.Sum); ok {
			if value, err := sum.Sum(); err != nil {
				aggError = err
				expose.Sum = "NaN"
			} else {
				expose.Sum = value.AsInterface(kind)
			}
		}
		// MaxSumCount and LastValue are mutually exclusive branches;
		// Distribution is only probed inside the MaxSumCount branch.
		if msc, ok := agg.(aggregator.MaxSumCount); ok {
			if count, err := msc.Count(); err != nil {
				aggError = err
				expose.Count = "NaN"
			} else {
				expose.Count = count
			}
			// TODO: Should tolerate ErrEmptyDataSet here,
			// just like ErrNoLastValue below, since
			// there's a race condition between creating
			// the Aggregator and updating the first
			// value.
			if max, err := msc.Max(); err != nil {
				aggError = err
				expose.Max = "NaN"
			} else {
				expose.Max = max.AsInterface(kind)
			}
			// Emit quantiles only when the aggregator supports them
			// and the exporter was configured with at least one.
			if dist, ok := agg.(aggregator.Distribution); ok && len(e.options.Quantiles) != 0 {
				summary := make([]expoQuantile, len(e.options.Quantiles))
				expose.Quantiles = summary
				for i, q := range e.options.Quantiles {
					var vstr interface{}
					if value, err := dist.Quantile(q); err != nil {
						aggError = err
						vstr = "NaN"
					} else {
						vstr = value.AsInterface(kind)
					}
					summary[i] = expoQuantile{
						Q: q,
						V: vstr,
					}
				}
			}
		} else if lv, ok := agg.(aggregator.LastValue); ok {
			if value, timestamp, err := lv.LastValue(); err != nil {
				if err == aggregator.ErrNoLastValue {
					// This is a special case, indicates an aggregator that
					// was checkpointed before its first value was set.
					return
				}
				aggError = err
				expose.LastValue = "NaN"
			} else {
				expose.LastValue = value.AsInterface(kind)
				if !e.options.DoNotPrintTime {
					// Per-record timestamp: the time of the last value,
					// not the batch time above.
					expose.Timestamp = &timestamp
				}
			}
		}
		// Record name is "name" or "name{encoded,labels}" when labels
		// are present.
		var sb strings.Builder
		sb.WriteString(desc.Name())
		if labels := record.Labels(); labels.Len() > 0 {
			sb.WriteRune('{')
			sb.WriteString(labels.Encoded())
			sb.WriteRune('}')
		}
		expose.Name = sb.String()
		batch.Updates = append(batch.Updates, expose)
	})
	var data []byte
	var err error
	if e.options.PrettyPrint {
		data, err = json.MarshalIndent(batch, "", "\t")
	} else {
		data, err = json.Marshal(batch)
	}
	if err == nil {
		fmt.Fprintln(e.options.File, string(data))
	} else {
		// Marshaling failure takes precedence over any aggregator error.
		return err
	}
	return aggError
}

View File

@ -0,0 +1,256 @@
package stdout_test
import (
"bytes"
"context"
"encoding/json"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/exporter/metric/stdout"
"go.opentelemetry.io/otel/exporter/metric/test"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
"go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount"
aggtest "go.opentelemetry.io/otel/sdk/metric/aggregator/test"
)
// testFixture bundles a stdout.Exporter with the in-memory buffer it
// writes to, plus the testing handles needed by the helper methods
// below. Build one with newFixture.
type testFixture struct {
	t *testing.T
	ctx context.Context
	exporter *stdout.Exporter
	output *bytes.Buffer
}
// newFixture constructs a testFixture whose exporter writes into an
// in-memory buffer with timestamp printing suppressed, making the
// output deterministic and easy to assert on. Any other fields of
// options are passed through to stdout.New unchanged.
func newFixture(t *testing.T, options stdout.Options) testFixture {
	captured := &bytes.Buffer{}
	options.File = captured
	options.DoNotPrintTime = true
	exporter, err := stdout.New(options)
	if err != nil {
		t.Fatal("Error building fixture: ", err)
	}
	return testFixture{
		t:        t,
		ctx:      context.Background(),
		exporter: exporter,
		output:   captured,
	}
}
// Output returns everything the exporter has written so far, with
// surrounding whitespace (notably the trailing newline) trimmed.
func (fix testFixture) Output() string {
	raw := fix.output.String()
	return strings.TrimSpace(raw)
}
// Export runs the fixture's exporter over checkpointSet and marks the
// owning test failed (without halting it) if the export errors.
func (fix testFixture) Export(checkpointSet export.CheckpointSet) {
	if err := fix.exporter.Export(fix.ctx, checkpointSet); err != nil {
		fix.t.Error("export failed: ", err)
	}
}
// TestStdoutInvalidQuantile verifies that construction rejects a
// quantile outside [0, 1] with aggregator.ErrInvalidQuantile.
func TestStdoutInvalidQuantile(t *testing.T) {
	opts := stdout.Options{
		Quantiles: []float64{1.1, 0.9}, // 1.1 is out of range
	}
	_, err := stdout.New(opts)
	require.Error(t, err, "Invalid quantile error expected")
	require.Equal(t, aggregator.ErrInvalidQuantile, err)
}
// TestStdoutTimestamp verifies that, with DoNotPrintTime left false,
// the exporter emits RFC3339Nano timestamps for both the batch
// ("time") and the individual update, and that both fall inside the
// [before, after] window bracketing the export call.
func TestStdoutTimestamp(t *testing.T) {
	var buf bytes.Buffer
	exporter, err := stdout.New(stdout.Options{
		File:           &buf,
		DoNotPrintTime: false,
	})
	if err != nil {
		t.Fatal("Invalid options: ", err)
	}
	before := time.Now()
	checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	ctx := context.Background()
	desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Int64NumberKind, false)
	gagg := gauge.New()
	aggtest.CheckedUpdate(t, gagg, core.NewInt64Number(321), desc)
	gagg.Checkpoint(ctx, desc)
	checkpointSet.Add(desc, gagg)
	if err := exporter.Export(ctx, checkpointSet); err != nil {
		t.Fatal("Unexpected export error: ", err)
	}
	after := time.Now()
	// Decode the emitted JSON to extract the two timestamp strings.
	var printed map[string]interface{}
	if err := json.Unmarshal(buf.Bytes(), &printed); err != nil {
		t.Fatal("JSON parse error: ", err)
	}
	updateTS := printed["time"].(string)
	updateTimestamp, err := time.Parse(time.RFC3339Nano, updateTS)
	if err != nil {
		t.Fatal("JSON parse error: ", updateTS, ": ", err)
	}
	gaugeTS := printed["updates"].([]interface{})[0].(map[string]interface{})["time"].(string)
	gaugeTimestamp, err := time.Parse(time.RFC3339Nano, gaugeTS)
	if err != nil {
		t.Fatal("JSON parse error: ", gaugeTS, ": ", err)
	}
	// The gauge's update time was recorded during CheckedUpdate, so it
	// must precede the batch's export time.
	require.True(t, updateTimestamp.After(before))
	require.True(t, updateTimestamp.Before(after))
	require.True(t, gaugeTimestamp.After(before))
	require.True(t, gaugeTimestamp.Before(after))
	require.True(t, gaugeTimestamp.Before(updateTimestamp))
}
// TestStdoutCounterFormat checks the compact JSON emitted for a single
// int64 counter update carrying two labels.
func TestStdoutCounterFormat(t *testing.T) {
	fix := newFixture(t, stdout.Options{})
	desc := export.NewDescriptor("test.name", export.CounterKind, nil, "", "", core.Int64NumberKind, false)
	agg := counter.New()
	aggtest.CheckedUpdate(fix.t, agg, core.NewInt64Number(123), desc)
	agg.Checkpoint(fix.ctx, desc)
	set := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	set.Add(desc, agg, key.String("A", "B"), key.String("C", "D"))
	fix.Export(set)
	// The encoded labels appear in {...} after the instrument name.
	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":123}]}`, fix.Output())
}
// TestStdoutGaugeFormat checks the compact JSON emitted for a float64
// gauge: the latest value is reported under "last".
func TestStdoutGaugeFormat(t *testing.T) {
	fix := newFixture(t, stdout.Options{})
	desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Float64NumberKind, false)
	agg := gauge.New()
	aggtest.CheckedUpdate(fix.t, agg, core.NewFloat64Number(123.456), desc)
	agg.Checkpoint(fix.ctx, desc)
	set := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	set.Add(desc, agg, key.String("A", "B"), key.String("C", "D"))
	fix.Export(set)
	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","last":123.456}]}`, fix.Output())
}
// TestStdoutMaxSumCount checks that a measure backed by the
// maxsumcount aggregator reports max, sum, and count fields.
func TestStdoutMaxSumCount(t *testing.T) {
	fix := newFixture(t, stdout.Options{})
	desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false)
	agg := maxsumcount.New()
	for _, v := range []float64{123.456, 876.543} {
		aggtest.CheckedUpdate(fix.t, agg, core.NewFloat64Number(v), desc)
	}
	agg.Checkpoint(fix.ctx, desc)
	set := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	set.Add(desc, agg, key.String("A", "B"), key.String("C", "D"))
	fix.Export(set)
	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","max":876.543,"sum":999.999,"count":2}]}`, fix.Output())
}
// TestStdoutMeasureFormat checks the pretty-printed (MarshalIndent)
// output for a measure instrument backed by the exact (array)
// aggregator, including the default quantiles 0.5, 0.9, and 0.99.
func TestStdoutMeasureFormat(t *testing.T) {
	fix := newFixture(t, stdout.Options{
		PrettyPrint: true,
	})
	checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false)
	magg := array.New()
	// 1000 evenly spaced values 0.5, 1.5, ..., 999.5 make the exact
	// quantiles and the sum easy to predict.
	for i := 0; i < 1000; i++ {
		aggtest.CheckedUpdate(fix.t, magg, core.NewFloat64Number(float64(i)+0.5), desc)
	}
	magg.Checkpoint(fix.ctx, desc)
	checkpointSet.Add(desc, magg, key.String("A", "B"), key.String("C", "D"))
	fix.Export(checkpointSet)
	require.Equal(t, `{
	"updates": [
		{
			"name": "test.name{A=B,C=D}",
			"max": 999.5,
			"sum": 500000,
			"count": 1000,
			"quantiles": [
				{
					"q": 0.5,
					"v": 500.5
				},
				{
					"q": 0.9,
					"v": 900.5
				},
				{
					"q": 0.99,
					"v": 990.5
				}
			]
		}
	]
}`, fix.Output())
}
// TestStdoutAggError exercises the exporter's error path: a ddsketch
// aggregator checkpointed with no data yields ErrEmptyDataSet, which
// the exporter returns while still printing NaN placeholders.
func TestStdoutAggError(t *testing.T) {
	fix := newFixture(t, stdout.Options{})
	checkpointSet := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	desc := export.NewDescriptor("test.name", export.MeasureKind, nil, "", "", core.Float64NumberKind, false)
	// Checkpoint without any update so the data set is empty.
	magg := ddsketch.New(ddsketch.NewDefaultConfig(), desc)
	magg.Checkpoint(fix.ctx, desc)
	checkpointSet.Add(desc, magg)
	err := fix.exporter.Export(fix.ctx, checkpointSet)
	// An error is returned and NaN values are printed.
	require.Error(t, err)
	require.Equal(t, aggregator.ErrEmptyDataSet, err)
	require.Equal(t, `{"updates":[{"name":"test.name","max":"NaN","sum":0,"count":0,"quantiles":[{"q":0.5,"v":"NaN"},{"q":0.9,"v":"NaN"},{"q":0.99,"v":"NaN"}]}]}`, fix.Output())
}
// TestStdoutGaugeNotSet verifies that a gauge checkpointed before any
// update is skipped entirely, leaving a null updates list.
func TestStdoutGaugeNotSet(t *testing.T) {
	fix := newFixture(t, stdout.Options{})
	desc := export.NewDescriptor("test.name", export.GaugeKind, nil, "", "", core.Float64NumberKind, false)
	agg := gauge.New()
	agg.Checkpoint(fix.ctx, desc)
	set := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	set.Add(desc, agg, key.String("A", "B"), key.String("C", "D"))
	fix.Export(set)
	require.Equal(t, `{"updates":null}`, fix.Output())
}

View File

@ -0,0 +1,34 @@
package test
import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
)
// CheckpointSet is a fixed export.CheckpointSet implementation for
// tests: records are accumulated with Add and replayed with ForEach.
type CheckpointSet struct {
	encoder export.LabelEncoder // used to encode label sets passed to Add
	updates []export.Record     // records in insertion order
}
// NewCheckpointSet returns an empty test CheckpointSet that encodes
// label sets with the given encoder.
func NewCheckpointSet(encoder export.LabelEncoder) *CheckpointSet {
	cs := &CheckpointSet{encoder: encoder}
	return cs
}
// Reset discards all accumulated records so the set can be reused.
func (p *CheckpointSet) Reset() {
	p.updates = nil
}
// Add appends a new record built from the descriptor, the aggregator,
// and the given labels (encoded with the set's LabelEncoder).
func (p *CheckpointSet) Add(desc *export.Descriptor, agg export.Aggregator, labels ...core.KeyValue) {
	record := export.NewRecord(
		desc,
		export.NewLabels(labels, p.encoder.Encode(labels), p.encoder),
		agg,
	)
	p.updates = append(p.updates, record)
}
// ForEach invokes f once per accumulated record, in insertion order.
func (p *CheckpointSet) ForEach(f func(export.Record)) {
	for i := range p.updates {
		f(p.updates[i])
	}
}

View File

@ -11,6 +11,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=

View File

@ -21,6 +21,7 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=

1
go.mod
View File

@ -4,6 +4,7 @@ go 1.13
require (
github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7
github.com/benbjohnson/clock v1.0.0
github.com/client9/misspell v0.3.4
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d // indirect

3
go.sum
View File

@ -10,6 +10,8 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/benbjohnson/clock v1.0.0 h1:78Jk/r6m4wCi6sndMpty7A//t4dw/RW5fV4ZgDVfX1w=
github.com/benbjohnson/clock v1.0.0/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bombsimon/wsl v1.2.5 h1:9gTOkIwVtoDZywvX802SDHokeX4kW1cKnV8ZTVAPkRs=
@ -138,6 +140,7 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=

View File

@ -0,0 +1,116 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
import (
"fmt"
"math"
"time"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
)
// These interfaces describe the various ways to access state from an
// Aggregator.
type (
	// Sum returns an aggregated sum.
	Sum interface {
		Sum() (core.Number, error)
	}
	// Count returns the number of values that were aggregated.
	Count interface {
		Count() (int64, error)
	}
	// Max returns the maximum value over the set of values that were aggregated.
	Max interface {
		Max() (core.Number, error)
	}
	// Quantile returns an exact or estimated quantile over the
	// set of values that were aggregated.
	Quantile interface {
		Quantile(float64) (core.Number, error)
	}
	// LastValue returns the latest value that was aggregated.
	LastValue interface {
		LastValue() (core.Number, time.Time, error)
	}
	// MaxSumCount supports the Max, Sum, and Count interfaces.
	MaxSumCount interface {
		Sum
		Count
		Max
	}
	// Distribution supports the Max, Sum, Count, and Quantile
	// interfaces.
	Distribution interface {
		MaxSumCount
		Quantile
	}
)
// NOTE(review): Go convention prefers lowercase, unpunctuated error
// strings; these are kept verbatim because tests compare the exact
// text (see TestInconsistentMergeErr).
var (
	// ErrInvalidQuantile is returned by Quantile implementations
	// when the requested quantile lies outside [0, 1].
	ErrInvalidQuantile = fmt.Errorf("The requested quantile is out of range")
	// ErrNegativeInput is returned by RangeTest when a negative
	// value is recorded for an instrument that does not permit
	// negatives (monotonic counters, absolute measures).
	ErrNegativeInput = fmt.Errorf("Negative value is out of range for this instrument")
	// ErrNaNInput is returned by RangeTest when a NaN value is
	// recorded for a float64 instrument.
	ErrNaNInput = fmt.Errorf("NaN value is an invalid input")
	// ErrNonMonotoneInput presumably signals a decreasing update to
	// a monotonic instrument — TODO confirm at the call sites.
	ErrNonMonotoneInput = fmt.Errorf("The new value is not monotone")
	// ErrInconsistentType is the sentinel wrapped by
	// NewInconsistentMergeError when aggregators of different
	// concrete types are merged.
	ErrInconsistentType = fmt.Errorf("Inconsistent aggregator types")
	// ErrNoLastValue is returned by the LastValue interface when
	// (due to a race with collection) the Aggregator is
	// checkpointed before the first value is set. The aggregator
	// should simply be skipped in this case.
	ErrNoLastValue = fmt.Errorf("No value has been set")
	// ErrEmptyDataSet is returned by Max and Quantile interfaces
	// when (due to a race with collection) the Aggregator is
	// checkpointed before the first value is set. The aggregator
	// should simply be skipped in this case.
	ErrEmptyDataSet = fmt.Errorf("The result is not defined on an empty data set")
)
// NewInconsistentMergeError formats an error describing an attempt to
// merge different-type aggregators. Because the sentinel is wrapped
// with %w, the result can be unwrapped as an ErrInconsistentType via
// errors.Is/errors.Unwrap.
func NewInconsistentMergeError(a1, a2 export.Aggregator) error {
	return fmt.Errorf("Cannot merge %T with %T: %w", a1, a2, ErrInconsistentType)
}
// RangeTest is a common routine for testing for valid input values.
// This rejects NaN values. This rejects negative values when the
// metric instrument does not support negative values, including
// monotonic counter metrics and absolute measure metrics.
func RangeTest(number core.Number, descriptor *export.Descriptor) error {
	numberKind := descriptor.NumberKind()
	// NaN is only representable by (and thus only rejected for)
	// float64 instruments.
	if numberKind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) {
		return ErrNaNInput
	}
	switch descriptor.MetricKind() {
	case export.CounterKind, export.MeasureKind:
		// Alternate() selects the non-default behavior — a
		// non-monotonic counter or non-absolute measure — both of
		// which accept negative input. Gauges always accept it.
		if !descriptor.Alternate() && number.IsNegative(numberKind) {
			return ErrNegativeInput
		}
	}
	return nil
}

View File

@ -0,0 +1,105 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregator_test // import "go.opentelemetry.io/otel/sdk/metric/aggregator"
import (
"errors"
"fmt"
"math"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
)
// TestInconsistentMergeErr pins both the formatted message of an
// inconsistent-merge error and its errors.Is relationship to the
// ErrInconsistentType sentinel.
func TestInconsistentMergeErr(t *testing.T) {
	err := aggregator.NewInconsistentMergeError(counter.New(), gauge.New())
	msg := err.Error()
	require.Equal(
		t,
		"Cannot merge *counter.Aggregator with *gauge.Aggregator: Inconsistent aggregator types",
		msg,
	)
	// The sentinel must stay reachable through the %w wrapping.
	require.True(t, errors.Is(err, aggregator.ErrInconsistentType))
}
// testRangeNaN asserts that RangeTest rejects NaN for float64
// descriptors and accepts it (as an ordinary int bit pattern) for
// int64 descriptors.
func testRangeNaN(t *testing.T, desc *export.Descriptor) {
	// If the descriptor uses int64 numbers, this won't register as NaN.
	err := aggregator.RangeTest(core.NewFloat64Number(math.NaN()), desc)
	if desc.NumberKind() != core.Float64NumberKind {
		require.Nil(t, err)
		return
	}
	require.Equal(t, aggregator.ErrNaNInput, err)
}
// testRangeNegative asserts RangeTest's sign handling: +1 always
// passes; -1 passes for gauges unconditionally and for counters or
// measures only when the alternate (non-monotonic / non-absolute)
// behavior is selected.
func testRangeNegative(t *testing.T, alt bool, desc *export.Descriptor) {
	var pos, neg core.Number
	switch desc.NumberKind() {
	case core.Float64NumberKind:
		pos, neg = core.NewFloat64Number(+1), core.NewFloat64Number(-1)
	default:
		pos, neg = core.NewInt64Number(+1), core.NewInt64Number(-1)
	}
	require.Nil(t, aggregator.RangeTest(pos, desc))
	negErr := aggregator.RangeTest(neg, desc)
	if desc.MetricKind() == export.GaugeKind {
		require.Nil(t, negErr)
		return
	}
	require.Equal(t, negErr == nil, alt)
}
// TestRangeTest runs RangeTest across the full matrix of number kind
// (float64/int64), metric kind (counter/gauge/measure), and the
// Alternate flag, delegating assertions to testRangeNaN and
// testRangeNegative. Nested t.Run calls name each combination.
func TestRangeTest(t *testing.T) {
	for _, nkind := range []core.NumberKind{core.Float64NumberKind, core.Int64NumberKind} {
		t.Run(nkind.String(), func(t *testing.T) {
			for _, mkind := range []export.MetricKind{
				export.CounterKind,
				export.GaugeKind,
				export.MeasureKind,
			} {
				t.Run(mkind.String(), func(t *testing.T) {
					for _, alt := range []bool{true, false} {
						t.Run(fmt.Sprint(alt), func(t *testing.T) {
							desc := export.NewDescriptor(
								"name",
								mkind,
								nil,
								"",
								"",
								nkind,
								alt,
							)
							testRangeNaN(t, desc)
							testRangeNegative(t, alt, desc)
						})
					}
				})
			}
		})
	}
}

View File

@ -12,7 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/sdk/export/metric"
package export
//go:generate stringer -type=MetricKind
import (
"context"
@ -21,70 +23,288 @@ import (
"go.opentelemetry.io/otel/api/unit"
)
// Batcher is responsible for deciding which kind of aggregation
// to use and gathering exported results from the SDK. The standard SDK
// supports binding only one of these interfaces, i.e., a single exporter.
// Batcher is responsible for deciding which kind of aggregation to
// use (via AggregationSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// Multiple-exporters could be implemented by implementing this interface
// for a group of Batcher.
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregationSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The `Process` method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the batcher to build the set of metrics
// currently being exported.
//
// The `CheckpointSet` method is called during collection in a
// single-threaded context from the Exporter, giving the exporter
// access to a producer for iterating over the complete checkpoint.
type Batcher interface {
// AggregatorFor should return the kind of aggregator
// suited to the requested export. Returning `nil`
// indicates to ignore the metric update.
// AggregationSelector is responsible for selecting the
// concrete type of Aggregator used for a metric in the SDK.
//
// Note: This is context-free because the handle should not be
// bound to the incoming context. This call should not block.
AggregatorFor(Record) Aggregator
// This may be a static decision based on fields of the
// Descriptor, or it could use an external configuration
// source to customize the treatment of each metric
// instrument.
//
// The result from AggregatorSelector.AggregatorFor should be
// the same type for a given Descriptor or else nil. The same
// type should be returned for a given descriptor, because
// Aggregators only know how to Merge with their own type. If
// the result is nil, the metric instrument will be disabled.
//
// Note that the SDK only calls AggregatorFor when new records
// require an Aggregator. This does not provide a way to
// disable metrics with active records.
AggregationSelector
// Export receives pairs of records and aggregators
// during the SDK Collect(). Exporter implementations
// must access the specific aggregator to receive the
// exporter data, since the format of the data varies
// by aggregation.
Export(context.Context, Record, Aggregator)
// Process is called by the SDK once per internal record,
// passing the export Record (a Descriptor, the corresponding
// Labels, and the checkpointed Aggregator). The Batcher
// should be prepared to process duplicate (Descriptor,
// Labels) pairs during this pass due to race conditions, but
// this will usually be the ordinary course of events, as
// Aggregators are typically merged according the output set
// of labels.
//
// The Context argument originates from the controller that
// orchestrates collection.
Process(ctx context.Context, record Record) error
// CheckpointSet is the interface used by the controller to
// access the fully aggregated checkpoint after collection.
//
// The returned CheckpointSet is passed to the Exporter.
CheckpointSet() CheckpointSet
// FinishedCollection informs the Batcher that a complete
// collection round was completed. Stateless batchers might
// reset state in this method, for example.
FinishedCollection()
}
// Aggregator implements a specific aggregation behavior, e.g.,
// a counter, a gauge, a histogram.
// AggregationSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregationSelector interface {
// AggregatorFor returns the kind of aggregator suited to the
// requested export. Returning `nil` indicates to ignore this
// metric instrument. This must return a consistent type to
// avoid confusion in later stages of the metrics export
// process, i.e., when Merging multiple aggregators for a
// specific instrument.
//
// Note: This is context-free because the aggregator should
// not relate to the incoming context. This call should not
// block.
AggregatorFor(*Descriptor) Aggregator
}
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to a counter, a gauge, or a
// measure instrument. For the most part, counter and gauge semantics
// are fixed and the provided implementations should be used. Measure
// metrics offer a wide range of potential tradeoffs and several
// implementations are provided.
//
// Aggregators are meant to compute the change (i.e., delta) in state
// from one checkpoint to the next, with the exception of gauge
// aggregators. Gauge aggregators are required to maintain the last
// value across checkpoints to implement montonic gauge support.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a counter aggregator to a measure instrument (to compute
// a simple sum) or a gauge instrument to a measure instrument (to
// compute the last value).
type Aggregator interface {
// Update receives a new measured value and incorporates it
// into the aggregation.
Update(context.Context, core.Number, Record)
// into the aggregation. Update() calls may arrive
// concurrently as the SDK does not provide synchronization.
//
// Descriptor.NumberKind() should be consulted to determine
// whether the provided number is an int64 or float64.
//
// The Context argument comes from user-level code and could be
// inspected for distributed or span context.
Update(context.Context, core.Number, *Descriptor) error
// Collect is called during the SDK Collect() to
// finish one period of aggregation. Collect() is
// called in a single-threaded context. Update()
// calls may arrive concurrently.
Collect(context.Context, Record, Batcher)
// Checkpoint is called during collection to finish one period
// of aggregation by atomically saving the current value.
// Checkpoint() is called concurrently with Update().
// Checkpoint should reset the current state to the empty
// state, in order to begin computing a new delta for the next
// collection period.
//
// After the checkpoint is taken, the current value may be
// accessed using by converting to one a suitable interface
// types in the `aggregator` sub-package.
//
// The Context argument originates from the controller that
// orchestrates collection.
Checkpoint(context.Context, *Descriptor)
// Merge combines state from two aggregators into one.
Merge(Aggregator, *Descriptor)
// Merge combines the checkpointed state from the argument
// aggregator into this aggregator's checkpointed state.
// Merge() is called in a single-threaded context, no locking
// is required.
Merge(Aggregator, *Descriptor) error
}
// Record is the unit of export, pairing a metric
// instrument and set of labels.
type Record interface {
// Descriptor() describes the metric instrument.
Descriptor() *Descriptor
// Labels() describe the labsels corresponding the
// aggregation being performed.
Labels() []core.KeyValue
// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
// Export is called immediately after completing a collection
// pass in the SDK.
//
// The Context comes from the controller that initiated
// collection.
//
// The CheckpointSet interface refers to the Batcher that just
// completed collection.
Export(context.Context, CheckpointSet) error
}
// Kind describes the kind of instrument.
type Kind int8
// LabelEncoder enables an optimization for export pipelines that use
// text to encode their label sets.
//
// This interface allows configuring the encoder used in the SDK
// and/or the Batcher so that by the time the exporter is called, the
// same encoding may be used.
//
// If none is provided, a default will be used.
type LabelEncoder interface {
// Encode is called (concurrently) in instrumentation context.
// It should return a unique representation of the labels
// suitable for the SDK to use as a map key.
//
// The exported Labels object retains a reference to its
// LabelEncoder to determine which encoding was used.
//
// The expectation is that Exporters with a pre-determined to
// syntax for serialized label sets should implement
// LabelEncoder, thus avoiding duplicate computation in the
// export path.
Encode([]core.KeyValue) string
}
// CheckpointSet allows a controller to access a complete checkpoint of
// aggregated metrics from the Batcher. This is passed to the
// Exporter which may then use ForEach to iterate over the collection
// of aggregated metrics.
type CheckpointSet interface {
// ForEach iterates over aggregated checkpoints for all
// metrics that were updated during the last collection
// period.
ForEach(func(Record))
}
// Record contains the exported data for a single metric instrument
// and label set.
type Record struct {
	descriptor *Descriptor
	labels     Labels
	aggregator Aggregator
}

// Labels stores complete information about a computed label set,
// including the labels in an appropriate order (as defined by the
// Batcher). If the batcher does not re-order labels, they are
// presented in sorted order by the SDK.
type Labels struct {
	ordered []core.KeyValue
	encoded string
	encoder LabelEncoder
}

// NewLabels builds a Labels object, consisting of an ordered set of
// labels, a unique encoded representation, and the encoder that
// produced it. The caller is responsible for the invariant that
// encoded is the encoder's encoding of ordered.
func NewLabels(ordered []core.KeyValue, encoded string, encoder LabelEncoder) Labels {
	return Labels{
		ordered: ordered,
		encoded: encoded,
		encoder: encoder,
	}
}

// Ordered returns the labels in a specified order, according to the
// Batcher.
func (l Labels) Ordered() []core.KeyValue {
	return l.ordered
}

// Encoded is a pre-encoded form of the ordered labels.
func (l Labels) Encoded() string {
	return l.encoded
}

// Encoder is the encoder that computed the Encoded() representation.
func (l Labels) Encoder() LabelEncoder {
	return l.encoder
}

// Len returns the number of labels.
func (l Labels) Len() int {
	return len(l.ordered)
}

// NewRecord allows Batcher implementations to construct export
// records. The Descriptor, Labels, and Aggregator represent
// aggregate metric events received over a single collection period.
func NewRecord(descriptor *Descriptor, labels Labels, aggregator Aggregator) Record {
	return Record{
		descriptor: descriptor,
		labels:     labels,
		aggregator: aggregator,
	}
}

// Aggregator returns the checkpointed aggregator. It is safe to
// access the checkpointed state without locking.
func (r Record) Aggregator() Aggregator {
	return r.aggregator
}

// Descriptor describes the metric instrument being exported.
func (r Record) Descriptor() *Descriptor {
	return r.descriptor
}

// Labels describes the labels associated with the instrument and the
// aggregated data.
func (r Record) Labels() Labels {
	return r.labels
}
// MetricKind describes the kind of instrument.
type MetricKind int8
const (
CounterKind Kind = iota
// Counter kind indicates a counter instrument.
CounterKind MetricKind = iota
// Gauge kind indicates a gauge instrument.
GaugeKind
// Measure kind indicates a measure instrument.
MeasureKind
)
// Descriptor describes a metric instrument to the exporter.
//
// Descriptors are created once per instrument and a pointer to the
// descriptor may be used to uniquely identify the instrument in an
// exporter.
type Descriptor struct {
name string
metricKind Kind
metricKind MetricKind
keys []core.Key
description string
unit unit.Unit
@ -93,10 +313,14 @@ type Descriptor struct {
}
// NewDescriptor builds a new descriptor, for use by `Meter`
// implementations to interface with a metric export pipeline.
// implementations in constructing new metric instruments.
//
// Descriptors are created once per instrument and a pointer to the
// descriptor may be used to uniquely identify the instrument in an
// exporter.
func NewDescriptor(
name string,
metricKind Kind,
metricKind MetricKind,
keys []core.Key,
description string,
unit unit.Unit,
@ -114,30 +338,51 @@ func NewDescriptor(
}
}
// Name returns the metric instrument's name.
func (d *Descriptor) Name() string {
return d.name
}
func (d *Descriptor) MetricKind() Kind {
// MetricKind returns the kind of instrument: counter, gauge, or
// measure.
func (d *Descriptor) MetricKind() MetricKind {
return d.metricKind
}
// Keys returns the recommended keys included in the metric
// definition. These keys may be used by a Batcher as a default set
// of grouping keys for the metric instrument.
func (d *Descriptor) Keys() []core.Key {
return d.keys
}
// Description provides a human-readable description of the metric
// instrument.
func (d *Descriptor) Description() string {
return d.description
}
// Unit describes the units of the metric instrument. Unitless
// metrics return the empty string.
func (d *Descriptor) Unit() unit.Unit {
return d.unit
}
// NumberKind returns whether this instrument is declared over int64
// or a float64 values.
func (d *Descriptor) NumberKind() core.NumberKind {
return d.numberKind
}
// Alternate returns true when the non-default behavior of the
// instrument was selected. It returns true if:
//
// - A counter instrument is non-monotonic
// - A gauge instrument is monotonic
// - A measure instrument is non-absolute
//
// TODO: Consider renaming this method, or expanding to provide
// kind-specific tests (e.g., Monotonic(), Absolute()).
func (d *Descriptor) Alternate() bool {
return d.alternate
}

View File

@ -0,0 +1,25 @@
// Code generated by "stringer -type=MetricKind"; DO NOT EDIT.
package export
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CounterKind-0]
_ = x[GaugeKind-1]
_ = x[MeasureKind-2]
}
const _MetricKind_name = "CounterKindGaugeKindMeasureKind"
var _MetricKind_index = [...]uint8{0, 11, 20, 31}
func (i MetricKind) String() string {
if i < 0 || i >= MetricKind(len(_MetricKind_index)-1) {
return "MetricKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _MetricKind_name[_MetricKind_index[i]:_MetricKind_index[i+1]]
}

View File

@ -23,7 +23,7 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
type (
@ -38,44 +38,55 @@ type (
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MaxSumCount = &Aggregator{}
var _ aggregator.Distribution = &Aggregator{}
// New returns a new array aggregator, which aggregates recorded
// measurements by storing them in an array. This type uses a mutex
// for Update() and Checkpoint() concurrency.
func New() *Aggregator {
return &Aggregator{}
}
// Sum returns the sum of the checkpoint.
func (c *Aggregator) Sum() core.Number {
return c.ckptSum
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (core.Number, error) {
return c.ckptSum, nil
}
// Count returns the count of the checkpoint.
func (c *Aggregator) Count() int64 {
return int64(len(c.checkpoint))
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (int64, error) {
return int64(len(c.checkpoint)), nil
}
// Max returns the maximum value in the checkpoint, implemented as
// the 1.0 quantile of the sorted data.
func (c *Aggregator) Max() (core.Number, error) {
	return c.checkpoint.Quantile(1)
}
// Min returns the minimum value in the checkpoint, implemented as
// the 0.0 quantile of the sorted data.
func (c *Aggregator) Min() (core.Number, error) {
	return c.checkpoint.Quantile(0)
}
// Quantile returns the estimated quantile of data in the checkpoint.
// It is an error if `q` is less than 0 or greater than 1.
func (c *Aggregator) Quantile(q float64) (core.Number, error) {
	return c.checkpoint.Quantile(q)
}
func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) {
// Checkpoint saves the current state and resets the current state to
// the empty set, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) Checkpoint(ctx context.Context, desc *export.Descriptor) {
c.lock.Lock()
c.checkpoint, c.current = c.current, nil
c.lock.Unlock()
desc := rec.Descriptor()
kind := desc.NumberKind()
// TODO: This sort should be done lazily, only when quantiles
// are requested. The SDK specification says you can use this
// aggregator to simply list values in the order they were
// received as an alternative to requesting quantile information.
c.sort(kind)
c.ckptSum = core.Number(0)
@ -83,39 +94,28 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.
for _, v := range c.checkpoint {
c.ckptSum.AddNumber(kind, v)
}
exp.Export(ctx, rec, c)
}
func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) {
desc := rec.Descriptor()
kind := desc.NumberKind()
if kind == core.Float64NumberKind && math.IsNaN(number.AsFloat64()) {
// TODO warn
// NOTE: add this to the specification.
return
}
if !desc.Alternate() && number.IsNegative(kind) {
// TODO warn
return
}
// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and Checkpoint()
// calls.
func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error {
c.lock.Lock()
c.current = append(c.current, number)
c.lock.Unlock()
return nil
}
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) {
// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
// TODO warn
return
return aggregator.NewInconsistentMergeError(c, oa)
}
c.ckptSum.AddNumber(desc.NumberKind(), o.ckptSum)
c.checkpoint = combine(c.checkpoint, o.checkpoint, desc.NumberKind())
return nil
}
func (c *Aggregator) sort(kind core.NumberKind) {
@ -166,7 +166,8 @@ func (p *Points) Swap(i, j int) {
}
// Quantile returns the least X such that Pr(x<X)>=q, where X is an
// element of the data set.
// element of the data set. This uses the "Nearest-Rank" definition
// of a quantile.
func (p *Points) Quantile(q float64) (core.Number, error) {
if len(*p) == 0 {
return core.Number(0), aggregator.ErrEmptyDataSet
@ -182,9 +183,6 @@ func (p *Points) Quantile(q float64) (core.Number, error) {
return (*p)[len(*p)-1], nil
}
// Note: There's no interpolation being done here. There are
// many definitions for "quantile", some interpolate, some do
// not. What is expected?
position := float64(len(*p)-1) * q
ceil := int(math.Ceil(position))
return (*p)[ceil], nil

View File

@ -24,7 +24,7 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/test"
)
@ -34,9 +34,7 @@ type updateTest struct {
}
func (ut *updateTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute)
descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute)
agg := New()
@ -45,25 +43,30 @@ func (ut *updateTest) run(t *testing.T, profile test.Profile) {
for i := 0; i < ut.count; i++ {
x := profile.Random(+1)
all.Append(x)
agg.Update(ctx, x, record)
test.CheckedUpdate(t, agg, x, descriptor)
if !ut.absolute {
y := profile.Random(-1)
all.Append(y)
agg.Update(ctx, y, record)
test.CheckedUpdate(t, agg, y, descriptor)
}
}
agg.Collect(ctx, record, batcher)
ctx := context.Background()
agg.Checkpoint(ctx, descriptor)
all.Sort()
sum, err := agg.Sum()
require.InEpsilon(t,
all.Sum().CoerceToFloat64(profile.NumberKind),
agg.Sum().CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
0.0000001,
"Same sum - absolute")
require.Equal(t, all.Count(), agg.Count(), "Same count - absolute")
require.Nil(t, err)
count, err := agg.Count()
require.Nil(t, err)
require.Equal(t, all.Count(), count, "Same count - absolute")
min, err := agg.Min()
require.Nil(t, err)
@ -106,7 +109,7 @@ type mergeTest struct {
func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute)
descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute)
agg1 := New()
agg2 := New()
@ -116,36 +119,40 @@ func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
for i := 0; i < mt.count; i++ {
x1 := profile.Random(+1)
all.Append(x1)
agg1.Update(ctx, x1, record)
test.CheckedUpdate(t, agg1, x1, descriptor)
x2 := profile.Random(+1)
all.Append(x2)
agg2.Update(ctx, x2, record)
test.CheckedUpdate(t, agg2, x2, descriptor)
if !mt.absolute {
y1 := profile.Random(-1)
all.Append(y1)
agg1.Update(ctx, y1, record)
test.CheckedUpdate(t, agg1, y1, descriptor)
y2 := profile.Random(-1)
all.Append(y2)
agg2.Update(ctx, y2, record)
test.CheckedUpdate(t, agg2, y2, descriptor)
}
}
agg1.Collect(ctx, record, batcher)
agg2.Collect(ctx, record, batcher)
agg1.Checkpoint(ctx, descriptor)
agg2.Checkpoint(ctx, descriptor)
agg1.Merge(agg2, record.Descriptor())
test.CheckedMerge(t, agg1, agg2, descriptor)
all.Sort()
sum, err := agg1.Sum()
require.InEpsilon(t,
all.Sum().CoerceToFloat64(profile.NumberKind),
agg1.Sum().CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
0.0000001,
"Same sum - absolute")
require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute")
require.Nil(t, err)
count, err := agg1.Count()
require.Nil(t, err)
require.Equal(t, all.Count(), count, "Same count - absolute")
min, err := agg1.Min()
require.Nil(t, err)
@ -198,16 +205,18 @@ func TestArrayErrors(t *testing.T) {
ctx := context.Background()
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false)
descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false)
agg.Update(ctx, core.Number(0), record)
test.CheckedUpdate(t, agg, core.Number(0), descriptor)
if profile.NumberKind == core.Float64NumberKind {
agg.Update(ctx, core.NewFloat64Number(math.NaN()), record)
test.CheckedUpdate(t, agg, core.NewFloat64Number(math.NaN()), descriptor)
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, descriptor)
require.Equal(t, int64(1), agg.Count(), "NaN value was not counted")
count, err := agg.Count()
require.Equal(t, int64(1), count, "NaN value was not counted")
require.Nil(t, err)
num, err := agg.Quantile(0)
require.Nil(t, err)
@ -226,7 +235,7 @@ func TestArrayErrors(t *testing.T) {
func TestArrayFloat64(t *testing.T) {
for _, absolute := range []bool{false, true} {
t.Run(fmt.Sprint("Absolute=", absolute), func(t *testing.T) {
batcher, record := test.NewAggregatorTest(export.MeasureKind, core.Float64NumberKind, !absolute)
descriptor := test.NewAggregatorTest(export.MeasureKind, core.Float64NumberKind, !absolute)
fpsf := func(sign int) []float64 {
// Check behavior of a bunch of odd floating
@ -263,23 +272,27 @@ func TestArrayFloat64(t *testing.T) {
for _, f := range fpsf(1) {
all.Append(core.NewFloat64Number(f))
agg.Update(ctx, core.NewFloat64Number(f), record)
test.CheckedUpdate(t, agg, core.NewFloat64Number(f), descriptor)
}
if !absolute {
for _, f := range fpsf(-1) {
all.Append(core.NewFloat64Number(f))
agg.Update(ctx, core.NewFloat64Number(f), record)
test.CheckedUpdate(t, agg, core.NewFloat64Number(f), descriptor)
}
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, descriptor)
all.Sort()
require.InEpsilon(t, all.Sum().AsFloat64(), agg.Sum().AsFloat64(), 0.0000001, "Same sum")
sum, err := agg.Sum()
require.InEpsilon(t, all.Sum().AsFloat64(), sum.AsFloat64(), 0.0000001, "Same sum")
require.Nil(t, err)
require.Equal(t, all.Count(), agg.Count(), "Same count")
count, err := agg.Count()
require.Equal(t, all.Count(), count, "Same count")
require.Nil(t, err)
min, err := agg.Min()
require.Nil(t, err)

View File

@ -19,6 +19,7 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
// Aggregator aggregates counter events.
@ -26,47 +27,44 @@ type Aggregator struct {
// current holds current increments to this counter record
current core.Number
// checkpoint is a temporary used during Collect()
// checkpoint is a temporary used during Checkpoint()
checkpoint core.Number
}
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Sum = &Aggregator{}
// New returns a new counter aggregator implemented by atomic
// operations. This aggregator implements the aggregator.Sum
// export interface. The zero value is ready to use.
func New() *Aggregator {
	return &Aggregator{}
}
// AsNumber returns the accumulated count as an int64.
func (c *Aggregator) AsNumber() core.Number {
return c.checkpoint.AsNumber()
// Sum returns the last-checkpointed sum. This will never return an
// error.
func (c *Aggregator) Sum() (core.Number, error) {
return c.checkpoint, nil
}
// Collect checkpoints the current value (atomically) and exports it.
func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) {
// Checkpoint atomically saves the current value and resets the
// current sum to zero.
func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) {
c.checkpoint = c.current.SwapNumberAtomic(core.Number(0))
exp.Export(ctx, rec, c)
}
// Update modifies the current value (atomically) for later export.
func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) {
desc := rec.Descriptor()
kind := desc.NumberKind()
if !desc.Alternate() && number.IsNegative(kind) {
// TODO warn
return
}
c.current.AddNumberAtomic(kind, number)
// Update atomically adds to the current value.
func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error {
c.current.AddNumberAtomic(desc.NumberKind(), number)
return nil
}
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) {
// Merge combines two counters by adding their sums.
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
// TODO warn
return
return aggregator.NewInconsistentMergeError(c, oa)
}
c.checkpoint.AddNumber(desc.NumberKind(), o.checkpoint)
return nil
}

View File

@ -33,18 +33,20 @@ func TestCounterMonotonic(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false)
descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false)
sum := core.Number(0)
for i := 0; i < count; i++ {
x := profile.Random(+1)
sum.AddNumber(profile.NumberKind, x)
agg.Update(ctx, x, record)
test.CheckedUpdate(t, agg, x, descriptor)
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, descriptor)
require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic")
asum, err := agg.Sum()
require.Equal(t, sum, asum, "Same sum - monotonic")
require.Nil(t, err)
})
}
@ -54,17 +56,19 @@ func TestCounterMonotonicNegative(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false)
descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false)
for i := 0; i < count; i++ {
agg.Update(ctx, profile.Random(-1), record)
test.CheckedUpdate(t, agg, profile.Random(-1), descriptor)
}
sum := profile.Random(+1)
agg.Update(ctx, sum, record)
agg.Collect(ctx, record, batcher)
test.CheckedUpdate(t, agg, sum, descriptor)
agg.Checkpoint(ctx, descriptor)
require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic")
asum, err := agg.Sum()
require.Equal(t, sum, asum, "Same sum - monotonic")
require.Nil(t, err)
})
}
@ -74,7 +78,7 @@ func TestCounterNonMonotonic(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, true)
descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, true)
sum := core.Number(0)
for i := 0; i < count; i++ {
@ -82,13 +86,15 @@ func TestCounterNonMonotonic(t *testing.T) {
y := profile.Random(-1)
sum.AddNumber(profile.NumberKind, x)
sum.AddNumber(profile.NumberKind, y)
agg.Update(ctx, x, record)
agg.Update(ctx, y, record)
test.CheckedUpdate(t, agg, x, descriptor)
test.CheckedUpdate(t, agg, y, descriptor)
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, descriptor)
require.Equal(t, sum, agg.AsNumber(), "Same sum - monotonic")
asum, err := agg.Sum()
require.Equal(t, sum, asum, "Same sum - monotonic")
require.Nil(t, err)
})
}
@ -99,23 +105,25 @@ func TestCounterMerge(t *testing.T) {
agg1 := New()
agg2 := New()
batcher, record := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false)
descriptor := test.NewAggregatorTest(export.CounterKind, profile.NumberKind, false)
sum := core.Number(0)
for i := 0; i < count; i++ {
x := profile.Random(+1)
sum.AddNumber(profile.NumberKind, x)
agg1.Update(ctx, x, record)
agg2.Update(ctx, x, record)
test.CheckedUpdate(t, agg1, x, descriptor)
test.CheckedUpdate(t, agg2, x, descriptor)
}
agg1.Collect(ctx, record, batcher)
agg2.Collect(ctx, record, batcher)
agg1.Checkpoint(ctx, descriptor)
agg2.Checkpoint(ctx, descriptor)
agg1.Merge(agg2, record.Descriptor())
test.CheckedMerge(t, agg1, agg2, descriptor)
sum.AddNumber(record.Descriptor().NumberKind(), sum)
sum.AddNumber(descriptor.NumberKind(), sum)
require.Equal(t, sum, agg1.AsNumber(), "Same sum - monotonic")
asum, err := agg1.Sum()
require.Equal(t, sum, asum, "Same sum - monotonic")
require.Nil(t, err)
})
}

View File

@ -23,22 +23,27 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
// Config is an alias for the underlying DDSketch config object.
type Config = sdk.Config
// Aggregator aggregates measure events.
type Aggregator struct {
lock sync.Mutex
cfg *sdk.Config
cfg *Config
kind core.NumberKind
current *sdk.DDSketch
checkpoint *sdk.DDSketch
}
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MaxSumCount = &Aggregator{}
var _ aggregator.Distribution = &Aggregator{}
// New returns a new DDSketch aggregator.
func New(cfg *sdk.Config, desc *export.Descriptor) *Aggregator {
func New(cfg *Config, desc *export.Descriptor) *Aggregator {
return &Aggregator{
cfg: cfg,
kind: desc.NumberKind(),
@ -48,35 +53,39 @@ func New(cfg *sdk.Config, desc *export.Descriptor) *Aggregator {
// NewDefaultConfig returns a new, default DDSketch config.
//
// TODO: The Config constructor should probably set minValue to -Inf
// to aggregate metrics with absolute=false. This requires providing values
// for alpha and maxNumBins
func NewDefaultConfig() *sdk.Config {
// TODO: Should the Config constructor set minValue to -Inf to
// when the descriptor has absolute=false? This requires providing
// values for alpha and maxNumBins, apparently.
func NewDefaultConfig() *Config {
return sdk.NewDefaultConfig()
}
// Sum returns the sum of the checkpoint.
func (c *Aggregator) Sum() core.Number {
return c.toNumber(c.checkpoint.Sum())
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (core.Number, error) {
return c.toNumber(c.checkpoint.Sum()), nil
}
// Count returns the count of the checkpoint.
func (c *Aggregator) Count() int64 {
return c.checkpoint.Count()
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (int64, error) {
return c.checkpoint.Count(), nil
}
// Max returns the maximum value in the checkpoint, implemented as
// the 1.0 quantile estimate.
func (c *Aggregator) Max() (core.Number, error) {
	return c.Quantile(1)
}
// Min returns the minimum value in the checkpoint, implemented as
// the 0.0 quantile estimate.
func (c *Aggregator) Min() (core.Number, error) {
	return c.Quantile(0)
}
// Quantile returns the estimated quantile of the checkpoint.
// Quantile returns the estimated quantile of data in the checkpoint.
// It is an error if `q` is less than 0 or greated than 1.
func (c *Aggregator) Quantile(q float64) (core.Number, error) {
if c.checkpoint.Count() == 0 {
return core.Number(0), aggregator.ErrEmptyDataSet
}
f := c.checkpoint.Quantile(q)
if math.IsNaN(f) {
return core.Number(0), aggregator.ErrInvalidQuantile
@ -91,41 +100,34 @@ func (c *Aggregator) toNumber(f float64) core.Number {
return core.NewInt64Number(int64(f))
}
// Collect checkpoints the current value (atomically) and exports it.
func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) {
// Checkpoint saves the current state and resets the current state to
// the empty set, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) {
replace := sdk.NewDDSketch(c.cfg)
c.lock.Lock()
c.checkpoint = c.current
c.current = replace
c.lock.Unlock()
if c.checkpoint.Count() != 0 {
exp.Export(ctx, rec, c)
}
}
// Update modifies the current value (atomically) for later export.
func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) {
desc := rec.Descriptor()
kind := desc.NumberKind()
if !desc.Alternate() && number.IsNegative(kind) {
// TODO warn
return
}
// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and Checkpoint()
// calls.
func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error {
c.lock.Lock()
defer c.lock.Unlock()
c.current.Add(number.CoerceToFloat64(kind))
c.current.Add(number.CoerceToFloat64(desc.NumberKind()))
return nil
}
func (c *Aggregator) Merge(oa export.Aggregator, d *export.Descriptor) {
// Merge combines two sketches into one.
func (c *Aggregator) Merge(oa export.Aggregator, d *export.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
// TODO warn
return
return aggregator.NewInconsistentMergeError(c, oa)
}
c.checkpoint.Merge(o.checkpoint)
return nil
}

View File

@ -34,32 +34,37 @@ type updateTest struct {
func (ut *updateTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute)
agg := New(NewDefaultConfig(), record.Descriptor())
descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !ut.absolute)
agg := New(NewDefaultConfig(), descriptor)
all := test.NewNumbers(profile.NumberKind)
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
agg.Update(ctx, x, record)
test.CheckedUpdate(t, agg, x, descriptor)
if !ut.absolute {
y := profile.Random(-1)
all.Append(y)
agg.Update(ctx, y, record)
test.CheckedUpdate(t, agg, y, descriptor)
}
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, descriptor)
all.Sort()
sum, err := agg.Sum()
require.InDelta(t,
all.Sum().CoerceToFloat64(profile.NumberKind),
agg.Sum().CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
1,
"Same sum - absolute")
require.Equal(t, all.Count(), agg.Count(), "Same count - absolute")
require.Nil(t, err)
count, err := agg.Count()
require.Equal(t, all.Count(), count, "Same count - absolute")
require.Nil(t, err)
max, err := agg.Max()
require.Nil(t, err)
@ -96,49 +101,54 @@ type mergeTest struct {
func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute)
descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, !mt.absolute)
agg1 := New(NewDefaultConfig(), record.Descriptor())
agg2 := New(NewDefaultConfig(), record.Descriptor())
agg1 := New(NewDefaultConfig(), descriptor)
agg2 := New(NewDefaultConfig(), descriptor)
all := test.NewNumbers(profile.NumberKind)
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
agg1.Update(ctx, x, record)
test.CheckedUpdate(t, agg1, x, descriptor)
if !mt.absolute {
y := profile.Random(-1)
all.Append(y)
agg1.Update(ctx, y, record)
test.CheckedUpdate(t, agg1, y, descriptor)
}
}
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
agg2.Update(ctx, x, record)
test.CheckedUpdate(t, agg2, x, descriptor)
if !mt.absolute {
y := profile.Random(-1)
all.Append(y)
agg2.Update(ctx, y, record)
test.CheckedUpdate(t, agg2, y, descriptor)
}
}
agg1.Collect(ctx, record, batcher)
agg2.Collect(ctx, record, batcher)
agg1.Checkpoint(ctx, descriptor)
agg2.Checkpoint(ctx, descriptor)
agg1.Merge(agg2, record.Descriptor())
test.CheckedMerge(t, agg1, agg2, descriptor)
all.Sort()
asum, err := agg1.Sum()
require.InDelta(t,
all.Sum().CoerceToFloat64(profile.NumberKind),
agg1.Sum().CoerceToFloat64(profile.NumberKind),
asum.CoerceToFloat64(profile.NumberKind),
1,
"Same sum - absolute")
require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute")
require.Nil(t, err)
count, err := agg1.Count()
require.Equal(t, all.Count(), count, "Same count - absolute")
require.Nil(t, err)
max, err := agg1.Max()
require.Nil(t, err)

View File

@ -1,22 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregator
import "fmt"
var (
	// ErrEmptyDataSet is returned when an aggregation (e.g., a
	// quantile) is requested over an empty data set. Per Go
	// convention, error strings are lowercase and unpunctuated.
	ErrEmptyDataSet = fmt.Errorf("the result is not defined on an empty data set")

	// ErrInvalidQuantile is returned when a requested quantile is
	// outside the valid range [0, 1].
	ErrInvalidQuantile = fmt.Errorf("the requested quantile is out of range")
)

View File

@ -22,6 +22,7 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
// Note: This aggregator enforces the behavior of monotonic gauges to
@ -36,7 +37,7 @@ type (
// current is an atomic pointer to *gaugeData. It is never nil.
current unsafe.Pointer
// checkpoint is a copy of the current value taken in Collect()
// checkpoint is a copy of the current value taken in Checkpoint()
checkpoint unsafe.Pointer
}
@ -55,6 +56,7 @@ type (
)
var _ export.Aggregator = &Aggregator{}
var _ aggregator.LastValue = &Aggregator{}
// An unset gauge has zero timestamp and zero value.
var unsetGauge = &gaugeData{}
@ -68,31 +70,30 @@ func New() *Aggregator {
}
}
// AsNumber returns the recorded gauge value as an int64.
func (g *Aggregator) AsNumber() core.Number {
return (*gaugeData)(g.checkpoint).value.AsNumber()
// LastValue returns the last-recorded gauge value and the
// corresponding timestamp. The error value aggregator.ErrNoLastValue
// will be returned if (due to a race condition) the checkpoint was
// computed before the first value was set.
func (g *Aggregator) LastValue() (core.Number, time.Time, error) {
gd := (*gaugeData)(g.checkpoint)
if gd == unsetGauge {
return core.Number(0), time.Time{}, aggregator.ErrNoLastValue
}
return gd.value.AsNumber(), gd.timestamp, nil
}
// Timestamp returns the timestamp of the last recorded gauge value.
func (g *Aggregator) Timestamp() time.Time {
	return (*gaugeData)(g.checkpoint).timestamp
}
// Collect checkpoints the current value (atomically) and exports it.
func (g *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) {
// Checkpoint atomically saves the current value.
func (g *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) {
g.checkpoint = atomic.LoadPointer(&g.current)
exp.Export(ctx, rec, g)
}
// Update modifies the current value (atomically) for later export.
func (g *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) {
desc := rec.Descriptor()
// Update atomically sets the current "last" value.
func (g *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error {
if !desc.Alternate() {
g.updateNonMonotonic(number)
} else {
g.updateMonotonic(number, desc)
return nil
}
return g.updateMonotonic(number, desc)
}
func (g *Aggregator) updateNonMonotonic(number core.Number) {
@ -103,7 +104,7 @@ func (g *Aggregator) updateNonMonotonic(number core.Number) {
atomic.StorePointer(&g.current, unsafe.Pointer(ngd))
}
func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor) {
func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor) error {
ngd := &gaugeData{
timestamp: time.Now(),
value: number,
@ -114,21 +115,23 @@ func (g *Aggregator) updateMonotonic(number core.Number, desc *export.Descriptor
gd := (*gaugeData)(atomic.LoadPointer(&g.current))
if gd.value.CompareNumber(kind, number) > 0 {
// TODO warn
return
return aggregator.ErrNonMonotoneInput
}
if atomic.CompareAndSwapPointer(&g.current, unsafe.Pointer(gd), unsafe.Pointer(ngd)) {
return
return nil
}
}
}
func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) {
// Merge combines state from two aggregators. If the gauge is
// declared as monotonic, the greater value is chosen. If the gauge
// is declared as non-monotonic, the most-recently set value is
// chosen.
func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
// TODO warn
return
return aggregator.NewInconsistentMergeError(g, oa)
}
ggd := (*gaugeData)(atomic.LoadPointer(&g.checkpoint))
@ -139,18 +142,19 @@ func (g *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) {
cmp := ggd.value.CompareNumber(desc.NumberKind(), ogd.value)
if cmp > 0 {
return
return nil
}
if cmp < 0 {
g.checkpoint = unsafe.Pointer(ogd)
return
return nil
}
}
// Non-monotonic gauge or equal values
if ggd.timestamp.After(ogd.timestamp) {
return
return nil
}
g.checkpoint = unsafe.Pointer(ogd)
return nil
}

View File

@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/test"
)
@ -36,18 +37,20 @@ func TestGaugeNonMonotonic(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false)
record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false)
var last core.Number
for i := 0; i < count; i++ {
x := profile.Random(rand.Intn(1)*2 - 1)
last = x
agg.Update(ctx, x, record)
test.CheckedUpdate(t, agg, x, record)
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, record)
require.Equal(t, last, agg.AsNumber(), "Same last value - non-monotonic")
lv, _, err := agg.LastValue()
require.Equal(t, last, lv, "Same last value - non-monotonic")
require.Nil(t, err)
})
}
@ -57,19 +60,21 @@ func TestGaugeMonotonic(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true)
record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true)
small := profile.Random(+1)
last := small
for i := 0; i < count; i++ {
x := profile.Random(+1)
last.AddNumber(profile.NumberKind, x)
agg.Update(ctx, last, record)
test.CheckedUpdate(t, agg, last, record)
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, record)
require.Equal(t, last, agg.AsNumber(), "Same last value - monotonic")
lv, _, err := agg.LastValue()
require.Equal(t, last, lv, "Same last value - monotonic")
require.Nil(t, err)
})
}
@ -79,19 +84,25 @@ func TestGaugeMonotonicDescending(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true)
record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true)
first := profile.Random(+1)
agg.Update(ctx, first, record)
test.CheckedUpdate(t, agg, first, record)
for i := 0; i < count; i++ {
x := profile.Random(-1)
agg.Update(ctx, x, record)
err := agg.Update(ctx, x, record)
if err != aggregator.ErrNonMonotoneInput {
t.Error("Expected ErrNonMonotoneInput", err)
}
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, record)
require.Equal(t, first, agg.AsNumber(), "Same last value - monotonic")
lv, _, err := agg.LastValue()
require.Equal(t, first, lv, "Same last value - monotonic")
require.Nil(t, err)
})
}
@ -102,26 +113,30 @@ func TestGaugeNormalMerge(t *testing.T) {
agg1 := New()
agg2 := New()
batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false)
descriptor := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, false)
first1 := profile.Random(+1)
first2 := profile.Random(+1)
first1.AddNumber(profile.NumberKind, first2)
agg1.Update(ctx, first1, record)
agg2.Update(ctx, first2, record)
test.CheckedUpdate(t, agg1, first1, descriptor)
test.CheckedUpdate(t, agg2, first2, descriptor)
agg1.Collect(ctx, record, batcher)
agg2.Collect(ctx, record, batcher)
agg1.Checkpoint(ctx, descriptor)
agg2.Checkpoint(ctx, descriptor)
t1 := agg1.Timestamp()
t2 := agg2.Timestamp()
_, t1, err := agg1.LastValue()
require.Nil(t, err)
_, t2, err := agg2.LastValue()
require.Nil(t, err)
require.True(t, t1.Before(t2))
agg1.Merge(agg2, record.Descriptor())
test.CheckedMerge(t, agg1, agg2, descriptor)
require.Equal(t, t2, agg1.Timestamp(), "Merged timestamp - non-monotonic")
require.Equal(t, first2, agg1.AsNumber(), "Merged value - non-monotonic")
lv, ts, err := agg1.LastValue()
require.Nil(t, err)
require.Equal(t, t2, ts, "Merged timestamp - non-monotonic")
require.Equal(t, first2, lv, "Merged value - non-monotonic")
})
}
@ -132,21 +147,38 @@ func TestGaugeMonotonicMerge(t *testing.T) {
agg1 := New()
agg2 := New()
batcher, record := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true)
descriptor := test.NewAggregatorTest(export.GaugeKind, profile.NumberKind, true)
first1 := profile.Random(+1)
agg1.Update(ctx, first1, record)
test.CheckedUpdate(t, agg1, first1, descriptor)
first2 := profile.Random(+1)
first2.AddNumber(profile.NumberKind, first1)
agg2.Update(ctx, first2, record)
test.CheckedUpdate(t, agg2, first2, descriptor)
agg1.Collect(ctx, record, batcher)
agg2.Collect(ctx, record, batcher)
agg1.Checkpoint(ctx, descriptor)
agg2.Checkpoint(ctx, descriptor)
agg1.Merge(agg2, record.Descriptor())
test.CheckedMerge(t, agg1, agg2, descriptor)
require.Equal(t, first2, agg1.AsNumber(), "Merged value - monotonic")
require.Equal(t, agg2.Timestamp(), agg1.Timestamp(), "Merged timestamp - monotonic")
_, ts2, err := agg1.LastValue()
require.Nil(t, err)
lv, ts1, err := agg1.LastValue()
require.Nil(t, err)
require.Equal(t, first2, lv, "Merged value - monotonic")
require.Equal(t, ts2, ts1, "Merged timestamp - monotonic")
})
}
func TestGaugeNotSet(t *testing.T) {
descriptor := test.NewAggregatorTest(export.GaugeKind, core.Int64NumberKind, true)
g := New()
g.Checkpoint(context.Background(), descriptor)
value, timestamp, err := g.LastValue()
require.Equal(t, aggregator.ErrNoLastValue, err)
require.True(t, timestamp.IsZero())
require.Equal(t, core.Number(0), value)
}

View File

@ -19,6 +19,7 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
type (
@ -36,30 +37,43 @@ type (
}
)
var _ export.Aggregator = &Aggregator{}
// TODO: The SDK specification says this type should support Min
// values, see #319.
// New returns a new measure aggregator for computing max, sum, and count.
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MaxSumCount = &Aggregator{}
// New returns a new measure aggregator for computing max, sum, and
// count. It does not compute quantile information other than Max.
//
// Note that this aggregator maintains each value using independent
// atomic operations, which introduces the possibility that
// checkpoints are inconsistent. For greater consistency and lower
// performance, consider using Array or DDSketch aggregators.
func New() *Aggregator {
return &Aggregator{}
}
// Sum returns the accumulated sum as a Number.
func (c *Aggregator) Sum() core.Number {
return c.checkpoint.sum
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (core.Number, error) {
return c.checkpoint.sum, nil
}
// Count returns the accumulated count.
func (c *Aggregator) Count() int64 {
return int64(c.checkpoint.count.AsUint64())
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (int64, error) {
return int64(c.checkpoint.count.AsUint64()), nil
}
// Max returns the accumulated max as a Number.
// Max returns the maximum value in the checkpoint.
func (c *Aggregator) Max() (core.Number, error) {
return c.checkpoint.max, nil
}
// Collect checkpoints the current value (atomically) and exports it.
func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.Batcher) {
// Checkpoint saves the current state and resets the current state to
// the empty set. Since no locks are taken, there is a chance that
// the independent Max, Sum, and Count are not consistent with each
// other.
func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) {
// N.B. There is no atomic operation that can update all three
// values at once without a memory allocation.
//
@ -73,20 +87,12 @@ func (c *Aggregator) Collect(ctx context.Context, rec export.Record, exp export.
c.checkpoint.count.SetUint64(c.current.count.SwapUint64Atomic(0))
c.checkpoint.sum = c.current.sum.SwapNumberAtomic(core.Number(0))
c.checkpoint.max = c.current.max.SwapNumberAtomic(core.Number(0))
exp.Export(ctx, rec, c)
}
// Update modifies the current value (atomically) for later export.
func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Record) {
desc := rec.Descriptor()
// Update adds the recorded measurement to the current data set.
func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error {
kind := desc.NumberKind()
if !desc.Alternate() && number.IsNegative(kind) {
// TODO warn
return
}
c.current.count.AddUint64Atomic(1)
c.current.sum.AddNumberAtomic(kind, number)
@ -100,13 +106,14 @@ func (c *Aggregator) Update(_ context.Context, number core.Number, rec export.Re
break
}
}
return nil
}
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) {
// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
// TODO warn
return
return aggregator.NewInconsistentMergeError(c, oa)
}
c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum)
@ -115,4 +122,5 @@ func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) {
if c.checkpoint.max.CompareNumber(desc.NumberKind(), o.checkpoint.max) < 0 {
c.checkpoint.max.SetNumber(o.checkpoint.max)
}
return nil
}

View File

@ -30,7 +30,7 @@ func TestMaxSumCountAbsolute(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false)
record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false)
agg := New()
@ -39,19 +39,24 @@ func TestMaxSumCountAbsolute(t *testing.T) {
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
agg.Update(ctx, x, record)
test.CheckedUpdate(t, agg, x, record)
}
agg.Collect(ctx, record, batcher)
agg.Checkpoint(ctx, record)
all.Sort()
asum, err := agg.Sum()
require.InEpsilon(t,
all.Sum().CoerceToFloat64(profile.NumberKind),
agg.Sum().CoerceToFloat64(profile.NumberKind),
asum.CoerceToFloat64(profile.NumberKind),
0.000000001,
"Same sum - absolute")
require.Equal(t, all.Count(), agg.Count(), "Same count - absolute")
require.Nil(t, err)
count, err := agg.Count()
require.Equal(t, all.Count(), count, "Same count - absolute")
require.Nil(t, err)
max, err := agg.Max()
require.Nil(t, err)
@ -66,7 +71,7 @@ func TestMaxSumCountMerge(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
batcher, record := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false)
descriptor := test.NewAggregatorTest(export.MeasureKind, profile.NumberKind, false)
agg1 := New()
agg2 := New()
@ -76,27 +81,32 @@ func TestMaxSumCountMerge(t *testing.T) {
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
agg1.Update(ctx, x, record)
test.CheckedUpdate(t, agg1, x, descriptor)
}
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
agg2.Update(ctx, x, record)
test.CheckedUpdate(t, agg2, x, descriptor)
}
agg1.Collect(ctx, record, batcher)
agg2.Collect(ctx, record, batcher)
agg1.Checkpoint(ctx, descriptor)
agg2.Checkpoint(ctx, descriptor)
agg1.Merge(agg2, record.Descriptor())
test.CheckedMerge(t, agg1, agg2, descriptor)
all.Sort()
asum, err := agg1.Sum()
require.InEpsilon(t,
all.Sum().CoerceToFloat64(profile.NumberKind),
agg1.Sum().CoerceToFloat64(profile.NumberKind),
asum.CoerceToFloat64(profile.NumberKind),
0.000000001,
"Same sum - absolute")
require.Equal(t, all.Count(), agg1.Count(), "Same count - absolute")
require.Nil(t, err)
count, err := agg1.Count()
require.Equal(t, all.Count(), count, "Same count - absolute")
require.Nil(t, err)
max, err := agg1.Max()
require.Nil(t, err)

View File

@ -22,11 +22,9 @@ import (
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
var _ export.Batcher = &metricBatcher{}
var _ export.Record = &metricRecord{}
const Magnitude = 1000
type Profile struct {
@ -52,31 +50,9 @@ func newProfiles() []Profile {
}
}
type metricBatcher struct {
}
type metricRecord struct {
descriptor *export.Descriptor
}
func NewAggregatorTest(mkind export.Kind, nkind core.NumberKind, alternate bool) (export.Batcher, export.Record) {
func NewAggregatorTest(mkind export.MetricKind, nkind core.NumberKind, alternate bool) *export.Descriptor {
desc := export.NewDescriptor("test.name", mkind, nil, "", "", nkind, alternate)
return &metricBatcher{}, &metricRecord{descriptor: desc}
}
func (t *metricRecord) Descriptor() *export.Descriptor {
return t.descriptor
}
func (t *metricRecord) Labels() []core.KeyValue {
return nil
}
func (m *metricBatcher) AggregatorFor(rec export.Record) export.Aggregator {
return nil
}
func (m *metricBatcher) Export(context.Context, export.Record, export.Aggregator) {
return desc
}
func RunProfiles(t *testing.T, f func(*testing.T, Profile)) {
@ -147,3 +123,26 @@ func (n *Numbers) Median() core.Number {
// specified quantile.
return n.numbers[len(n.numbers)/2]
}
// Performs the same range test the SDK does on behalf of the aggregator.
func CheckedUpdate(t *testing.T, agg export.Aggregator, number core.Number, descriptor *export.Descriptor) {
ctx := context.Background()
// Note: Aggregator tests are written assuming that the SDK
// has performed the RangeTest. Therefore we skip errors that
// would have been detected by the RangeTest.
err := aggregator.RangeTest(number, descriptor)
if err != nil {
return
}
if err := agg.Update(ctx, number, descriptor); err != nil {
t.Error("Unexpected Update failure", err)
}
}
func CheckedMerge(t *testing.T, aggInto, aggFrom export.Aggregator, descriptor *export.Descriptor) {
if err := aggInto.Merge(aggFrom, descriptor); err != nil {
t.Error("Unexpected Merge failure", err)
}
}

View File

@ -0,0 +1,146 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package defaultkeys // import "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys"
import (
"context"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
)
type (
Batcher struct {
selector export.AggregationSelector
labelEncoder export.LabelEncoder
stateful bool
descKeyIndex descKeyIndexMap
aggCheckpoint aggCheckpointMap
}
// descKeyIndexMap is a mapping, for each Descriptor, from the
// Key to the position in the descriptor's recommended keys.
descKeyIndexMap map[*export.Descriptor]map[core.Key]int
// aggCheckpointMap is a mapping from encoded label set to current
// export record. If the batcher is stateful, this map is
// never cleared.
aggCheckpointMap map[string]export.Record
checkpointSet struct {
aggCheckpointMap aggCheckpointMap
labelEncoder export.LabelEncoder
}
)
var _ export.Batcher = &Batcher{}
var _ export.CheckpointSet = &checkpointSet{}
// New returns a defaultkeys Batcher that groups records by each
// descriptor's recommended keys, encoding the reduced label sets with
// labelEncoder.  When stateful is true the checkpoint map is retained
// across collection intervals.
func New(selector export.AggregationSelector, labelEncoder export.LabelEncoder, stateful bool) *Batcher {
	b := &Batcher{
		selector:     selector,
		labelEncoder: labelEncoder,
		stateful:     stateful,
	}
	b.descKeyIndex = descKeyIndexMap{}
	b.aggCheckpoint = aggCheckpointMap{}
	return b
}
// AggregatorFor delegates aggregator selection to the configured
// AggregationSelector.
func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
	return b.selector.AggregatorFor(descriptor)
}
// Process reduces the record's labels to the descriptor's recommended
// keys (missing keys become empty-string values), then merges the
// record's aggregator into the checkpoint entry for the reduced,
// encoded label set.  Records whose reduced labels collide are
// aggregated together.
func (b *Batcher) Process(_ context.Context, record export.Record) error {
	desc := record.Descriptor()
	keys := desc.Keys()

	// Cache the mapping from Descriptor->Key->Index
	ki, ok := b.descKeyIndex[desc]
	if !ok {
		ki = map[core.Key]int{}
		b.descKeyIndex[desc] = ki
		for i, k := range keys {
			ki[k] = i
		}
	}

	// Compute the value list.  Note: Unspecified values become
	// empty strings.  TODO: pin this down, we have no appropriate
	// Value constructor.
	outputLabels := make([]core.KeyValue, len(keys))
	for i, key := range keys {
		outputLabels[i] = key.String("")
	}

	// Note also the possibility to speed this computation of
	// "encoded" via "outputLabels" in the form of a (Descriptor,
	// LabelSet)->(Labels, Encoded) cache.
	for _, kv := range record.Labels().Ordered() {
		pos, ok := ki[kv.Key]
		if !ok {
			// Label is not among the recommended keys: dropped.
			continue
		}
		outputLabels[pos].Value = kv.Value
	}

	// Compute an encoded lookup key.
	encoded := b.labelEncoder.Encode(outputLabels)

	// Merge this aggregator with all preceding aggregators that
	// map to the same set of `outputLabels` labels.
	agg := record.Aggregator()
	rag, ok := b.aggCheckpoint[encoded]
	if ok {
		return rag.Aggregator().Merge(agg, desc)
	}

	// If this Batcher is stateful, create a copy of the
	// Aggregator for long-term storage.  Otherwise the
	// Meter implementation will checkpoint the aggregator
	// again, overwriting the long-lived state.
	if b.stateful {
		tmp := agg
		agg = b.AggregatorFor(desc)
		if err := agg.Merge(tmp, desc); err != nil {
			return err
		}
	}
	b.aggCheckpoint[encoded] = export.NewRecord(
		desc,
		export.NewLabels(outputLabels, encoded, b.labelEncoder),
		agg,
	)
	return nil
}
// CheckpointSet wraps the current checkpoint map (and its label
// encoder) for export.
func (b *Batcher) CheckpointSet() export.CheckpointSet {
	cs := checkpointSet{
		aggCheckpointMap: b.aggCheckpoint,
		labelEncoder:     b.labelEncoder,
	}
	return &cs
}
// FinishedCollection discards the accumulated checkpoints unless this
// Batcher was configured to be stateful.
func (b *Batcher) FinishedCollection() {
	if b.stateful {
		return
	}
	b.aggCheckpoint = aggCheckpointMap{}
}
// ForEach invokes f once per accumulated export record.
func (p *checkpointSet) ForEach(f func(export.Record)) {
	for _, rec := range p.aggCheckpointMap {
		f(rec)
	}
}

View File

@ -0,0 +1,116 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package defaultkeys_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys"
"go.opentelemetry.io/otel/sdk/metric/batcher/test"
)
// TestGroupingStateless verifies that the defaultkeys batcher reduces
// labels to each descriptor's recommended keys and that a stateless
// batcher clears its accumulated state on FinishedCollection().
func TestGroupingStateless(t *testing.T) {
	ctx := context.Background()
	b := defaultkeys.New(test.NewAggregationSelector(), test.GroupEncoder, false)

	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels1, test.GaugeAgg(10)))
	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels2, test.GaugeAgg(20)))
	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels3, test.GaugeAgg(30)))
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(10)))
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels2, test.CounterAgg(20)))
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels3, test.CounterAgg(40)))

	checkpointSet := b.CheckpointSet()
	b.FinishedCollection()

	records := test.Output{}
	checkpointSet.ForEach(records.AddTo)

	// Output gauge should have only the "G=H" and "G=" keys.
	// Output counter should have only the "C=D" and "C=" keys.
	require.EqualValues(t, map[string]int64{
		"counter/C=D": 30, // labels1 + labels2
		"counter/C=":  40, // labels3
		"gauge/G=H":   10, // labels1
		"gauge/G=":    30, // labels3 = last value
	}, records)

	// Verify that state is reset by FinishedCollection()
	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()
	checkpointSet.ForEach(func(rec export.Record) {
		t.Fatal("Unexpected call")
	})
}
// TestGroupingStateful verifies that a stateful defaultkeys batcher
// retains its checkpoints across FinishedCollection() and only
// reflects aggregator updates after they are re-Processed.
func TestGroupingStateful(t *testing.T) {
	ctx := context.Background()
	b := defaultkeys.New(test.NewAggregationSelector(), test.GroupEncoder, true)

	cagg := test.CounterAgg(10)
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg))

	checkpointSet := b.CheckpointSet()
	b.FinishedCollection()

	records1 := test.Output{}
	checkpointSet.ForEach(records1.AddTo)

	require.EqualValues(t, map[string]int64{
		"counter/C=D": 10, // labels1
	}, records1)

	// Test that state was NOT reset
	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()

	records2 := test.Output{}
	checkpointSet.ForEach(records2.AddTo)

	require.EqualValues(t, records1, records2)

	// Update and re-checkpoint the original record.
	_ = cagg.Update(ctx, core.NewInt64Number(20), test.CounterDesc)
	cagg.Checkpoint(ctx, test.CounterDesc)

	// As yet cagg has not been passed to Batcher.Process.  Should
	// not see an update.
	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()

	records3 := test.Output{}
	checkpointSet.ForEach(records3.AddTo)

	require.EqualValues(t, records1, records3)

	// Now process the second update
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg))

	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()

	records4 := test.Output{}
	checkpointSet.ForEach(records4.AddTo)

	require.EqualValues(t, map[string]int64{
		"counter/C=D": 30,
	}, records4)
}

View File

@ -0,0 +1,136 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"context"
"fmt"
"strings"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/key"
export "go.opentelemetry.io/otel/sdk/export/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
)
type (
// Encoder is an alternate label encoder to validate grouping logic.
Encoder struct{}
// Output collects distinct metric/label set outputs.
Output map[string]int64
// testAggregationSelector returns aggregators consistent with
// the test variables below, needed for testing stateful
// batchers, which clone Aggregators using AggregatorFor(desc).
testAggregationSelector struct{}
)
var (
// GaugeDesc groups by "G"
GaugeDesc = export.NewDescriptor(
"gauge", export.GaugeKind, []core.Key{key.New("G")}, "", "", core.Int64NumberKind, false)
// CounterDesc groups by "C"
CounterDesc = export.NewDescriptor(
"counter", export.CounterKind, []core.Key{key.New("C")}, "", "", core.Int64NumberKind, false)
// SdkEncoder uses a non-standard encoder like K1~V1&K2~V2
SdkEncoder = &Encoder{}
// GroupEncoder uses the SDK default encoder
GroupEncoder = sdk.DefaultLabelEncoder()
// Gauge groups are (labels1), (labels2+labels3)
// Counter groups are (labels1+labels2), (labels3)
// Labels1 has G=H and C=D
Labels1 = makeLabels(SdkEncoder, key.String("G", "H"), key.String("C", "D"))
// Labels2 has C=D and E=F
Labels2 = makeLabels(SdkEncoder, key.String("C", "D"), key.String("E", "F"))
// Labels3 is the empty set
Labels3 = makeLabels(SdkEncoder)
)
// NewAggregationSelector returns a policy that is consistent with the
// test descriptors above.  I.e., it returns counter.New() for counter
// instruments and gauge.New() for gauge instruments.
func NewAggregationSelector() export.AggregationSelector {
	return &testAggregationSelector{}
}
// AggregatorFor maps the two test descriptor kinds to their
// aggregators: counters get counter.New(), gauges get gauge.New().
// Any other kind is a test-setup error and panics.
func (*testAggregationSelector) AggregatorFor(desc *export.Descriptor) export.Aggregator {
	kind := desc.MetricKind()
	if kind == export.CounterKind {
		return counter.New()
	}
	if kind == export.GaugeKind {
		return gauge.New()
	}
	panic("Invalid descriptor MetricKind for this test")
}
// makeLabels builds an export.Labels from the given key-values,
// pre-encoded with the supplied encoder.
func makeLabels(encoder export.LabelEncoder, labels ...core.KeyValue) export.Labels {
	return export.NewLabels(labels, encoder.Encode(labels), encoder)
}
// Encode renders labels as K1~V1&K2~V2, a deliberately non-standard
// format used to validate grouping logic.
func (Encoder) Encode(kvs []core.KeyValue) string {
	parts := make([]string, 0, len(kvs))
	for _, kv := range kvs {
		parts = append(parts, string(kv.Key)+"~"+kv.Value.Emit())
	}
	return strings.Join(parts, "&")
}
// GaugeAgg returns a checkpointed gauge aggregator w/ the specified value.
func GaugeAgg(v int64) export.Aggregator {
	ctx := context.Background()
	gagg := gauge.New()
	_ = gagg.Update(ctx, core.NewInt64Number(v), GaugeDesc)
	// Checkpoint with the gauge's own descriptor.  This previously
	// passed CounterDesc, a copy-paste slip from CounterAgg below.
	gagg.Checkpoint(ctx, GaugeDesc)
	return gagg
}
// CounterAgg returns a checkpointed counter aggregator w/ the specified value.
func CounterAgg(v int64) export.Aggregator {
	ctx := context.Background()
	agg := counter.New()
	_ = agg.Update(ctx, core.NewInt64Number(v), CounterDesc)
	agg.Checkpoint(ctx, CounterDesc)
	return agg
}
// AddTo adds a name/label-encoding entry with the gauge or counter
// value to the output map.
func (o Output) AddTo(rec export.Record) {
	name := rec.Descriptor().Name()
	encoded := rec.Labels().Encoded()
	mapKey := fmt.Sprint(name, "/", encoded)

	var value int64
	switch agg := rec.Aggregator().(type) {
	case *counter.Aggregator:
		sum, _ := agg.Sum()
		value = sum.AsInt64()
	case *gauge.Aggregator:
		last, _, _ := agg.LastValue()
		value = last.AsInt64()
	}
	o[mapKey] = value
}

View File

@ -0,0 +1,105 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ungrouped // import "go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped"
import (
"context"
export "go.opentelemetry.io/otel/sdk/export/metric"
)
type (
Batcher struct {
selector export.AggregationSelector
batchMap batchMap
stateful bool
}
batchKey struct {
descriptor *export.Descriptor
encoded string
}
batchValue struct {
aggregator export.Aggregator
labels export.Labels
}
batchMap map[batchKey]batchValue
)
var _ export.Batcher = &Batcher{}
var _ export.CheckpointSet = batchMap{}
// New returns an ungrouped Batcher using the given aggregation
// selector.  When stateful is true, accumulated state is retained
// across collection intervals.
func New(selector export.AggregationSelector, stateful bool) *Batcher {
	b := Batcher{
		selector: selector,
		stateful: stateful,
	}
	b.batchMap = batchMap{}
	return &b
}
// AggregatorFor delegates aggregator selection to the configured
// AggregationSelector.
func (b *Batcher) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
	return b.selector.AggregatorFor(descriptor)
}
// Process accumulates the record's aggregator under a key formed from
// the descriptor and the record's encoded labels.  Unlike the
// defaultkeys batcher, no label reduction is performed: every distinct
// label set yields a distinct output record.
func (b *Batcher) Process(_ context.Context, record export.Record) error {
	desc := record.Descriptor()
	key := batchKey{
		descriptor: desc,
		encoded:    record.Labels().Encoded(),
	}
	agg := record.Aggregator()
	value, ok := b.batchMap[key]
	if ok {
		// An entry for this key already exists: merge into it.
		return value.aggregator.Merge(agg, desc)
	}
	// If this Batcher is stateful, create a copy of the
	// Aggregator for long-term storage.  Otherwise the
	// Meter implementation will checkpoint the aggregator
	// again, overwriting the long-lived state.
	if b.stateful {
		tmp := agg
		agg = b.AggregatorFor(desc)
		if err := agg.Merge(tmp, desc); err != nil {
			return err
		}
	}
	b.batchMap[key] = batchValue{
		aggregator: agg,
		labels:     record.Labels(),
	}
	return nil
}
// CheckpointSet returns the batch map itself, which implements
// export.CheckpointSet.
func (b *Batcher) CheckpointSet() export.CheckpointSet {
	return b.batchMap
}
// FinishedCollection discards the batch map unless this Batcher was
// configured to be stateful.
func (b *Batcher) FinishedCollection() {
	if b.stateful {
		return
	}
	b.batchMap = batchMap{}
}
// ForEach reconstructs an export.Record from each (key, value) pair
// and passes it to f.
func (c batchMap) ForEach(f func(export.Record)) {
	for k, v := range c {
		rec := export.NewRecord(k.descriptor, v.labels, v.aggregator)
		f(rec)
	}
}

View File

@ -0,0 +1,128 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ungrouped_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/batcher/test"
"go.opentelemetry.io/otel/sdk/metric/batcher/ungrouped"
)
// These tests use the ../test label encoding.
// TestUngroupedStateless verifies that the ungrouped batcher keeps one
// record per distinct label set (no key reduction) and that a
// stateless batcher clears its state on FinishedCollection().
func TestUngroupedStateless(t *testing.T) {
	ctx := context.Background()
	b := ungrouped.New(test.NewAggregationSelector(), false)

	// Set initial gauge values
	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels1, test.GaugeAgg(10)))
	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels2, test.GaugeAgg(20)))
	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels3, test.GaugeAgg(30)))

	// Another gauge Set for Labels1
	_ = b.Process(ctx, export.NewRecord(test.GaugeDesc, test.Labels1, test.GaugeAgg(50)))

	// Set initial counter values
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(10)))
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels2, test.CounterAgg(20)))
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels3, test.CounterAgg(40)))

	// Another counter Add for Labels1
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, test.CounterAgg(50)))

	checkpointSet := b.CheckpointSet()
	b.FinishedCollection()

	records := test.Output{}
	checkpointSet.ForEach(records.AddTo)

	// Each distinct label set produces one output record, encoded
	// with the ../test Encoder (K~V&K~V).  Counters merge by
	// addition; gauges keep the last value.
	require.EqualValues(t, map[string]int64{
		"counter/G~H&C~D": 60, // labels1
		"counter/C~D&E~F": 20, // labels2
		"counter/":        40, // labels3
		"gauge/G~H&C~D":   50, // labels1
		"gauge/C~D&E~F":   20, // labels2
		"gauge/":          30, // labels3
	}, records)

	// Verify that state was reset
	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()
	checkpointSet.ForEach(func(rec export.Record) {
		t.Fatal("Unexpected call")
	})
}
// TestUngroupedStateful verifies that a stateful ungrouped batcher
// retains its records across FinishedCollection() and only reflects
// aggregator updates after they are re-Processed.
func TestUngroupedStateful(t *testing.T) {
	ctx := context.Background()
	b := ungrouped.New(test.NewAggregationSelector(), true)

	cagg := test.CounterAgg(10)
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg))

	checkpointSet := b.CheckpointSet()
	b.FinishedCollection()

	records1 := test.Output{}
	checkpointSet.ForEach(records1.AddTo)

	require.EqualValues(t, map[string]int64{
		"counter/G~H&C~D": 10, // labels1
	}, records1)

	// Test that state was NOT reset
	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()

	records2 := test.Output{}
	checkpointSet.ForEach(records2.AddTo)

	require.EqualValues(t, records1, records2)

	// Update and re-checkpoint the original record.
	_ = cagg.Update(ctx, core.NewInt64Number(20), test.CounterDesc)
	cagg.Checkpoint(ctx, test.CounterDesc)

	// As yet cagg has not been passed to Batcher.Process.  Should
	// not see an update.
	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()

	records3 := test.Output{}
	checkpointSet.ForEach(records3.AddTo)

	require.EqualValues(t, records1, records3)

	// Now process the second update
	_ = b.Process(ctx, export.NewRecord(test.CounterDesc, test.Labels1, cagg))

	checkpointSet = b.CheckpointSet()
	b.FinishedCollection()

	records4 := test.Output{}
	checkpointSet.ForEach(records4.AddTo)

	require.EqualValues(t, map[string]int64{
		"counter/G~H&C~D": 30,
	}, records4)
}

View File

@ -42,29 +42,37 @@ func newFixture(b *testing.B) *benchFixture {
bf := &benchFixture{
B: b,
}
bf.sdk = sdk.New(bf)
bf.sdk = sdk.New(bf, sdk.DefaultLabelEncoder())
return bf
}
func (bf *benchFixture) AggregatorFor(rec export.Record) export.Aggregator {
switch rec.Descriptor().MetricKind() {
func (*benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
switch descriptor.MetricKind() {
case export.CounterKind:
return counter.New()
case export.GaugeKind:
return gauge.New()
case export.MeasureKind:
if strings.HasSuffix(rec.Descriptor().Name(), "maxsumcount") {
if strings.HasSuffix(descriptor.Name(), "maxsumcount") {
return maxsumcount.New()
} else if strings.HasSuffix(rec.Descriptor().Name(), "ddsketch") {
return ddsketch.New(ddsketch.NewDefaultConfig(), rec.Descriptor())
} else if strings.HasSuffix(rec.Descriptor().Name(), "array") {
return ddsketch.New(ddsketch.NewDefaultConfig(), rec.Descriptor())
} else if strings.HasSuffix(descriptor.Name(), "ddsketch") {
return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
} else if strings.HasSuffix(descriptor.Name(), "array") {
return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
}
}
return nil
}
func (bf *benchFixture) Export(ctx context.Context, rec export.Record, agg export.Aggregator) {
func (*benchFixture) Process(context.Context, export.Record) error {
return nil
}
func (*benchFixture) CheckpointSet() export.CheckpointSet {
return nil
}
func (*benchFixture) FinishedCollection() {
}
func makeLabelSets(n int) [][]core.KeyValue {

View File

@ -0,0 +1,186 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push // import "go.opentelemetry.io/otel/sdk/metric/controller/push"
import (
"context"
"sync"
"time"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
)
// Controller organizes a periodic push of metric data.
type Controller struct {
lock sync.Mutex
sdk *sdk.SDK
errorHandler sdk.ErrorHandler
batcher export.Batcher
exporter export.Exporter
wg sync.WaitGroup
ch chan struct{}
period time.Duration
ticker Ticker
clock Clock
}
var _ metric.Provider = &Controller{}
// Several types below are created to match "github.com/benbjohnson/clock"
// so that it remains a test-only dependency.
type Clock interface {
Now() time.Time
Ticker(time.Duration) Ticker
}
type Ticker interface {
Stop()
C() <-chan time.Time
}
type realClock struct {
}
type realTicker struct {
ticker *time.Ticker
}
var _ Clock = realClock{}
var _ Ticker = realTicker{}
// New constructs a Controller, an implementation of metric.Provider,
// using the provided batcher, exporter, and collection period to
// configure an SDK with periodic collection.  The batcher itself is
// configured with the aggregation selector policy.
//
// If the Exporter implements the export.LabelEncoder interface, the
// exporter will be used as the label encoder for the SDK itself,
// otherwise the SDK will be configured with the default label
// encoder.
func New(batcher export.Batcher, exporter export.Exporter, period time.Duration) *Controller {
	lencoder, ok := exporter.(export.LabelEncoder)
	if !ok {
		lencoder = sdk.DefaultLabelEncoder()
	}
	c := &Controller{
		sdk:          sdk.New(batcher, lencoder),
		errorHandler: sdk.DefaultErrorHandler,
		batcher:      batcher,
		exporter:     exporter,
		ch:           make(chan struct{}),
		period:       period,
		clock:        realClock{},
	}
	return c
}
// SetClock supports setting a mock clock for testing.  This must be
// called before Start().
func (c *Controller) SetClock(clock Clock) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.clock = clock
}

// SetErrorHandler installs the handler used for export errors, on
// both this Controller and the underlying SDK.
func (c *Controller) SetErrorHandler(errorHandler sdk.ErrorHandler) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.errorHandler = errorHandler
	c.sdk.SetErrorHandler(errorHandler)
}
// GetMeter returns a named Meter, satisfying the metric.Provider
// interface.  Note: the name argument is currently unused; every
// caller receives the same underlying SDK instance.
func (c *Controller) GetMeter(name string) metric.Meter {
	return c.sdk
}
// Start begins a ticker that periodically collects and exports
// metrics with the configured interval.  Calling Start while the
// ticker is already running is a no-op (c.ticker is non-nil).
func (c *Controller) Start() {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.ticker != nil {
		return
	}

	c.ticker = c.clock.Ticker(c.period)
	c.wg.Add(1)
	go c.run(c.ch)
}
// Stop waits for the background goroutine to return and then collects
// and exports metrics one last time before returning.  Setting c.ch
// to nil makes a second Stop a no-op.
//
// NOTE(review): calling Stop without a prior Start appears to panic
// on the nil c.ticker — confirm callers always Start first.
func (c *Controller) Stop() {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.ch == nil {
		return
	}

	close(c.ch)
	c.ch = nil
	c.wg.Wait()
	c.ticker.Stop()

	c.tick()
}
// run is the collection loop: it calls tick on every ticker tick and
// exits when ch is closed (by Stop).
func (c *Controller) run(ch chan struct{}) {
	for {
		select {
		case <-ch:
			c.wg.Done()
			return
		case <-c.ticker.C():
			c.tick()
		}
	}
}
// tick performs one collect/export cycle, routing any export error to
// the configured error handler.  FinishedCollection is called on the
// batcher regardless of the export outcome.
func (c *Controller) tick() {
	// TODO: either remove the context argument from Export() or
	// configure a timeout here?
	ctx := context.Background()
	c.sdk.Collect(ctx)
	err := c.exporter.Export(ctx, c.batcher.CheckpointSet())
	c.batcher.FinishedCollection()

	if err != nil {
		c.errorHandler(err)
	}
}
// Now implements Clock using the real system time.
func (realClock) Now() time.Time {
	return time.Now()
}

// Ticker implements Clock by wrapping a real time.Ticker.
func (realClock) Ticker(period time.Duration) Ticker {
	return realTicker{time.NewTicker(period)}
}
// Stop releases the wrapped time.Ticker's resources.
func (t realTicker) Stop() {
	t.ticker.Stop()
}

// C exposes the wrapped time.Ticker's channel.
func (t realTicker) C() <-chan time.Time {
	return t.ticker.C
}

View File

@ -0,0 +1,229 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package push_test
import (
"context"
"fmt"
"runtime"
"testing"
"time"
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/exporter/metric/test"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
)
// testBatcher is a test double for export.Batcher that counts
// CheckpointSet and FinishedCollection calls.
type testBatcher struct {
	t             *testing.T
	checkpointSet *test.CheckpointSet
	checkpoints   int // number of CheckpointSet() calls
	finishes      int // number of FinishedCollection() calls
}

// testExporter is a test double for export.Exporter that captures
// every exported record and can return a configured error.
type testExporter struct {
	t       *testing.T
	exports int
	records []export.Record
	retErr  error // returned by Export to exercise error handling
}

// testFixture bundles the doubles shared by the push controller tests.
type testFixture struct {
	checkpointSet *test.CheckpointSet
	batcher       *testBatcher
	exporter      *testExporter
}

// mockClock adapts a benbjohnson/clock mock to the push.Clock interface.
type mockClock struct {
	mock *clock.Mock
}

// mockTicker adapts a benbjohnson/clock ticker to the push.Ticker interface.
type mockTicker struct {
	ticker *clock.Ticker
}

// Compile-time interface conformance checks.
var _ push.Clock = mockClock{}
var _ push.Ticker = mockTicker{}
// newFixture builds the batcher/exporter doubles around one shared
// checkpoint set.
func newFixture(t *testing.T) testFixture {
	cs := test.NewCheckpointSet(sdk.DefaultLabelEncoder())
	return testFixture{
		checkpointSet: cs,
		batcher: &testBatcher{
			t:             t,
			checkpointSet: cs,
		},
		exporter: &testExporter{
			t: t,
		},
	}
}
// AggregatorFor returns a fresh counter aggregator for every instrument.
func (b *testBatcher) AggregatorFor(*export.Descriptor) export.Aggregator {
	return counter.New()
}

// CheckpointSet counts the call and returns the shared checkpoint set.
func (b *testBatcher) CheckpointSet() export.CheckpointSet {
	b.checkpoints++
	return b.checkpointSet
}

// FinishedCollection counts the end-of-collection notification.
func (b *testBatcher) FinishedCollection() {
	b.finishes++
}

// Process copies each checkpointed record into the checkpoint set.
func (b *testBatcher) Process(_ context.Context, record export.Record) error {
	b.checkpointSet.Add(record.Descriptor(), record.Aggregator(), record.Labels().Ordered()...)
	return nil
}
// Export counts the call, captures every record in the checkpoint
// set, and returns the configured error (nil by default).
func (e *testExporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
	e.exports++
	checkpointSet.ForEach(func(r export.Record) {
		e.records = append(e.records, r)
	})
	return e.retErr
}
// Now returns the mock clock's current (manually advanced) time.
func (c mockClock) Now() time.Time {
	return c.mock.Now()
}

// Ticker returns a push.Ticker driven by the mock clock.
func (c mockClock) Ticker(period time.Duration) push.Ticker {
	return mockTicker{c.mock.Ticker(period)}
}

// Add advances the mock clock, firing any due tickers.
func (c mockClock) Add(d time.Duration) {
	c.mock.Add(d)
}

// Stop stops the underlying mock ticker.
func (t mockTicker) Stop() {
	t.ticker.Stop()
}

// C exposes the underlying mock ticker's channel.
func (t mockTicker) C() <-chan time.Time {
	return t.ticker.C
}
// TestPushDoubleStop verifies that a second Stop() call is harmless.
func TestPushDoubleStop(t *testing.T) {
	fixture := newFixture(t)
	ctrl := push.New(fixture.batcher, fixture.exporter, time.Second)
	ctrl.Start()
	ctrl.Stop()
	ctrl.Stop()
}
// TestPushDoubleStart verifies that a second Start() call is harmless.
func TestPushDoubleStart(t *testing.T) {
	fixture := newFixture(t)
	ctrl := push.New(fixture.batcher, fixture.exporter, time.Second)
	ctrl.Start()
	ctrl.Start()
	ctrl.Stop()
}
// TestPushTicker drives the push controller with a mock clock and
// verifies that each simulated one-second tick produces exactly one
// checkpoint/export/finish cycle carrying the expected counter sum.
func TestPushTicker(t *testing.T) {
	fix := newFixture(t)
	p := push.New(fix.batcher, fix.exporter, time.Second)
	meter := p.GetMeter("name")
	mock := mockClock{clock.NewMock()}
	p.SetClock(mock)
	ctx := context.Background()
	counter := meter.NewInt64Counter("counter")
	p.Start()
	counter.Add(ctx, 3, meter.Labels())
	// Before the clock advances nothing has been collected.
	require.Equal(t, 0, fix.batcher.checkpoints)
	require.Equal(t, 0, fix.batcher.finishes)
	require.Equal(t, 0, fix.exporter.exports)
	require.Equal(t, 0, len(fix.exporter.records))
	// Advance one period and yield so the controller goroutine runs.
	// NOTE(review): a single Gosched may be timing-sensitive on a
	// loaded machine -- confirm this is reliable in CI.
	mock.Add(time.Second)
	runtime.Gosched()
	require.Equal(t, 1, fix.batcher.checkpoints)
	require.Equal(t, 1, fix.exporter.exports)
	require.Equal(t, 1, fix.batcher.finishes)
	require.Equal(t, 1, len(fix.exporter.records))
	require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name())
	sum, err := fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum()
	require.Equal(t, int64(3), sum.AsInt64())
	require.Nil(t, err)
	// Reset captured state and repeat for a second period.
	fix.checkpointSet.Reset()
	fix.exporter.records = nil
	counter.Add(ctx, 7, meter.Labels())
	mock.Add(time.Second)
	runtime.Gosched()
	require.Equal(t, 2, fix.batcher.checkpoints)
	require.Equal(t, 2, fix.batcher.finishes)
	require.Equal(t, 2, fix.exporter.exports)
	require.Equal(t, 1, len(fix.exporter.records))
	require.Equal(t, "counter", fix.exporter.records[0].Descriptor().Name())
	sum, err = fix.exporter.records[0].Aggregator().(aggregator.Sum).Sum()
	require.Equal(t, int64(7), sum.AsInt64())
	require.Nil(t, err)
	p.Stop()
}
// TestPushExportError installs an exporter that always fails and
// verifies the export error reaches the controller's error handler
// after the first tick (and not before).
func TestPushExportError(t *testing.T) {
	fix := newFixture(t)
	fix.exporter.retErr = fmt.Errorf("Test export error")
	p := push.New(fix.batcher, fix.exporter, time.Second)
	var err error
	p.SetErrorHandler(func(sdkErr error) {
		err = sdkErr
	})
	mock := mockClock{clock.NewMock()}
	p.SetClock(mock)
	p.Start()
	runtime.Gosched()
	// No tick yet: no export, no error.
	require.Equal(t, 0, fix.exporter.exports)
	require.Nil(t, err)
	mock.Add(time.Second)
	runtime.Gosched()
	// One tick: one failed export, delivered to the handler.
	require.Equal(t, 1, fix.exporter.exports)
	require.Error(t, err)
	require.Equal(t, fix.exporter.retErr, err)
	p.Stop()
}

194
sdk/metric/correct_test.go Normal file
View File

@ -0,0 +1,194 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric_test
import (
"context"
"fmt"
"math"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
)
// correctnessBatcher is a minimal export.Batcher for the correctness
// tests: it hands out one fixed aggregator and records every
// processed export record for inspection.
type correctnessBatcher struct {
	t       *testing.T
	agg     export.Aggregator // returned by AggregatorFor; nil disables the instrument
	records []export.Record   // accumulated by Process
}

// testLabelEncoder is a trivial export.LabelEncoder used to verify
// that the SDK honors a custom encoder.
type testLabelEncoder struct{}

// AggregatorFor returns the fixture's fixed aggregator.
func (cb *correctnessBatcher) AggregatorFor(*export.Descriptor) export.Aggregator {
	return cb.agg
}

// CheckpointSet fails the test: these tests never read a checkpoint set.
func (cb *correctnessBatcher) CheckpointSet() export.CheckpointSet {
	cb.t.Fatal("Should not be called")
	return nil
}

// FinishedCollection is a no-op for these tests.
func (*correctnessBatcher) FinishedCollection() {
}

// Process appends the record for later inspection by the test.
func (cb *correctnessBatcher) Process(_ context.Context, record export.Record) error {
	cb.records = append(cb.records, record)
	return nil
}

// Encode renders labels with fmt.Sprint, deliberately different from
// the default "k=v,..." encoding.
func (testLabelEncoder) Encode(labels []core.KeyValue) string {
	return fmt.Sprint(labels)
}
// TestInputRangeTestCounter verifies that a monotonic counter rejects
// a negative update (reporting aggregator.ErrNegativeInput through
// the error handler) and that a subsequent valid update is summed and
// checkpointed.
func TestInputRangeTestCounter(t *testing.T) {
	ctx := context.Background()
	cagg := counter.New()
	batcher := &correctnessBatcher{
		t:   t,
		agg: cagg,
	}
	sdk := sdk.New(batcher, sdk.DefaultLabelEncoder())
	var sdkErr error
	sdk.SetErrorHandler(func(handleErr error) {
		sdkErr = handleErr
	})
	counter := sdk.NewInt64Counter("counter.name", metric.WithMonotonic(true))
	// A negative value on a monotonic counter is a range error.
	counter.Add(ctx, -1, sdk.Labels())
	require.Equal(t, aggregator.ErrNegativeInput, sdkErr)
	sdkErr = nil
	// The rejected update must not affect the sum.
	sdk.Collect(ctx)
	sum, err := cagg.Sum()
	require.Equal(t, int64(0), sum.AsInt64())
	require.Nil(t, err)
	// A valid update is summed and counted as one checkpoint.
	counter.Add(ctx, 1, sdk.Labels())
	checkpointed := sdk.Collect(ctx)
	sum, err = cagg.Sum()
	require.Equal(t, int64(1), sum.AsInt64())
	require.Equal(t, 1, checkpointed)
	require.Nil(t, err)
	require.Nil(t, sdkErr)
}
// TestInputRangeTestMeasure verifies that an absolute measure rejects
// negative recordings and that valid recordings are counted by the
// array aggregator.
func TestInputRangeTestMeasure(t *testing.T) {
	ctx := context.Background()
	magg := array.New()
	batcher := &correctnessBatcher{
		t:   t,
		agg: magg,
	}
	sdk := sdk.New(batcher, sdk.DefaultLabelEncoder())
	var sdkErr error
	sdk.SetErrorHandler(func(handleErr error) {
		sdkErr = handleErr
	})
	measure := sdk.NewFloat64Measure("measure.name", metric.WithAbsolute(true))
	// Absolute measures reject negative inputs.
	measure.Record(ctx, -1, sdk.Labels())
	require.Equal(t, aggregator.ErrNegativeInput, sdkErr)
	sdkErr = nil
	// The rejected recording leaves the count at zero.
	sdk.Collect(ctx)
	count, err := magg.Count()
	require.Equal(t, int64(0), count)
	require.Nil(t, err)
	// Two valid recordings are both counted; one record checkpointed.
	measure.Record(ctx, 1, sdk.Labels())
	measure.Record(ctx, 2, sdk.Labels())
	checkpointed := sdk.Collect(ctx)
	count, err = magg.Count()
	require.Equal(t, int64(2), count)
	require.Equal(t, 1, checkpointed)
	require.Nil(t, sdkErr)
	require.Nil(t, err)
}
// TestDisabledInstrument checks that a nil aggregator from
// AggregatorFor disables the instrument: recordings are dropped and
// nothing is checkpointed.
func TestDisabledInstrument(t *testing.T) {
	ctx := context.Background()
	batcher := &correctnessBatcher{t: t, agg: nil}
	meter := sdk.New(batcher, sdk.DefaultLabelEncoder())
	measure := meter.NewFloat64Measure("measure.name", metric.WithAbsolute(true))
	measure.Record(ctx, -1, meter.Labels())
	require.Equal(t, 0, meter.Collect(ctx))
}
// TestRecordNaN checks that setting a gauge to NaN is reported as an
// error through the SDK error handler.
func TestRecordNaN(t *testing.T) {
	ctx := context.Background()
	batcher := &correctnessBatcher{t: t, agg: gauge.New()}
	meter := sdk.New(batcher, sdk.DefaultLabelEncoder())
	var sdkErr error
	meter.SetErrorHandler(func(handleErr error) { sdkErr = handleErr })
	g := meter.NewFloat64Gauge("gauge.name")
	require.Nil(t, sdkErr)
	g.Set(ctx, math.NaN(), meter.Labels())
	require.Error(t, sdkErr)
}
// TestSDKLabelEncoder checks that a custom LabelEncoder supplied to
// the SDK is the one used to produce the record's encoded labels.
func TestSDKLabelEncoder(t *testing.T) {
	ctx := context.Background()
	cagg := counter.New()
	batcher := &correctnessBatcher{
		t:   t,
		agg: cagg,
	}
	sdk := sdk.New(batcher, testLabelEncoder{})
	measure := sdk.NewFloat64Measure("measure")
	measure.Record(ctx, 1, sdk.Labels(key.String("A", "B"), key.String("C", "D")))
	sdk.Collect(ctx)
	require.Equal(t, 1, len(batcher.records))
	labels := batcher.records[0].Labels()
	// Expected string is fmt.Sprint over the KeyValue slice; the
	// "{8 0 ...}" portions reflect core.KeyValue's internal layout
	// and will change if that struct changes.
	require.Equal(t, `[{A {8 0 B}} {C {8 0 D}}]`, labels.Encoded())
}
// TestDefaultLabelEncoder checks the "k=v,k=v" rendering produced by
// the SDK's default label encoder.
func TestDefaultLabelEncoder(t *testing.T) {
	enc := sdk.DefaultLabelEncoder()
	got := enc.Encode([]core.KeyValue{key.String("A", "B"), key.String("C", "D")})
	require.Equal(t, `A=B,C=D`, got)
}

View File

@ -13,48 +13,157 @@
// limitations under the License.
/*
Package metric implements the OpenTelemetry metric.Meter API. The SDK
supports configurable metrics export behavior through a collection of
export interfaces that support various export strategies, described below.
Package metric implements the OpenTelemetry `Meter` API. The SDK
supports configurable metrics export behavior through a
`export.MetricBatcher` API. Most metrics behavior is controlled
by the `MetricBatcher`, including:
The metric.Meter API consists of methods for constructing each of the
basic kinds of metric instrument. There are six types of instrument
available to the end user, comprised of three basic kinds of metric
instrument (Counter, Gauge, Measure) crossed with two kinds of number
(int64, float64).
1. Selecting the concrete type of aggregation to use
2. Receiving exported data during SDK.Collect()
The API assists the SDK by consolidating the variety of metric instruments
into a narrower interface, allowing the SDK to avoid repetition of
boilerplate. The API and SDK are separated such that an event reaching
the SDK has a uniform structure: an instrument, a label set, and a
numerical value.
The call to SDK.Collect() initiates collection. The SDK calls the
`MetricBatcher` for each current record, asking the aggregator to
export itself. Aggregators, found in `./aggregators`, are responsible
for receiving updates and exporting their current state.
To this end, the API uses a core.Number type to represent either an int64
or a float64, depending on the instrument's definition. A single
implementation interface is used for instruments, metric.InstrumentImpl,
and a single implementation interface is used for handles,
metric.HandleImpl.
The SDK.Collect() API should be called by an exporter. During the
call to Collect(), the exporter receives calls in a single-threaded
context. No locking is required because the SDK.Collect() call
prevents concurrency.
There are three entry points for events in the Metrics API: via instrument
handles, via direct instrument calls, and via BatchRecord. The SDK is
designed with handles as the primary entry point, the other two entry
points are implemented in terms of short-lived handles. For example, the
implementation of a direct call allocates a handle, operates on the
handle, and releases the handle. Similarly, the implementation of
RecordBatch uses a short-lived handle for each measurement in the batch.
The SDK uses lock-free algorithms to maintain its internal state.
There are three central data structures at work:
Internal Structure
1. A sync.Map maps unique (InstrumentID, LabelSet) to records
2. A "primary" atomic list of records
3. A "reclaim" atomic list of records
The SDK is designed with minimal use of locking, to avoid adding
contention for user-level code. For each handle, whether it is held by
user-level code or a short-lived device, there exists an internal record
managed by the SDK. Each internal record corresponds to a specific
instrument and label set combination.
Collection is oriented around epochs. The SDK internally has a
notion of the "current" epoch, which is incremented each time
Collect() is called. Records contain two atomic counter values,
the epoch in which it was last modified and the epoch in which it
was last collected. Records may be garbage collected when the
epoch in which they were last updated is less than the epoch in
which they were last collected.
A sync.Map maintains the mapping of current instruments and label sets to
internal records. To create a new handle, the SDK consults the Map to
locate an existing record, otherwise it constructs a new record. The SDK
maintains a count of the number of references to each record, ensuring
that records are not reclaimed from the Map while they are still active
from the user's perspective.
Collect() performs a record-by-record scan of all active records
and exports their current state, before incrementing the current
epoch. Collection events happen at a point in time during
`Collect()`, but all records are not collected in the same instant.
Metric collection is performed via a single-threaded call to Collect that
sweeps through all records in the SDK, checkpointing their state. When a
record is discovered that has no references and has not been updated since
the prior collection pass, it is marked for reclamation and removed from
the Map. There exists, at this moment, a race condition since another
goroutine could, in the same instant, obtain a reference to the handle.
The SDK is designed to tolerate this sort of race condition, in the name
of reducing lock contention. It is possible for more than one record with
identical instrument and label set to exist simultaneously, though only
one can be linked from the Map at a time. To avoid lost updates, the SDK
maintains two additional linked lists of records, one managed by the
collection code path and one managed by the instrumentation code path.
The SDK maintains a current epoch number, corresponding to the number of
completed collections. Each record contains the last epoch during which
it was collected and updated. These variables allow the collection code
path to detect stale records while allowing the instrumentation code path
to detect potential reclamations. When the instrumentation code path
detects a potential reclamation, it adds itself to the second linked list,
where records are saved from reclamation.
Each record has an associated aggregator, which maintains the current
state resulting from all metric events since its last checkpoint.
Aggregators may be lock-free or they may use locking, but they should
expect to be called concurrently. Because of the tolerated race condition
described above, aggregators must be capable of merging with another
aggregator of the same type.
Export Pipeline
While the SDK serves to maintain a current set of records and
coordinate collection, the behavior of a metrics export pipeline is
configured through the export types in
go.opentelemetry.io/otel/sdk/export/metric. It is important to keep
in mind the context these interfaces are called from. There are two
contexts, instrumentation context, where a user-level goroutine that
enters the SDK resulting in a new record, and collection context,
where a system-level thread performs a collection pass through the
SDK.
Descriptor is a struct that describes the metric instrument to the
export pipeline, containing the name, recommended aggregation keys,
units, description, metric kind (counter, gauge, or measure), number
kind (int64 or float64), and whether the instrument has alternate
semantics or not (i.e., monotonic=false counter, monotonic=true gauge,
absolute=false measure). A Descriptor accompanies metric data as it
passes through the export pipeline.
The AggregationSelector interface supports choosing the method of
aggregation to apply to a particular instrument. Given the
Descriptor, this AggregatorFor method returns an implementation of
Aggregator. If this interface returns nil, the metric will be
disabled. The aggregator should be matched to the capabilities of the
exporter. Selecting the aggregator for counter and gauge instruments
is relatively straightforward, but for measure instruments there are
numerous choices with different cost and quality tradeoffs.
Aggregator is an interface which implements a concrete strategy for
aggregating metric updates. Several Aggregator implementations are
provided by the SDK. Aggregators may be lock-free or use locking,
depending on their structure and semantics. Aggregators implement an
Update method, called in instrumentation context, to receive a single
metric event. Aggregators implement a Checkpoint method, called in
collection context, to save a checkpoint of the current state.
Aggregators implement a Merge method, also called in collection
context, that combines state from two aggregators into one. Each SDK
record has an associated aggregator.
Batcher is an interface which sits between the SDK and an exporter.
The Batcher embeds an AggregationSelector, used by the SDK to assign
new Aggregators. The Batcher supports a Process() API for submitting
checkpointed aggregators to the batcher, and a CheckpointSet() API
for producing a complete checkpoint for the exporter. Two default
Batcher implementations are provided, the "defaultkeys" Batcher groups
aggregate metrics by their recommended Descriptor.Keys(), the
"ungrouped" Batcher aggregates metrics at full dimensionality.
LabelEncoder is an optional optimization that allows an exporter to
provide the serialization logic for labels. This allows avoiding
duplicate serialization of labels, once as a unique key in the SDK (or
Batcher) and once in the exporter.
CheckpointSet is an interface between the Batcher and the Exporter.
After completing a collection pass, the Batcher.CheckpointSet() method
returns a CheckpointSet, which the Exporter uses to iterate over all
the updated metrics.
Record is a struct containing the state of an individual exported
metric. This is the result of one collection interface for one
instrument and one label set.
Labels is a struct containing an ordered set of labels, the
corresponding unique encoding, and the encoder that produced it.
Exporter is the final stage of an export pipeline. It is called with
a CheckpointSet capable of enumerating all the updated metrics.
Controller is not an export interface per se, but it orchestrates the
export pipeline. For example, a "push" controller will establish a
periodic timer to regularly collect and export metrics. A "pull"
controller will await a pull request before initiating metric
collection. Either way, the job of the controller is to call the SDK
Collect() method, then read the checkpoint, then invoke the exporter.
Controllers are expected to implement the public metric.MeterProvider
API, meaning they can be installed as the global Meter provider.
The purpose of the two lists: the primary list is appended-to when
new handles are created and atomically cleared during collect. The
reclaim list is used as a second chance, in case there is a race
between looking up a record and record deletion.
*/
package metric
package metric // import "go.opentelemetry.io/otel/sdk/metric"

View File

@ -0,0 +1,64 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric_test
import (
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/exporter/metric/stdout"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
// ExampleNew wires a complete export pipeline: a simple aggregation
// selector, a pretty-printing stdout exporter, a defaultkeys batcher,
// and a push controller serving as the Meter provider.
func ExampleNew() {
	selector := simple.NewWithInexpensiveMeasure()
	exporter, err := stdout.New(stdout.Options{
		PrettyPrint:    true,
		DoNotPrintTime: true, // This makes the output deterministic
	})
	if err != nil {
		panic(fmt.Sprintln("Could not initialize stdout exporter:", err))
	}
	batcher := defaultkeys.New(selector, sdk.DefaultLabelEncoder(), true)
	pusher := push.New(batcher, exporter, time.Second)
	pusher.Start()
	// Stop performs a final collect/export before the example returns,
	// which is what produces the output below.
	defer pusher.Stop()
	ctx := context.Background()
	key := key.New("key")
	meter := pusher.GetMeter("example")
	counter := meter.NewInt64Counter("a.counter", metric.WithKeys(key))
	labels := meter.Labels(key.String("value"))
	counter.Add(ctx, 100, labels)
	// Output:
	// {
	// "updates": [
	// {
	// "name": "a.counter{key=value}",
	// "sum": 100
	// }
	// ]
	// }
}

View File

@ -0,0 +1,62 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"bytes"
"sync"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
)
// defaultLabelEncoder renders label sets as "k1=v1,k2=v2", reusing
// pooled buffers to limit allocation.
type defaultLabelEncoder struct {
	// pool is a pool of labelset builders. The buffers in this
	// pool grow to a size that most label encodings will not
	// allocate new memory. This pool reduces the number of
	// allocations per new LabelSet to 3, typically, as seen in
	// the benchmarks. (It should be 2--one for the LabelSet
	// object and one for the buffer.String() here--see the extra
	// allocation in the call to sort.Stable).
	pool sync.Pool // *bytes.Buffer
}

// Compile-time check that defaultLabelEncoder implements export.LabelEncoder.
var _ export.LabelEncoder = &defaultLabelEncoder{}

// DefaultLabelEncoder returns the SDK's built-in label encoder, which
// joins labels as comma-separated "key=value" pairs.
func DefaultLabelEncoder() export.LabelEncoder {
	return &defaultLabelEncoder{
		pool: sync.Pool{
			New: func() interface{} {
				return &bytes.Buffer{}
			},
		},
	}
}
// Encode renders the label slice as "k1=v1,k2=v2" using a pooled
// buffer; buf.String() copies, so returning after Put is safe.
func (d *defaultLabelEncoder) Encode(labels []core.KeyValue) string {
	b := d.pool.Get().(*bytes.Buffer)
	defer d.pool.Put(b)
	b.Reset()
	for index, pair := range labels {
		if index != 0 {
			_, _ = b.WriteRune(',')
		}
		_, _ = b.WriteString(string(pair.Key))
		_, _ = b.WriteRune('=')
		_, _ = b.WriteString(pair.Value.Emit())
	}
	return b.String()
}

View File

@ -25,6 +25,7 @@ import (
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
)
@ -37,23 +38,31 @@ type monotoneBatcher struct {
currentTime *time.Time
}
func (m *monotoneBatcher) AggregatorFor(rec export.Record) export.Aggregator {
func (*monotoneBatcher) AggregatorFor(*export.Descriptor) export.Aggregator {
return gauge.New()
}
func (m *monotoneBatcher) Export(_ context.Context, record export.Record, agg export.Aggregator) {
require.Equal(m.t, "my.gauge.name", record.Descriptor().Name())
require.Equal(m.t, 1, len(record.Labels()))
require.Equal(m.t, "a", string(record.Labels()[0].Key))
require.Equal(m.t, "b", record.Labels()[0].Value.Emit())
func (*monotoneBatcher) CheckpointSet() export.CheckpointSet {
return nil
}
gauge := agg.(*gauge.Aggregator)
val := gauge.AsNumber()
ts := gauge.Timestamp()
func (*monotoneBatcher) FinishedCollection() {
}
func (m *monotoneBatcher) Process(_ context.Context, record export.Record) error {
require.Equal(m.t, "my.gauge.name", record.Descriptor().Name())
require.Equal(m.t, 1, record.Labels().Len())
require.Equal(m.t, "a", string(record.Labels().Ordered()[0].Key))
require.Equal(m.t, "b", record.Labels().Ordered()[0].Value.Emit())
gauge := record.Aggregator().(*gauge.Aggregator)
val, ts, err := gauge.LastValue()
require.Nil(m.t, err)
m.currentValue = &val
m.currentTime = &ts
m.collections++
return nil
}
func TestMonotoneGauge(t *testing.T) {
@ -61,7 +70,9 @@ func TestMonotoneGauge(t *testing.T) {
batcher := &monotoneBatcher{
t: t,
}
sdk := sdk.New(batcher)
sdk := sdk.New(batcher, sdk.DefaultLabelEncoder())
sdk.SetErrorHandler(func(error) { t.Fatal("Unexpected") })
gauge := sdk.NewInt64Gauge("my.gauge.name", metric.WithMonotonic(true))
@ -106,7 +117,14 @@ func TestMonotoneGauge(t *testing.T) {
require.Equal(t, 4, batcher.collections)
// Try to lower the value to 1, it will fail.
var err error
sdk.SetErrorHandler(func(sdkErr error) {
err = sdkErr
})
handle.Set(ctx, 1)
require.Equal(t, aggregator.ErrNonMonotoneInput, err)
sdk.SetErrorHandler(func(error) { t.Fatal("Unexpected") })
sdk.Collect(ctx)
// The value and timestamp are both unmodified

View File

@ -15,8 +15,9 @@
package metric
import (
"bytes"
"context"
"fmt"
"os"
"sort"
"sync"
"sync/atomic"
@ -26,24 +27,22 @@ import (
"go.opentelemetry.io/otel/api/metric"
api "go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
type (
// SDK implements the OpenTelemetry Meter API. The SDK is
// bound to a single export.MetricBatcher in `New()`.
// bound to a single export.Batcher in `New()`.
//
// The SDK supports a Collect() API to gather and export
// current data. Collect() should be arranged according to
// the exporter model. Push-based exporters will setup a
// timer to call Collect() periodically. Pull-based exporters
// the batcher model. Push-based batchers will setup a
// timer to call Collect() periodically. Pull-based batchers
// will call Collect() when a pull request arrives.
SDK struct {
// current maps `mapkey` to *record.
current sync.Map
// pool is a pool of labelset builders.
pool sync.Pool // *bytes.Buffer
// empty is the (singleton) result of Labels()
// w/ zero arguments.
empty labels
@ -56,11 +55,17 @@ type (
// incremented in `Collect()`.
currentEpoch int64
// exporter is the configured exporter+configuration.
exporter export.Batcher
// batcher is the configured batcher+configuration.
batcher export.Batcher
// lencoder determines how labels are uniquely encoded.
labelEncoder export.LabelEncoder
// collectLock prevents simultaneous calls to Collect().
collectLock sync.Mutex
// errorHandler supports delivering errors to the user.
errorHandler ErrorHandler
}
instrument struct {
@ -127,6 +132,8 @@ type (
next doublePtr
}
ErrorHandler func(error)
// singlePointer wraps an unsafe.Pointer and supports basic
// load(), store(), clear(), and swapNil() operations.
singlePtr struct {
@ -145,7 +152,6 @@ var (
_ api.LabelSet = &labels{}
_ api.InstrumentImpl = &instrument{}
_ api.HandleImpl = &record{}
_ export.Record = &record{}
// hazardRecord is used as a pointer value that indicates the
// value is not included in any list. (`nil` would be
@ -158,6 +164,10 @@ func (i *instrument) Meter() api.Meter {
return i.meter
}
func (m *SDK) SetErrorHandler(f ErrorHandler) {
m.errorHandler = f
}
func (i *instrument) acquireHandle(ls *labels) *record {
// Create lookup key for sync.Map (one allocation)
mk := mapkey{
@ -179,10 +189,9 @@ func (i *instrument) acquireHandle(ls *labels) *record {
refcount: 1,
collectedEpoch: -1,
modifiedEpoch: 0,
recorder: i.meter.batcher.AggregatorFor(i.descriptor),
}
rec.recorder = i.meter.exporter.AggregatorFor(rec)
// Load/Store: there's a memory allocation to place `mk` into
// an interface here.
if actual, loaded := i.meter.current.LoadOrStore(mk, rec); loaded {
@ -208,37 +217,38 @@ func (i *instrument) RecordOne(ctx context.Context, number core.Number, ls api.L
h.RecordOne(ctx, number)
}
// New constructs a new SDK for the given exporter. This SDK supports
// only a single exporter.
// New constructs a new SDK for the given batcher. This SDK supports
// only a single batcher.
//
// The SDK does not start any background process to collect itself
// periodically, this responsibility lies with the exporter, typically,
// periodically, this responsibility lies with the batcher, typically,
// depending on the type of export. For example, a pull-based
// exporter will call Collect() when it receives a request to scrape
// current metric values. A push-based exporter should configure its
// batcher will call Collect() when it receives a request to scrape
// current metric values. A push-based batcher should configure its
// own periodic collection.
func New(exporter export.Batcher) *SDK {
func New(batcher export.Batcher, labelEncoder export.LabelEncoder) *SDK {
m := &SDK{
pool: sync.Pool{
New: func() interface{} {
return &bytes.Buffer{}
},
},
exporter: exporter,
batcher: batcher,
labelEncoder: labelEncoder,
errorHandler: DefaultErrorHandler,
}
m.empty.meter = m
return m
}
// DefaultErrorHandler is the SDK's initial ErrorHandler (installed by
// New); it prints the error to standard error.
func DefaultErrorHandler(err error) {
	fmt.Fprintln(os.Stderr, "Metrics SDK error:", err)
}
// Labels returns a LabelSet corresponding to the arguments. Passed
// labels are de-duplicated, with last-value-wins semantics.
func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet {
// Note: This computes a canonical encoding of the labels to
// use as a map key. It happens to use the encoding used by
// statsd for labels, allowing an optimization for statsd
// exporters. This could be made configurable in the
// batchers. This could be made configurable in the
// constructor, to support the same optimization for different
// exporters.
// batchers.
// Check for empty set.
if len(kvs) == 0 {
@ -263,21 +273,7 @@ func (m *SDK) Labels(kvs ...core.KeyValue) api.LabelSet {
}
ls.sorted = ls.sorted[0:oi]
// Serialize.
buf := m.pool.Get().(*bytes.Buffer)
defer m.pool.Put(buf)
buf.Reset()
_, _ = buf.WriteRune('|')
delimiter := '#'
for _, kv := range ls.sorted {
_, _ = buf.WriteRune(delimiter)
_, _ = buf.WriteString(string(kv.Key))
_, _ = buf.WriteRune(':')
_, _ = buf.WriteString(kv.Value.Emit())
delimiter = ','
}
ls.encoded = buf.String()
ls.encoded = m.labelEncoder.Encode(ls.sorted)
return ls
}
@ -291,7 +287,7 @@ func (m *SDK) labsFor(ls api.LabelSet) *labels {
return &m.empty
}
func (m *SDK) newInstrument(name string, metricKind export.Kind, numberKind core.NumberKind, opts *api.Options) *instrument {
func (m *SDK) newInstrument(name string, metricKind export.MetricKind, numberKind core.NumberKind, opts *api.Options) *instrument {
descriptor := export.NewDescriptor(
name,
metricKind,
@ -370,10 +366,14 @@ func (m *SDK) saveFromReclaim(rec *record) {
//
// During the collection pass, the export.Batcher will receive
// one Export() call per current aggregation.
func (m *SDK) Collect(ctx context.Context) {
//
// Returns the number of records that were checkpointed.
func (m *SDK) Collect(ctx context.Context) int {
m.collectLock.Lock()
defer m.collectLock.Unlock()
checkpointed := 0
var next *record
for inuse := m.records.primary.swapNil(); inuse != nil; inuse = next {
next = inuse.next.primary.load()
@ -381,14 +381,14 @@ func (m *SDK) Collect(ctx context.Context) {
refcount := atomic.LoadInt64(&inuse.refcount)
if refcount > 0 {
m.collect(ctx, inuse)
checkpointed += m.checkpoint(ctx, inuse)
m.addPrimary(inuse)
continue
}
modified := atomic.LoadInt64(&inuse.modifiedEpoch)
collected := atomic.LoadInt64(&inuse.collectedEpoch)
m.collect(ctx, inuse)
checkpointed += m.checkpoint(ctx, inuse)
if modified >= collected {
atomic.StoreInt64(&inuse.collectedEpoch, m.currentEpoch)
@ -409,18 +409,27 @@ func (m *SDK) Collect(ctx context.Context) {
atomic.StoreInt64(&chances.reclaim, 0)
if chances.next.primary.load() == hazardRecord {
m.collect(ctx, chances)
checkpointed += m.checkpoint(ctx, chances)
m.addPrimary(chances)
}
}
m.currentEpoch++
return checkpointed
}
func (m *SDK) collect(ctx context.Context, r *record) {
if r.recorder != nil {
r.recorder.Collect(ctx, r, m.exporter)
// checkpoint checkpoints the record's aggregator, if one was
// selected, and hands the resulting export record to the batcher.
// It returns the number of records checkpointed (0 or 1). Errors
// from Process are routed to the SDK's error handler rather than
// returned.
func (m *SDK) checkpoint(ctx context.Context, r *record) int {
	if r.recorder == nil {
		// No aggregator was selected for this instrument.
		return 0
	}
	r.recorder.Checkpoint(ctx, r.descriptor)
	exportLabels := export.NewLabels(r.labels.sorted, r.labels.encoded, m.labelEncoder)
	exportRecord := export.NewRecord(r.descriptor, exportLabels, r.recorder)
	if err := m.batcher.Process(ctx, exportRecord); err != nil {
		m.errorHandler(err)
	}
	return 1
}
// RecordBatch enters a batch of metric events.
@ -439,13 +448,18 @@ func (m *SDK) GetDescriptor(inst metric.InstrumentImpl) *export.Descriptor {
return nil
}
// Meter returns the SDK that produced this label set.
func (l *labels) Meter() api.Meter {
	return l.meter
}
func (r *record) RecordOne(ctx context.Context, number core.Number) {
if r.recorder != nil {
r.recorder.Update(ctx, number, r)
if r.recorder == nil {
// The instrument is disabled according to the AggregationSelector.
return
}
if err := aggregator.RangeTest(number, r.descriptor); err != nil {
r.labels.meter.errorHandler(err)
return
}
if err := r.recorder.Update(ctx, number, r.descriptor); err != nil {
r.labels.meter.errorHandler(err)
return
}
}
@ -481,11 +495,3 @@ func (r *record) mapkey() mapkey {
encoded: r.labels.encoded,
}
}
// Descriptor returns the export descriptor of this record's instrument.
func (r *record) Descriptor() *export.Descriptor {
	return r.descriptor
}
// Labels returns the ordered labels of this record's label set.
func (r *record) Labels() []core.KeyValue {
	return r.labels.sorted
}

View File

@ -0,0 +1,100 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
import (
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
"go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount"
)
// The three simple selector implementations; each maps a metric kind
// to an aggregator in its AggregatorFor method below.
type (
	// selectorInexpensive selects maxsumcount for measures (no
	// quantile information retained).
	selectorInexpensive struct{}
	// selectorExact selects the array aggregator for measures,
	// retaining all values.
	selectorExact struct{}
	// selectorSketch selects DDSketch for measures.
	selectorSketch struct {
		// config is passed to each ddsketch.New call.
		config *ddsketch.Config
	}
)
// Compile-time checks that each selector implements
// export.AggregationSelector.
var (
	_ export.AggregationSelector = selectorInexpensive{}
	_ export.AggregationSelector = selectorSketch{}
	_ export.AggregationSelector = selectorExact{}
)
// NewWithInexpensiveMeasure returns a simple aggregation selector
// that uses counter, gauge, and maxsumcount aggregators for the three
// kinds of metric. This selector is faster and uses less memory than
// the others because maxsumcount does not aggregate quantile
// information.
func NewWithInexpensiveMeasure() export.AggregationSelector {
	var s selectorInexpensive
	return s
}
// NewWithSketchMeasure returns a simple aggregation selector that
// uses counter, gauge, and ddsketch aggregators for the three kinds
// of metric. This selector uses more cpu and memory than the
// NewWithInexpensiveMeasure because it uses one DDSketch per distinct
// measure and labelset.
func NewWithSketchMeasure(config *ddsketch.Config) export.AggregationSelector {
	s := selectorSketch{config: config}
	return s
}
// NewWithExactMeasure returns a simple aggregation selector that uses
// counter, gauge, and array behavior for the three kinds of metric.
// This selector uses more memory than the NewWithSketchMeasure
// because it aggregates an array of all values, therefore is able to
// compute exact quantiles.
func NewWithExactMeasure() export.AggregationSelector {
	var s selectorExact
	return s
}
// AggregatorFor maps the descriptor's metric kind to an aggregator:
// gauge for gauges, maxsumcount for measures, counter otherwise.
func (selectorInexpensive) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
	kind := descriptor.MetricKind()
	if kind == export.GaugeKind {
		return gauge.New()
	}
	if kind == export.MeasureKind {
		return maxsumcount.New()
	}
	return counter.New()
}
// AggregatorFor maps the descriptor's metric kind to an aggregator:
// a configured DDSketch for measures, gauge for gauges, counter
// otherwise.
func (s selectorSketch) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
	switch kind := descriptor.MetricKind(); kind {
	case export.MeasureKind:
		return ddsketch.New(s.config, descriptor)
	case export.GaugeKind:
		return gauge.New()
	default:
		return counter.New()
	}
}
// AggregatorFor maps the descriptor's metric kind to an aggregator:
// the exact array aggregator for measures, gauge for gauges, counter
// otherwise.
func (selectorExact) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
	switch descriptor.MetricKind() {
	case export.MeasureKind:
		return array.New()
	case export.GaugeKind:
		return gauge.New()
	}
	return counter.New()
}

View File

@ -0,0 +1,57 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package simple_test
import (
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/core"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
"go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
// One descriptor per metric kind; the selectors switch on
// MetricKind, so these drive each branch of AggregatorFor.
var (
	testGaugeDesc   = export.NewDescriptor("gauge", export.GaugeKind, nil, "", "", core.Int64NumberKind, false)
	testCounterDesc = export.NewDescriptor("counter", export.CounterKind, nil, "", "", core.Int64NumberKind, false)
	testMeasureDesc = export.NewDescriptor("measure", export.MeasureKind, nil, "", "", core.Int64NumberKind, false)
)
// TestInexpensiveMeasure checks the concrete aggregator type the
// inexpensive selector chooses for each metric kind.
func TestInexpensiveMeasure(t *testing.T) {
	selector := simple.NewWithInexpensiveMeasure()
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testGaugeDesc).(*gauge.Aggregator)
	})
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testCounterDesc).(*counter.Aggregator)
	})
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testMeasureDesc).(*maxsumcount.Aggregator)
	})
}
// TestSketchMeasure checks the concrete aggregator type the sketch
// selector chooses for each metric kind.
func TestSketchMeasure(t *testing.T) {
	selector := simple.NewWithSketchMeasure(ddsketch.NewDefaultConfig())
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testGaugeDesc).(*gauge.Aggregator)
	})
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testCounterDesc).(*counter.Aggregator)
	})
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testMeasureDesc).(*ddsketch.Aggregator)
	})
}
// TestExactMeasure checks the concrete aggregator type the exact
// selector chooses for each metric kind.
func TestExactMeasure(t *testing.T) {
	selector := simple.NewWithExactMeasure()
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testGaugeDesc).(*gauge.Aggregator)
	})
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testCounterDesc).(*counter.Aggregator)
	})
	require.NotPanics(t, func() {
		_ = selector.AggregatorFor(testMeasureDesc).(*array.Aggregator)
	})
}

View File

@ -36,6 +36,7 @@ import (
"go.opentelemetry.io/otel/api/metric"
api "go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
@ -222,13 +223,13 @@ func (f *testFixture) assertTest(numCollect int) {
}
func (f *testFixture) preCollect() {
// Collect calls Export in a single-threaded context. No need
// Collect calls Process in a single-threaded context. No need
// to lock this struct.
f.dupCheck = map[testKey]int{}
}
func (f *testFixture) AggregatorFor(record export.Record) export.Aggregator {
switch record.Descriptor().MetricKind() {
func (*testFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
switch descriptor.MetricKind() {
case export.CounterKind:
return counter.New()
case export.GaugeKind:
@ -238,11 +239,17 @@ func (f *testFixture) AggregatorFor(record export.Record) export.Aggregator {
}
}
func (f *testFixture) Export(ctx context.Context, record export.Record, agg export.Aggregator) {
desc := record.Descriptor()
// CheckpointSet is part of the batcher interface; this fixture does
// not expose a checkpoint set, so it returns nil.
func (*testFixture) CheckpointSet() export.CheckpointSet {
	return nil
}
// FinishedCollection is a no-op; the fixture performs its checks in
// Process instead.
func (*testFixture) FinishedCollection() {
}
func (f *testFixture) Process(_ context.Context, record export.Record) error {
key := testKey{
labels: canonicalizeLabels(record.Labels()),
descriptor: desc,
labels: canonicalizeLabels(record.Labels().Ordered()),
descriptor: record.Descriptor(),
}
if f.dupCheck[key] == 0 {
f.dupCheck[key]++
@ -252,15 +259,26 @@ func (f *testFixture) Export(ctx context.Context, record export.Record, agg expo
actual, _ := f.received.LoadOrStore(key, f.impl.newStore())
switch desc.MetricKind() {
agg := record.Aggregator()
switch record.Descriptor().MetricKind() {
case export.CounterKind:
f.impl.storeCollect(actual, agg.(*counter.Aggregator).AsNumber(), time.Time{})
counter := agg.(aggregator.Sum)
sum, err := counter.Sum()
if err != nil {
f.T.Fatal("Sum error: ", err)
}
f.impl.storeCollect(actual, sum, time.Time{})
case export.GaugeKind:
gauge := agg.(*gauge.Aggregator)
f.impl.storeCollect(actual, gauge.AsNumber(), gauge.Timestamp())
gauge := agg.(aggregator.LastValue)
lv, ts, err := gauge.LastValue()
if err != nil && err != aggregator.ErrNoLastValue {
f.T.Fatal("Last value error: ", err)
}
f.impl.storeCollect(actual, lv, ts)
default:
panic("Not used in this test")
}
return nil
}
func stressTest(t *testing.T, impl testImpl) {
@ -272,7 +290,7 @@ func stressTest(t *testing.T, impl testImpl) {
lused: map[string]bool{},
}
cc := concurrency()
sdk := sdk.New(fixture)
sdk := sdk.New(fixture, sdk.DefaultLabelEncoder())
fixture.wg.Add(cc + 1)
for i := 0; i < cc; i++ {