Mirror of https://github.com/open-telemetry/opentelemetry-go.git

Remove Quantile aggregation, DDSketch aggregator; add Exact timestamps (#1412)

* Remove quantile definition

* Complete removal of Quantile-related function

* Comment in selector/simple/simple.go

* Remove 'quantile' in mmsc

* Rename array->exact

* Update changelog

* Add PR number

* Rename exact benchmark

* New test for exact timestamps

* Add timestamp tests

* More test

* From feedback

* Apply suggestions from code review

Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>

* Samples->Points

* Rename

Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
Authored by Joshua MacDonald on 2021-01-12 10:19:13 -08:00, committed by GitHub
parent 9c949411ce
commit 49f699d657
38 changed files with 321 additions and 1006 deletions

View File

@ -26,6 +26,9 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Improve span duration accuracy. (#1360)
- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
- Remove duplicate checkout from GitHub Actions workflow (#1407)
- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
- Metric `exact` aggregator includes per-point timestamps (#1412)
- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
- Unify endpoint API that related to OTel exporter. (#1401)
@ -36,6 +39,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
### Removed
- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
## [0.15.0] - 2020-12-10

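The headline API change is that the renamed `exact` aggregator now records a wall-clock timestamp with every measurement and exposes the raw data through the new `aggregation.Points` interface (see the aggregation and exact-aggregator diffs below). A minimal sketch of producing and reading those timestamped points, built only from the constructors and signatures visible in this commit; the instrument name and values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
)

func main() {
	desc := metric.NewDescriptor("request.latency", metric.ValueRecorderInstrumentKind, number.Float64Kind)

	// New(2) allocates a working aggregator and a checkpoint, as in the tests below.
	aggs := exact.New(2)
	agg, ckpt := &aggs[0], &aggs[1]

	// Each Update stores the value together with time.Now().
	_ = agg.Update(context.Background(), number.NewFloat64Number(41.5), &desc)
	_ = agg.Update(context.Background(), number.NewFloat64Number(42.5), &desc)

	// Checkpoint, then read the raw points with their per-point timestamps.
	_ = agg.SynchronizedMove(ckpt, &desc)
	pts, _ := ckpt.Points()
	for _, p := range pts {
		fmt.Println(p.Time, p.Number.CoerceToFloat64(desc.NumberKind()))
	}
}
```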
View File

@ -1,13 +1,9 @@
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

View File

@ -40,7 +40,6 @@ var (
func main() {
exporter, err := stdout.NewExporter([]stdout.Option{
stdout.WithQuantiles([]float64{0.5, 0.9, 0.99}),
stdout.WithPrettyPrint(),
}...)
if err != nil {

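With WithQuantiles removed, the example needs only the remaining options. A self-contained sketch of the updated construction; the stdout import path is our assumption for this release:

```go
package main

import (
	"log"

	"go.opentelemetry.io/otel/exporters/stdout" // import path assumed for this release
)

func main() {
	// Quantiles are no longer configurable; pretty-printing, timestamps,
	// and the label encoder options still apply.
	exporter, err := stdout.NewExporter([]stdout.Option{
		stdout.WithPrettyPrint(),
	}...)
	if err != nil {
		log.Fatal(err)
	}
	_ = exporter
}
```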
View File

@ -34,7 +34,6 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@ -93,7 +92,6 @@ github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=

View File

@ -1,13 +1,9 @@
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

View File

@ -1,7 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@ -16,8 +14,6 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

View File

@ -1,7 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@ -34,8 +32,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=

View File

@ -1,8 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@ -98,8 +96,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=

View File

@ -1,6 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@ -40,7 +39,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=

View File

@ -1,8 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@ -99,8 +97,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=

View File

@ -14,6 +14,9 @@
package prometheus // import "go.opentelemetry.io/otel/exporters/metric/prometheus"
// Note that this package does not support a way to export Prometheus
// Summary data points, removed in PR#1412.
import (
"context"
"fmt"
@ -50,10 +53,13 @@ type Exporter struct {
lock sync.RWMutex
controller *pull.Controller
defaultSummaryQuantiles []float64
defaultHistogramBoundaries []float64
}
// ErrUnsupportedAggregator is returned for unrepresentable aggregator
// types (e.g., exact).
var ErrUnsupportedAggregator = fmt.Errorf("unsupported aggregator type")
var _ http.Handler = &Exporter{}
// Config is a set of options for the Prometheus exporter.
@ -76,10 +82,6 @@ type Config struct {
// If not specified the Registry will be used as default.
Gatherer prometheus.Gatherer
// DefaultSummaryQuantiles is the default summary quantiles
// to use. Use nil to specify the system-default summary quantiles.
DefaultSummaryQuantiles []float64
// DefaultHistogramBoundaries defines the default histogram bucket
// boundaries.
DefaultHistogramBoundaries []float64
@ -104,7 +106,6 @@ func NewExportPipeline(config Config, options ...pull.Option) (*Exporter, error)
handler: promhttp.HandlerFor(config.Gatherer, promhttp.HandlerOpts{}),
registerer: config.Registerer,
gatherer: config.Gatherer,
defaultSummaryQuantiles: config.DefaultSummaryQuantiles,
defaultHistogramBoundaries: config.DefaultHistogramBoundaries,
}
@ -167,15 +168,12 @@ func (e *Exporter) Controller() *pull.Controller {
return e.controller
}
// ExportKindFor implements ExportKindSelector.
func (e *Exporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) export.ExportKind {
// NOTE: Summary values should use Delta aggregation, then be
// combined into a sliding window, see the TODO below.
// NOTE: Prometheus also supports a "GaugeDelta" exposition format,
// which is expressed as a delta histogram. Need to understand if this
// should be a default behavior for ValueRecorder/ValueObserver.
return export.CumulativeExportKindSelector().ExportKindFor(desc, kind)
}
// ServeHTTP implements http.Handler.
func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
e.handler.ServeHTTP(w, r)
}
@ -187,6 +185,7 @@ type collector struct {
var _ prometheus.Collector = (*collector)(nil)
// Describe implements prometheus.Collector.
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
c.exp.lock.RLock()
defer c.exp.lock.RUnlock()
@ -226,18 +225,6 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
if err := c.exportHistogram(ch, hist, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting histogram: %w", err)
}
} else if dist, ok := agg.(aggregation.Distribution); ok {
// TODO: summaries values are never being resetted.
// As measurements are recorded, new records starts to have less impact on these summaries.
// We should implement an solution that is similar to the Prometheus Clients
// using a rolling window for summaries could be a solution.
//
// References:
// https://www.robustperception.io/how-does-a-prometheus-summary-work
// https://github.com/prometheus/client_golang/blob/fa4aa9000d2863904891d193dea354d23f3d712a/prometheus/summary.go#L135
if err := c.exportSummary(ch, dist, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting summary: %w", err)
}
} else if sum, ok := agg.(aggregation.Sum); ok && instrumentKind.Monotonic() {
if err := c.exportMonotonicCounter(ch, sum, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting monotonic counter: %w", err)
@ -250,6 +237,8 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
if err := c.exportLastValue(ch, lastValue, numberKind, desc, labels); err != nil {
return fmt.Errorf("exporting last value: %w", err)
}
} else {
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
}
return nil
})
@ -303,33 +292,6 @@ func (c *collector) exportMonotonicCounter(ch chan<- prometheus.Metric, sum aggr
return nil
}
func (c *collector) exportSummary(ch chan<- prometheus.Metric, dist aggregation.Distribution, kind number.Kind, desc *prometheus.Desc, labels []string) error {
count, err := dist.Count()
if err != nil {
return fmt.Errorf("error retrieving count: %w", err)
}
var sum number.Number
sum, err = dist.Sum()
if err != nil {
return fmt.Errorf("error retrieving distribution sum: %w", err)
}
quantiles := make(map[float64]float64)
for _, quantile := range c.exp.defaultSummaryQuantiles {
q, _ := dist.Quantile(quantile)
quantiles[quantile] = q.CoerceToFloat64(kind)
}
m, err := prometheus.NewConstSummary(desc, uint64(count), sum.CoerceToFloat64(kind), quantiles, labels...)
if err != nil {
return fmt.Errorf("error creating constant summary: %w", err)
}
ch <- m
return nil
}
func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregation.Histogram, kind number.Kind, desc *prometheus.Desc, labels []string) error {
buckets, err := hist.Histogram()
if err != nil {

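Instead of silently exporting unsupported data as a summary, the collector now falls through to ErrUnsupportedAggregator for any aggregation it cannot represent (for example the new exact kind). A simplified sketch of that dispatch shape, not the exporter's actual code; the function name and the reduced set of cases are illustrative:

```go
package promdispatch // illustrative package, not part of the exporter

import (
	"fmt"

	otelprom "go.opentelemetry.io/otel/exporters/metric/prometheus"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// exportOne mirrors the if/else chain in collector.Collect: handle the
// aggregation kinds Prometheus can represent, and report an error for
// everything else instead of emitting a summary.
func exportOne(agg aggregation.Aggregation) error {
	if hist, ok := agg.(aggregation.Histogram); ok {
		_ = hist // would become a Prometheus histogram
		return nil
	}
	if lv, ok := agg.(aggregation.LastValue); ok {
		_ = lv // would become a gauge
		return nil
	}
	// The exact aggregator, among others, lands here after this change.
	return fmt.Errorf("%w: %s", otelprom.ErrUnsupportedAggregator, agg.Kind())
}
```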
View File

@ -1,7 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@ -34,8 +32,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=

View File

@ -306,7 +306,7 @@ func Record(exportSelector export.ExportKindSelector, r export.Record) (*metricp
}
}
func gaugeArray(record export.Record, points []number.Number) (*metricpb.Metric, error) {
func gaugeArray(record export.Record, points []aggregation.Point) (*metricpb.Metric, error) {
desc := record.Descriptor()
m := &metricpb.Metric{
Name: desc.Name(),
@ -314,15 +314,15 @@ func gaugeArray(record export.Record, points []number.Number) (*metricpb.Metric,
Unit: string(desc.Unit()),
}
switch n := desc.NumberKind(); n {
switch nk := desc.NumberKind(); nk {
case number.Int64Kind:
var pts []*metricpb.IntDataPoint
for _, p := range points {
for _, s := range points {
pts = append(pts, &metricpb.IntDataPoint{
Labels: nil,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: p.CoerceToInt64(n),
Value: s.Number.CoerceToInt64(nk),
})
}
m.Data = &metricpb.Metric_IntGauge{
@ -333,12 +333,12 @@ func gaugeArray(record export.Record, points []number.Number) (*metricpb.Metric,
case number.Float64Kind:
var pts []*metricpb.DoubleDataPoint
for _, p := range points {
for _, s := range points {
pts = append(pts, &metricpb.DoubleDataPoint{
Labels: nil,
StartTimeUnixNano: toNanos(record.StartTime()),
TimeUnixNano: toNanos(record.EndTime()),
Value: p.CoerceToFloat64(n),
Value: s.Number.CoerceToFloat64(nk),
})
}
m.Data = &metricpb.Metric_DoubleGauge{
@ -348,7 +348,7 @@ func gaugeArray(record export.Record, points []number.Number) (*metricpb.Metric,
}
default:
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, nk)
}
return m, nil

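Note that gaugeArray still stamps each OTLP data point with the record's StartTime/EndTime; only the values come from the exact points, so the per-point timestamps are not yet carried into the protocol here. A hypothetical helper (the name is ours) showing the same per-kind coercion the transform performs:

```go
package transformsketch // illustrative

import (
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// pointValues converts each exact point to a float64 using the record's
// descriptor number kind, as the Float64Kind branch of gaugeArray does.
func pointValues(record export.Record, points []aggregation.Point) []float64 {
	nk := record.Descriptor().NumberKind()
	out := make([]float64, 0, len(points))
	for _, p := range points {
		out = append(out, p.Number.CoerceToFloat64(nk))
	}
	return out
}
```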
View File

@ -32,7 +32,7 @@ import (
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/export/metric/metrictest"
arrAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/array"
arrAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
lvAgg "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"

View File

@ -19,14 +19,12 @@ import (
"os"
"go.opentelemetry.io/otel/label"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)
var (
defaultWriter = os.Stdout
defaultPrettyPrint = false
defaultTimestamps = true
defaultQuantiles = []float64{0.5, 0.9, 0.99}
defaultLabelEncoder = label.DefaultEncoder()
defaultDisableTraceExport = false
defaultDisableMetricExport = false
@ -45,15 +43,6 @@ type Config struct {
// true.
Timestamps bool
// Quantiles are the desired aggregation quantiles for distribution
// summaries, used when the configured aggregator supports
// quantiles.
//
// Note: this exporter is meant as a demonstration; a real
// exporter may wish to configure quantiles on a per-metric
// basis.
Quantiles []float64
// LabelEncoder encodes the labels.
LabelEncoder label.Encoder
@ -70,7 +59,6 @@ func NewConfig(options ...Option) (Config, error) {
Writer: defaultWriter,
PrettyPrint: defaultPrettyPrint,
Timestamps: defaultTimestamps,
Quantiles: defaultQuantiles,
LabelEncoder: defaultLabelEncoder,
DisableTraceExport: defaultDisableTraceExport,
DisableMetricExport: defaultDisableMetricExport,
@ -79,11 +67,6 @@ func NewConfig(options ...Option) (Config, error) {
opt.Apply(&config)
}
for _, q := range config.Quantiles {
if q < 0 || q > 1 {
return config, aggregation.ErrInvalidQuantile
}
}
return config, nil
}
@ -128,17 +111,6 @@ func (o timestampsOption) Apply(config *Config) {
config.Timestamps = bool(o)
}
// WithQuantiles sets the quantile values to export.
func WithQuantiles(quantiles []float64) Option {
return quantilesOption(quantiles)
}
type quantilesOption []float64
func (o quantilesOption) Apply(config *Config) {
config.Quantiles = []float64(o)
}
// WithLabelEncoder sets the label encoder used in export.
func WithLabelEncoder(enc label.Encoder) Option {
return labelEncoderOption{enc}

View File

@ -77,7 +77,6 @@ func multiply(ctx context.Context, x, y int64) int64 {
func Example() {
exportOpts := []stdout.Option{
stdout.WithQuantiles([]float64{0.5}),
stdout.WithPrettyPrint(),
}
// Registers both a trace and meter Provider globally.

View File

@ -59,7 +59,7 @@ func NewExportPipeline(exportOpts []Option, pushOpts []push.Option) (trace.Trace
tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
pusher := push.New(
basic.New(
simple.NewWithExactDistribution(),
simple.NewWithInexpensiveDistribution(),
exporter,
),
exporter,

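The stdout pipeline switches selectors because an exact distribution would now surface raw timestamped points that this exporter has no summary representation for; the inexpensive selector keeps ValueRecorder data as MinMaxSumCount, matching the changelog entry above. A self-contained version of the snippet, with import paths that are our assumption for this release:

```go
package main

import (
	"log"

	"go.opentelemetry.io/otel/exporters/stdout"
	"go.opentelemetry.io/otel/sdk/metric/controller/push"
	"go.opentelemetry.io/otel/sdk/metric/processor/basic"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func main() {
	exporter, err := stdout.NewExporter()
	if err != nil {
		log.Fatal(err)
	}

	// MinMaxSumCount for ValueRecorder instruments; sums and last values
	// for the other instrument kinds.
	pusher := push.New(
		basic.New(
			simple.NewWithInexpensiveDistribution(),
			exporter,
		),
		exporter,
	)
	_ = pusher // start the controller and register its MeterProvider in real code
}
```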
View File

@ -1,13 +1,9 @@
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

View File

@ -41,17 +41,10 @@ type line struct {
Count interface{} `json:"Count,omitempty"`
LastValue interface{} `json:"Last,omitempty"`
Quantiles []quantile `json:"Quantiles,omitempty"`
// Note: this is a pointer because omitempty doesn't work when time.IsZero()
Timestamp *time.Time `json:"Timestamp,omitempty"`
}
type quantile struct {
Quantile interface{} `json:"Quantile"`
Value interface{} `json:"Value"`
}
func (e *metricExporter) ExportKindFor(desc *metric.Descriptor, kind aggregation.Kind) exportmetric.ExportKind {
return exportmetric.StatelessExportKindSelector().ExportKindFor(desc, kind)
}
@ -106,22 +99,6 @@ func (e *metricExporter) Export(_ context.Context, checkpointSet exportmetric.Ch
return err
}
expose.Min = min.AsInterface(kind)
if dist, ok := agg.(aggregation.Distribution); ok && len(e.config.Quantiles) != 0 {
summary := make([]quantile, len(e.config.Quantiles))
expose.Quantiles = summary
for i, q := range e.config.Quantiles {
value, err := dist.Quantile(q)
if err != nil {
return err
}
summary[i] = quantile{
Quantile: q,
Value: value.AsInterface(kind),
}
}
}
} else if lv, ok := agg.(aggregation.LastValue); ok {
value, timestamp, err := lv.LastValue()
if err != nil {

View File

@ -31,11 +31,8 @@ import (
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/export/metric/metrictest"
"go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
@ -78,14 +75,6 @@ func (fix testFixture) Export(checkpointSet export.CheckpointSet) {
}
}
func TestStdoutInvalidQuantile(t *testing.T) {
_, err := stdout.NewExporter(
stdout.WithQuantiles([]float64{1.1, 0.9}),
)
require.Error(t, err, "Invalid quantile error expected")
require.Equal(t, aggregation.ErrInvalidQuantile, err)
}
func TestStdoutTimestamp(t *testing.T) {
var buf bytes.Buffer
exporter, err := stdout.NewExporter(
@ -197,7 +186,7 @@ func TestStdoutValueRecorderFormat(t *testing.T) {
checkpointSet := metrictest.NewCheckpointSet(testResource)
desc := metric.NewDescriptor("test.name", metric.ValueRecorderInstrumentKind, number.Float64Kind)
aagg, ckpt := metrictest.Unslice2(array.New(2))
aagg, ckpt := metrictest.Unslice2(minmaxsumcount.New(2, &desc))
for i := 0; i < 1000; i++ {
aggregatortest.CheckedUpdate(fix.t, aagg, number.NewFloat64Number(float64(i)+0.5), &desc)
@ -215,21 +204,7 @@ func TestStdoutValueRecorderFormat(t *testing.T) {
"Min": 0.5,
"Max": 999.5,
"Sum": 500000,
"Count": 1000,
"Quantiles": [
{
"Quantile": 0.5,
"Value": 500.5
},
{
"Quantile": 0.9,
"Value": 900.5
},
{
"Quantile": 0.99,
"Value": 990.5
}
]
"Count": 1000
}
]`, fix.Output())
}
@ -255,7 +230,7 @@ func TestStdoutNoData(t *testing.T) {
})
}
runTwoAggs(metrictest.Unslice2(ddsketch.New(2, &desc, ddsketch.NewDefaultConfig())))
runTwoAggs(metrictest.Unslice2(lastvalue.New(2)))
runTwoAggs(metrictest.Unslice2(minmaxsumcount.New(2, &desc)))
}

View File

@ -34,7 +34,6 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@ -93,7 +92,6 @@ github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=

View File

@ -1,6 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@ -40,7 +39,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=

View File

@ -58,23 +58,26 @@ type (
Max() (number.Number, error)
}
// Quantile returns an exact or estimated quantile over the
// set of values that were aggregated.
Quantile interface {
Aggregation
Quantile(float64) (number.Number, error)
}
// LastValue returns the latest value that was aggregated.
LastValue interface {
Aggregation
LastValue() (number.Number, time.Time, error)
}
// Points returns the raw set of values that were aggregated.
// Points returns the raw values that were aggregated.
Points interface {
Aggregation
Points() ([]number.Number, error)
// Points returns points in the order they were
// recorded. Points are approximately ordered by
// timestamp, but this is not guaranteed.
Points() ([]Point, error)
}
// Point is a raw data point, consisting of a number and a timestamp.
Point struct {
number.Number
time.Time
}
// Buckets represents histogram buckets boundaries and counts.
@ -106,17 +109,6 @@ type (
Sum() (number.Number, error)
Count() (uint64, error)
}
// Distribution supports the Min, Max, Sum, Count, and Quantile
// interfaces.
Distribution interface {
Aggregation
Min() (number.Number, error)
Max() (number.Number, error)
Sum() (number.Number, error)
Count() (uint64, error)
Quantile(float64) (number.Number, error)
}
)
type (
@ -141,12 +133,10 @@ const (
MinMaxSumCountKind Kind = "MinMaxSumCount"
HistogramKind Kind = "Histogram"
LastValueKind Kind = "Lastvalue"
SketchKind Kind = "Sketch"
ExactKind Kind = "Exact"
)
var (
ErrInvalidQuantile = fmt.Errorf("the requested quantile is out of range")
ErrNegativeInput = fmt.Errorf("negative value is out of range for this instrument")
ErrNaNInput = fmt.Errorf("NaN value is an invalid input")
ErrInconsistentType = fmt.Errorf("inconsistent aggregator types")

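With the Distribution and Quantile interfaces gone, exporter-side code reaches ExactKind data by asserting the Points interface instead. A small consumer sketch under that assumption; the function name is ours:

```go
package aggdump // illustrative

import (
	"fmt"

	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
)

// dumpPoints prints every raw point of an exact aggregation together with
// the per-point timestamp introduced by this change.
func dumpPoints(agg aggregation.Aggregation, nk number.Kind) error {
	pts, ok := agg.(aggregation.Points)
	if !ok {
		return nil // not an ExactKind aggregation
	}
	points, err := pts.Points()
	if err != nil {
		return err
	}
	for _, p := range points {
		fmt.Println(p.Time.UnixNano(), p.Number.CoerceToFloat64(nk))
	}
	return nil
}
```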
View File

@ -5,10 +5,8 @@ go 1.14
replace go.opentelemetry.io/otel => ../
require (
github.com/DataDog/sketches-go v0.0.1
github.com/benbjohnson/clock v1.0.3
github.com/google/go-cmp v0.5.4
github.com/google/gofuzz v1.1.0 // indirect
github.com/stretchr/testify v1.6.1
go.opentelemetry.io/otel v0.15.0
)

View File

@ -1,13 +1,9 @@
github.com/DataDog/sketches-go v0.0.1 h1:RtG+76WKgZuz6FIaGsjoPePmadDBkuD/KC6+ZWu78b8=
github.com/DataDog/sketches-go v0.0.1/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60=
github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=

View File

@ -92,8 +92,6 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
// TODO: Expose Numbers in api/metric for sorting support
type Numbers struct {
// numbers has to be aligned for 64-bit atomic operations.
numbers []number.Number
@ -146,16 +144,6 @@ func (n *Numbers) Max() number.Number {
return n.numbers[len(n.numbers)-1]
}
// Median() is an alias for Quantile(0.5).
func (n *Numbers) Median() number.Number {
// Note that len(n.numbers) is 1 greater than the max element
// index, so dividing by two rounds up. This gives the
// intended definition for Quantile() in tests, which is to
// return the smallest element that is at or above the
// specified quantile.
return n.numbers[len(n.numbers)/2]
}
func (n *Numbers) Points() []number.Number {
return n.numbers
}

View File

@ -1,219 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package array // import "go.opentelemetry.io/otel/sdk/metric/aggregator/array"
import (
"context"
"math"
"sort"
"sync"
"unsafe"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
type (
// Aggregator aggregates events that form a distribution, keeping
// an array with the exact set of values.
Aggregator struct {
lock sync.Mutex
sum number.Number
points points
}
points []number.Number
)
var _ export.Aggregator = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
var _ aggregation.Distribution = &Aggregator{}
var _ aggregation.Points = &Aggregator{}
// New returns a new array aggregator, which aggregates recorded
// measurements by storing them in an array. This type uses a mutex
// for Update() and SynchronizedMove() concurrency.
func New(cnt int) []Aggregator {
return make([]Aggregator, cnt)
}
// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
return c
}
// Kind returns aggregation.ExactKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.ExactKind
}
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (number.Number, error) {
return c.sum, nil
}
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
return uint64(len(c.points)), nil
}
// Max returns the maximum value in the checkpoint.
func (c *Aggregator) Max() (number.Number, error) {
return c.points.Quantile(1)
}
// Min returns the mininum value in the checkpoint.
func (c *Aggregator) Min() (number.Number, error) {
return c.points.Quantile(0)
}
// Quantile returns the estimated quantile of data in the checkpoint.
// It is an error if `q` is less than 0 or greated than 1.
func (c *Aggregator) Quantile(q float64) (number.Number, error) {
return c.points.Quantile(q)
}
// Points returns access to the raw data set.
func (c *Aggregator) Points() ([]number.Number, error) {
return c.points, nil
}
// SynchronizedMove saves the current state to oa and resets the current state to
// the empty set, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if oa != nil && o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
c.lock.Lock()
if o != nil {
o.points = c.points
o.sum = c.sum
}
c.points = nil
c.sum = 0
c.lock.Unlock()
// TODO: This sort should be done lazily, only when quantiles
// are requested. The SDK specification says you can use this
// aggregator to simply list values in the order they were
// received as an alternative to requesting quantile information.
if o != nil {
o.sort(desc.NumberKind())
}
return nil
}
// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and SynchronizedMove()
// calls.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
c.lock.Lock()
c.points = append(c.points, number)
c.sum.AddNumber(desc.NumberKind(), number)
c.lock.Unlock()
return nil
}
// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
// Note: Current assumption is that `o` was checkpointed,
// therefore is already sorted. See the TODO above, since
// this is an open question.
c.sum.AddNumber(desc.NumberKind(), o.sum)
c.points = combine(c.points, o.points, desc.NumberKind())
return nil
}
func (c *Aggregator) sort(kind number.Kind) {
switch kind {
case number.Float64Kind:
sort.Float64s(*(*[]float64)(unsafe.Pointer(&c.points)))
case number.Int64Kind:
sort.Sort(&c.points)
default:
// NOTE: This can't happen because the SDK doesn't
// support uint64-kind metric instruments.
panic("Impossible case")
}
}
func combine(a, b points, kind number.Kind) points {
result := make(points, 0, len(a)+len(b))
for len(a) != 0 && len(b) != 0 {
if a[0].CompareNumber(kind, b[0]) < 0 {
result = append(result, a[0])
a = a[1:]
} else {
result = append(result, b[0])
b = b[1:]
}
}
result = append(result, a...)
result = append(result, b...)
return result
}
func (p *points) Len() int {
return len(*p)
}
func (p *points) Less(i, j int) bool {
// Note this is specialized for int64, because float64 is
// handled by `sort.Float64s` and uint64 numbers never appear
// in this data.
return int64((*p)[i]) < int64((*p)[j])
}
func (p *points) Swap(i, j int) {
(*p)[i], (*p)[j] = (*p)[j], (*p)[i]
}
// Quantile returns the least X such that Pr(x<X)>=q, where X is an
// element of the data set. This uses the "Nearest-Rank" definition
// of a quantile.
func (p *points) Quantile(q float64) (number.Number, error) {
if len(*p) == 0 {
return 0, aggregation.ErrNoData
}
if q < 0 || q > 1 {
return 0, aggregation.ErrInvalidQuantile
}
if q == 0 || len(*p) == 1 {
return (*p)[0], nil
} else if q == 1 {
return (*p)[len(*p)-1], nil
}
position := float64(len(*p)-1) * q
ceil := int(math.Ceil(position))
return (*p)[ceil], nil
}

View File

@ -1,155 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddsketch // import "go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
import (
"context"
"math"
"sync"
sdk "github.com/DataDog/sketches-go/ddsketch"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
// Config is an alias for the underlying DDSketch config object.
type Config = sdk.Config
// Aggregator aggregates events into a distribution.
type Aggregator struct {
lock sync.Mutex
cfg *Config
kind number.Kind
sketch *sdk.DDSketch
}
var _ export.Aggregator = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
var _ aggregation.Distribution = &Aggregator{}
// New returns a new DDSketch aggregator.
func New(cnt int, desc *metric.Descriptor, cfg *Config) []Aggregator {
if cfg == nil {
cfg = NewDefaultConfig()
}
aggs := make([]Aggregator, cnt)
for i := range aggs {
aggs[i] = Aggregator{
cfg: cfg,
kind: desc.NumberKind(),
sketch: sdk.NewDDSketch(cfg),
}
}
return aggs
}
// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
return c
}
// Kind returns aggregation.SketchKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.SketchKind
}
// NewDefaultConfig returns a new, default DDSketch config.
func NewDefaultConfig() *Config {
return sdk.NewDefaultConfig()
}
// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (number.Number, error) {
return c.toNumber(c.sketch.Sum()), nil
}
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
return uint64(c.sketch.Count()), nil
}
// Max returns the maximum value in the checkpoint.
func (c *Aggregator) Max() (number.Number, error) {
return c.Quantile(1)
}
// Min returns the minimum value in the checkpoint.
func (c *Aggregator) Min() (number.Number, error) {
return c.Quantile(0)
}
// Quantile returns the estimated quantile of data in the checkpoint.
// It is an error if `q` is less than 0 or greated than 1.
func (c *Aggregator) Quantile(q float64) (number.Number, error) {
if c.sketch.Count() == 0 {
return 0, aggregation.ErrNoData
}
f := c.sketch.Quantile(q)
if math.IsNaN(f) {
return 0, aggregation.ErrInvalidQuantile
}
return c.toNumber(f), nil
}
func (c *Aggregator) toNumber(f float64) number.Number {
if c.kind == number.Float64Kind {
return number.NewFloat64Number(f)
}
return number.NewInt64Number(int64(f))
}
// SynchronizedMove saves the current state into oa and resets the current state to
// a new sketch, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, _ *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if oa != nil && o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
replace := sdk.NewDDSketch(c.cfg)
c.lock.Lock()
if o != nil {
o.sketch = c.sketch
}
c.sketch = replace
c.lock.Unlock()
return nil
}
// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and SynchronizedMove()
// calls.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
c.lock.Lock()
defer c.lock.Unlock()
c.sketch.Add(number.CoerceToFloat64(desc.NumberKind()))
return nil
}
// Merge combines two sketches into one.
func (c *Aggregator) Merge(oa export.Aggregator, d *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
c.sketch.Merge(o.sketch)
return nil
}

View File

@ -1,221 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ddsketch
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest"
)
const count = 1000
type updateTest struct {
}
func new2(desc *metric.Descriptor) (_, _ *Aggregator) {
alloc := New(2, desc, NewDefaultConfig())
return &alloc[0], &alloc[1]
}
func new4(desc *metric.Descriptor) (_, _, _, _ *Aggregator) {
alloc := New(4, desc, NewDefaultConfig())
return &alloc[0], &alloc[1], &alloc[2], &alloc[3]
}
func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) {
kind := desc.NumberKind()
sum, err := agg.Sum()
require.NoError(t, err)
require.Equal(t, kind.Zero(), sum)
count, err := agg.Count()
require.NoError(t, err)
require.Equal(t, uint64(0), count)
max, err := agg.Max()
require.True(t, errors.Is(err, aggregation.ErrNoData))
require.Equal(t, kind.Zero(), max)
median, err := agg.Quantile(0.5)
require.True(t, errors.Is(err, aggregation.ErrNoData))
require.Equal(t, kind.Zero(), median)
min, err := agg.Min()
require.True(t, errors.Is(err, aggregation.ErrNoData))
require.Equal(t, kind.Zero(), min)
}
func (ut *updateTest) run(t *testing.T, profile aggregatortest.Profile) {
descriptor := aggregatortest.NewAggregatorTest(metric.ValueRecorderInstrumentKind, profile.NumberKind)
agg, ckpt := new2(descriptor)
all := aggregatortest.NewNumbers(profile.NumberKind)
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
aggregatortest.CheckedUpdate(t, agg, x, descriptor)
y := profile.Random(-1)
all.Append(y)
aggregatortest.CheckedUpdate(t, agg, y, descriptor)
}
err := agg.SynchronizedMove(ckpt, descriptor)
require.NoError(t, err)
checkZero(t, agg, descriptor)
all.Sort()
sum, err := ckpt.Sum()
require.Nil(t, err)
allSum := all.Sum()
require.InDelta(t,
(&allSum).CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
1,
"Same sum")
count, err := ckpt.Count()
require.Equal(t, all.Count(), count, "Same count")
require.Nil(t, err)
max, err := ckpt.Max()
require.Nil(t, err)
require.Equal(t,
all.Max(),
max,
"Same max")
median, err := ckpt.Quantile(0.5)
require.Nil(t, err)
allMedian := all.Median()
require.InDelta(t,
(&allMedian).CoerceToFloat64(profile.NumberKind),
median.CoerceToFloat64(profile.NumberKind),
10,
"Same median")
}
func TestDDSketchUpdate(t *testing.T) {
ut := updateTest{}
aggregatortest.RunProfiles(t, ut.run)
}
type mergeTest struct {
absolute bool
}
func (mt *mergeTest) run(t *testing.T, profile aggregatortest.Profile) {
descriptor := aggregatortest.NewAggregatorTest(metric.ValueRecorderInstrumentKind, profile.NumberKind)
agg1, agg2, ckpt1, ckpt2 := new4(descriptor)
all := aggregatortest.NewNumbers(profile.NumberKind)
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
aggregatortest.CheckedUpdate(t, agg1, x, descriptor)
if !mt.absolute {
y := profile.Random(-1)
all.Append(y)
aggregatortest.CheckedUpdate(t, agg1, y, descriptor)
}
}
for i := 0; i < count; i++ {
x := profile.Random(+1)
all.Append(x)
aggregatortest.CheckedUpdate(t, agg2, x, descriptor)
if !mt.absolute {
y := profile.Random(-1)
all.Append(y)
aggregatortest.CheckedUpdate(t, agg2, y, descriptor)
}
}
require.NoError(t, agg1.SynchronizedMove(ckpt1, descriptor))
require.NoError(t, agg2.SynchronizedMove(ckpt2, descriptor))
checkZero(t, agg1, descriptor)
checkZero(t, agg1, descriptor)
aggregatortest.CheckedMerge(t, ckpt1, ckpt2, descriptor)
all.Sort()
aggSum, err := ckpt1.Sum()
require.Nil(t, err)
allSum := all.Sum()
require.InDelta(t,
(&allSum).CoerceToFloat64(profile.NumberKind),
aggSum.CoerceToFloat64(profile.NumberKind),
1,
"Same sum")
count, err := ckpt1.Count()
require.Equal(t, all.Count(), count, "Same count")
require.Nil(t, err)
max, err := ckpt1.Max()
require.Nil(t, err)
require.Equal(t,
all.Max(),
max,
"Same max")
median, err := ckpt1.Quantile(0.5)
require.Nil(t, err)
allMedian := all.Median()
require.InDelta(t,
(&allMedian).CoerceToFloat64(profile.NumberKind),
median.CoerceToFloat64(profile.NumberKind),
10,
"Same median")
}
func TestDDSketchMerge(t *testing.T) {
// Test absolute and non-absolute
for _, absolute := range []bool{false, true} {
t.Run(fmt.Sprint("Absolute=", absolute), func(t *testing.T) {
mt := mergeTest{
absolute: absolute,
}
// Test integer and floating point
aggregatortest.RunProfiles(t, mt.run)
})
}
}
func TestSynchronizedMoveReset(t *testing.T) {
aggregatortest.SynchronizedMoveResetTest(
t,
metric.ValueRecorderInstrumentKind,
func(desc *metric.Descriptor) export.Aggregator {
return &New(1, desc, NewDefaultConfig())[0]
},
)
}

View File

@ -0,0 +1,130 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exact // import "go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
import (
"context"
"sync"
"time"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator"
)
type (
// Aggregator aggregates events that form a distribution, keeping
// an array with the exact set of values.
Aggregator struct {
lock sync.Mutex
samples []aggregation.Point
}
)
var _ export.Aggregator = &Aggregator{}
var _ aggregation.Points = &Aggregator{}
var _ aggregation.Count = &Aggregator{}
// New returns cnt many new exact aggregators, which aggregate recorded
// measurements by storing them in an array. This type uses a mutex
// for Update() and SynchronizedMove() concurrency.
func New(cnt int) []Aggregator {
return make([]Aggregator, cnt)
}
// Aggregation returns an interface for reading the state of this aggregator.
func (c *Aggregator) Aggregation() aggregation.Aggregation {
return c
}
// Kind returns aggregation.ExactKind.
func (c *Aggregator) Kind() aggregation.Kind {
return aggregation.ExactKind
}
// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (uint64, error) {
return uint64(len(c.samples)), nil
}
// Points returns access to the raw data set.
func (c *Aggregator) Points() ([]aggregation.Point, error) {
return c.samples, nil
}
// SynchronizedMove saves the current state to oa and resets the current state to
// the empty set, taking a lock to prevent concurrent Update() calls.
func (c *Aggregator) SynchronizedMove(oa export.Aggregator, desc *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if oa != nil && o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
c.lock.Lock()
defer c.lock.Unlock()
if o != nil {
o.samples = c.samples
}
c.samples = nil
return nil
}
// Update adds the recorded measurement to the current data set.
// Update takes a lock to prevent concurrent Update() and SynchronizedMove()
// calls.
func (c *Aggregator) Update(_ context.Context, number number.Number, desc *metric.Descriptor) error {
now := time.Now()
c.lock.Lock()
defer c.lock.Unlock()
c.samples = append(c.samples, aggregation.Point{
Number: number,
Time: now,
})
return nil
}
// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
o, _ := oa.(*Aggregator)
if o == nil {
return aggregator.NewInconsistentAggregatorError(c, oa)
}
c.samples = combine(c.samples, o.samples)
return nil
}
func combine(a, b []aggregation.Point) []aggregation.Point {
result := make([]aggregation.Point, 0, len(a)+len(b))
for len(a) != 0 && len(b) != 0 {
if a[0].Time.Before(b[0].Time) {
result = append(result, a[0])
a = a[1:]
} else {
result = append(result, b[0])
b = b[1:]
}
}
result = append(result, a...)
result = append(result, b...)
return result
}

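combine above is a two-way merge keyed on Point.Time, so merging checkpoints keeps points in recording order, which the tests below check with pts[i-1].Time.Before(pts[i].Time). A sketch of that merge path built from the signatures in this commit (number.NewInt64Number is assumed to exist alongside NewFloat64Number); the instrument name and values are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
)

func main() {
	desc := metric.NewDescriptor("job.duration", metric.ValueRecorderInstrumentKind, number.Int64Kind)

	// Two working aggregators plus their checkpoints, as in the tests' new4().
	aggs := exact.New(4)
	agg1, agg2, ckpt1, ckpt2 := &aggs[0], &aggs[1], &aggs[2], &aggs[3]

	_ = agg1.Update(context.Background(), number.NewInt64Number(1), &desc)
	_ = agg2.Update(context.Background(), number.NewInt64Number(2), &desc)
	_ = agg1.Update(context.Background(), number.NewInt64Number(3), &desc)

	_ = agg1.SynchronizedMove(ckpt1, &desc)
	_ = agg2.SynchronizedMove(ckpt2, &desc)

	// Merge folds ckpt2's points into ckpt1, interleaved by timestamp.
	_ = ckpt1.Merge(ckpt2, &desc)
	pts, _ := ckpt1.Points()
	for _, p := range pts {
		fmt.Println(p.Time.UnixNano(), p.Number.CoerceToInt64(desc.NumberKind()))
	}
}
```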
View File

@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package array
package exact
import (
"errors"
"fmt"
"math"
"testing"
"time"
"github.com/stretchr/testify/require"
@ -34,23 +34,13 @@ type updateTest struct {
}
func checkZero(t *testing.T, agg *Aggregator, desc *metric.Descriptor) {
kind := desc.NumberKind()
sum, err := agg.Sum()
require.NoError(t, err)
require.Equal(t, kind.Zero(), sum)
count, err := agg.Count()
require.NoError(t, err)
require.Equal(t, uint64(0), count)
max, err := agg.Max()
require.True(t, errors.Is(err, aggregation.ErrNoData))
require.Equal(t, kind.Zero(), max)
min, err := agg.Min()
require.True(t, errors.Is(err, aggregation.ErrNoData))
require.Equal(t, kind.Zero(), min)
pts, err := agg.Points()
require.NoError(t, err)
require.Equal(t, 0, len(pts))
}
func new2() (_, _ *Aggregator) {
@ -63,6 +53,14 @@ func new4() (_, _, _, _ *Aggregator) {
return &alloc[0], &alloc[1], &alloc[2], &alloc[3]
}
func sumOf(samples []aggregation.Point, k number.Kind) number.Number {
var n number.Number
for _, s := range samples {
n.AddNumber(k, s.Number)
}
return n
}
func (ut *updateTest) run(t *testing.T, profile aggregatortest.Profile) {
descriptor := aggregatortest.NewAggregatorTest(metric.ValueRecorderInstrumentKind, profile.NumberKind)
agg, ckpt := new2()
@ -86,32 +84,21 @@ func (ut *updateTest) run(t *testing.T, profile aggregatortest.Profile) {
all.Sort()
sum, err := ckpt.Sum()
pts, err := ckpt.Points()
require.Nil(t, err)
sum := sumOf(pts, profile.NumberKind)
allSum := all.Sum()
require.InEpsilon(t,
(&allSum).CoerceToFloat64(profile.NumberKind),
allSum.CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
0.0000001,
"Same sum")
count, err := ckpt.Count()
require.Nil(t, err)
require.Equal(t, all.Count(), count, "Same count")
min, err := ckpt.Min()
require.Nil(t, err)
require.Equal(t, all.Min(), min, "Same min")
max, err := ckpt.Max()
require.Nil(t, err)
require.Equal(t, all.Max(), max, "Same max")
qx, err := ckpt.Quantile(0.5)
require.Nil(t, err)
require.Equal(t, all.Median(), qx, "Same median")
}
func TestArrayUpdate(t *testing.T) {
func TestExactUpdate(t *testing.T) {
// Test with an odd and an even number of measurements
for count := 999; count <= 1000; count++ {
t.Run(fmt.Sprint("Odd=", count%2 == 1), func(t *testing.T) {
@ -164,34 +151,32 @@ func (mt *mergeTest) run(t *testing.T, profile aggregatortest.Profile) {
aggregatortest.CheckedMerge(t, ckpt1, ckpt2, descriptor)
all.Sort()
sum, err := ckpt1.Sum()
pts, err := ckpt1.Points()
require.Nil(t, err)
received := aggregatortest.NewNumbers(profile.NumberKind)
for i, s := range pts {
received.Append(s.Number)
if i > 0 {
require.True(t, pts[i-1].Time.Before(pts[i].Time))
}
}
allSum := all.Sum()
sum := sumOf(pts, profile.NumberKind)
require.InEpsilon(t,
(&allSum).CoerceToFloat64(profile.NumberKind),
allSum.CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
0.0000001,
"Same sum - absolute")
count, err := ckpt1.Count()
require.Nil(t, err)
require.Equal(t, all.Count(), count, "Same count - absolute")
min, err := ckpt1.Min()
require.Nil(t, err)
require.Equal(t, all.Min(), min, "Same min - absolute")
max, err := ckpt1.Max()
require.Nil(t, err)
require.Equal(t, all.Max(), max, "Same max - absolute")
qx, err := ckpt1.Quantile(0.5)
require.Nil(t, err)
require.Equal(t, all.Median(), qx, "Same median - absolute")
require.Equal(t, all, received, "Same ordered contents")
}
func TestArrayMerge(t *testing.T) {
func TestExactMerge(t *testing.T) {
// Test with an odd an even number of measurements
for count := 999; count <= 1000; count++ {
t.Run(fmt.Sprint("Odd=", count%2 == 1), func(t *testing.T) {
@ -211,22 +196,10 @@ func TestArrayMerge(t *testing.T) {
}
}
func TestArrayErrors(t *testing.T) {
func TestExactErrors(t *testing.T) {
aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) {
agg, ckpt := new2()
_, err := ckpt.Max()
require.Error(t, err)
require.Equal(t, err, aggregation.ErrNoData)
_, err = ckpt.Min()
require.Error(t, err)
require.Equal(t, err, aggregation.ErrNoData)
_, err = ckpt.Quantile(0.1)
require.Error(t, err)
require.Equal(t, err, aggregation.ErrNoData)
descriptor := aggregatortest.NewAggregatorTest(metric.ValueRecorderInstrumentKind, profile.NumberKind)
aggregatortest.CheckedUpdate(t, agg, number.Number(0), descriptor)
@ -239,22 +212,10 @@ func TestArrayErrors(t *testing.T) {
count, err := ckpt.Count()
require.Equal(t, uint64(1), count, "NaN value was not counted")
require.Nil(t, err)
num, err := ckpt.Quantile(0)
require.Nil(t, err)
require.Equal(t, num, number.Number(0))
_, err = ckpt.Quantile(-0.0001)
require.Error(t, err)
require.True(t, errors.Is(err, aggregation.ErrInvalidQuantile))
_, err = agg.Quantile(1.0001)
require.Error(t, err)
require.True(t, errors.Is(err, aggregation.ErrNoData))
})
}
func TestArrayFloat64(t *testing.T) {
func TestExactFloat64(t *testing.T) {
descriptor := aggregatortest.NewAggregatorTest(metric.ValueRecorderInstrumentKind, number.Float64Kind)
fpsf := func(sign int) []float64 {
@ -288,6 +249,8 @@ func TestArrayFloat64(t *testing.T) {
agg, ckpt := new2()
startTime := time.Now()
for _, f := range fpsf(1) {
all.Append(number.NewFloat64Number(f))
aggregatortest.CheckedUpdate(t, agg, number.NewFloat64Number(f), descriptor)
@ -298,38 +261,33 @@ func TestArrayFloat64(t *testing.T) {
aggregatortest.CheckedUpdate(t, agg, number.NewFloat64Number(f), descriptor)
}
endTime := time.Now()
require.NoError(t, agg.SynchronizedMove(ckpt, descriptor))
all.Sort()
sum, err := ckpt.Sum()
pts, err := ckpt.Points()
require.Nil(t, err)
allSum := all.Sum()
require.InEpsilon(t, (&allSum).AsFloat64(), sum.AsFloat64(), 0.0000001, "Same sum")
sum := sumOf(pts, number.Float64Kind)
require.InEpsilon(t, allSum.AsFloat64(), sum.AsFloat64(), 0.0000001, "Same sum")
count, err := ckpt.Count()
require.Equal(t, all.Count(), count, "Same count")
require.Nil(t, err)
min, err := ckpt.Min()
require.Nil(t, err)
require.Equal(t, all.Min(), min, "Same min")
max, err := ckpt.Max()
require.Nil(t, err)
require.Equal(t, all.Max(), max, "Same max")
qx, err := ckpt.Quantile(0.5)
require.Nil(t, err)
require.Equal(t, all.Median(), qx, "Same median")
po, err := ckpt.Points()
require.Nil(t, err)
require.Equal(t, all.Len(), len(po), "Points() must have same length of updates")
for i := 0; i < len(po); i++ {
require.Equal(t, all.Points()[i], po[i], "Wrong point at position %d", i)
require.Equal(t, all.Points()[i], po[i].Number, "Wrong point at position %d", i)
if i > 0 {
require.True(t, po[i-1].Time.Before(po[i].Time))
}
}
require.True(t, po[0].Time.After(startTime))
require.True(t, po[len(po)-1].Time.Before(endTime))
}
func TestSynchronizedMoveReset(t *testing.T) {
aggregatortest.SynchronizedMoveResetTest(
@ -340,3 +298,60 @@ func TestSynchronizedMoveReset(t *testing.T) {
},
)
}
func TestMergeBehavior(t *testing.T) {
aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) {
for _, forward := range []bool{false, true} {
t.Run(fmt.Sprint("Forward=", forward), func(t *testing.T) {
descriptor := aggregatortest.NewAggregatorTest(metric.ValueRecorderInstrumentKind, profile.NumberKind)
agg1, agg2, ckpt, _ := new4()
all := aggregatortest.NewNumbers(profile.NumberKind)
for i := 0; i < 100; i++ {
x1 := profile.Random(+1)
all.Append(x1)
aggregatortest.CheckedUpdate(t, agg1, x1, descriptor)
}
for i := 0; i < 100; i++ {
x2 := profile.Random(+1)
all.Append(x2)
aggregatortest.CheckedUpdate(t, agg2, x2, descriptor)
}
if forward {
aggregatortest.CheckedMerge(t, ckpt, agg1, descriptor)
aggregatortest.CheckedMerge(t, ckpt, agg2, descriptor)
} else {
aggregatortest.CheckedMerge(t, ckpt, agg2, descriptor)
aggregatortest.CheckedMerge(t, ckpt, agg1, descriptor)
}
pts, err := ckpt.Points()
require.NoError(t, err)
received := aggregatortest.NewNumbers(profile.NumberKind)
for i, s := range pts {
received.Append(s.Number)
if i > 0 {
require.True(t, pts[i-1].Time.Before(pts[i].Time))
}
}
allSum := all.Sum()
sum := sumOf(pts, profile.NumberKind)
require.InEpsilon(t,
allSum.CoerceToFloat64(profile.NumberKind),
sum.CoerceToFloat64(profile.NumberKind),
0.0000001,
"Same sum - absolute")
count, err := ckpt.Count()
require.NoError(t, err)
require.Equal(t, all.Count(), count, "Same count - absolute")
require.Equal(t, all, received, "Same ordered contents")
})
}
})
}

View File

@ -46,8 +46,7 @@ var _ export.Aggregator = &Aggregator{}
var _ aggregation.MinMaxSumCount = &Aggregator{}
// New returns a new aggregator for computing the min, max, sum, and
// count. It does not compute quantile information other than Min and
// Max.
// count.
//
// This type uses a mutex for Update() and SynchronizedMove() concurrency.
func New(cnt int, desc *metric.Descriptor) []Aggregator {
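
For contrast with the exact aggregator, a rough sketch of what this aggregator retains after a checkpoint. New's signature is the one above and the MinMaxSumCount assertion sits at the top of this file; the helper name summarize, the package clause, and the discarded errors (e.g. ErrNoData on an empty interval) are assumptions of the sketch.

package mmscsketch // illustrative package, not part of this change

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/number"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
)

// summarize shows that only min, max, sum, and count survive a checkpoint --
// no per-point values, timestamps, or quantiles.
func summarize(ctx context.Context, desc *metric.Descriptor, values []number.Number) (min, max, sum float64, count uint64) {
	aggs := minmaxsumcount.New(2, desc)
	agg, ckpt := &aggs[0], &aggs[1]
	for _, v := range values {
		_ = agg.Update(ctx, v, desc) // errors dropped for brevity
	}
	_ = agg.SynchronizedMove(ckpt, desc)

	k := desc.NumberKind()
	mn, _ := ckpt.Min() // ErrNoData if nothing was recorded
	mx, _ := ckpt.Max()
	sm, _ := ckpt.Sum()
	cnt, _ := ckpt.Count()
	return mn.CoerceToFloat64(k), mx.CoerceToFloat64(k), sm.CoerceToFloat64(k), cnt
}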

View File

@ -458,39 +458,21 @@ func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
benchmarkFloat64ValueRecorderHandleAdd(b, "float64.minmaxsumcount")
}
// DDSketch
// Exact
func BenchmarkInt64DDSketchAdd(b *testing.B) {
benchmarkInt64ValueRecorderAdd(b, "int64.sketch")
}
func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
benchmarkInt64ValueRecorderHandleAdd(b, "int64.sketch")
}
func BenchmarkFloat64DDSketchAdd(b *testing.B) {
benchmarkFloat64ValueRecorderAdd(b, "float64.sketch")
}
func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
benchmarkFloat64ValueRecorderHandleAdd(b, "float64.sketch")
}
// Array
func BenchmarkInt64ArrayAdd(b *testing.B) {
func BenchmarkInt64ExactAdd(b *testing.B) {
benchmarkInt64ValueRecorderAdd(b, "int64.exact")
}
func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
func BenchmarkInt64ExactHandleAdd(b *testing.B) {
benchmarkInt64ValueRecorderHandleAdd(b, "int64.exact")
}
func BenchmarkFloat64ArrayAdd(b *testing.B) {
func BenchmarkFloat64ExactAdd(b *testing.B) {
benchmarkFloat64ValueRecorderAdd(b, "float64.exact")
}
func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
func BenchmarkFloat64ExactHandleAdd(b *testing.B) {
benchmarkFloat64ValueRecorderHandleAdd(b, "float64.exact")
}

View File

@ -167,7 +167,7 @@ func TestInputRangeValueRecorder(t *testing.T) {
processor.accumulations = nil
checkpointed = sdk.Collect(ctx)
count, err := processor.accumulations[0].Aggregator().(aggregation.Distribution).Count()
count, err := processor.accumulations[0].Aggregator().(aggregation.Count).Count()
require.Equal(t, uint64(2), count)
require.Equal(t, 1, checkpointed)
require.Nil(t, testHandler.Flush())

View File

@ -77,7 +77,6 @@ func TestProcessor(t *testing.T) {
{kind: aggregation.HistogramKind},
{kind: aggregation.LastValueKind},
{kind: aggregation.ExactKind},
{kind: aggregation.SketchKind},
} {
t.Run(ac.kind.String(), func(t *testing.T) {
testProcessor(

View File

@ -23,10 +23,10 @@ import (
"go.opentelemetry.io/otel/label"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
@ -183,18 +183,13 @@ func (testAggregatorSelector) AggregatorFor(desc *metric.Descriptor, aggPtrs ...
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
case strings.HasSuffix(desc.Name(), ".sketch"):
aggs := ddsketch.New(len(aggPtrs), desc, ddsketch.NewDefaultConfig())
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
case strings.HasSuffix(desc.Name(), ".histogram"):
aggs := histogram.New(len(aggPtrs), desc, nil)
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
case strings.HasSuffix(desc.Name(), ".exact"):
aggs := array.New(len(aggPtrs))
aggs := exact.New(len(aggPtrs))
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
@ -260,21 +255,28 @@ func (o *Output) AddRecord(rec export.Record) error {
func (o *Output) Map() map[string]float64 {
r := make(map[string]float64)
err := o.ForEach(export.StatelessExportKindSelector(), func(record export.Record) error {
for key, value := range o.m {
encoded := value.labels.Encoded(o.labelEncoder)
rencoded := value.resource.Encoded(o.labelEncoder)
number := 0.0
if s, ok := value.aggregator.(aggregation.Sum); ok {
for key, entry := range o.m {
encoded := entry.labels.Encoded(o.labelEncoder)
rencoded := entry.resource.Encoded(o.labelEncoder)
value := 0.0
if s, ok := entry.aggregator.(aggregation.Sum); ok {
sum, _ := s.Sum()
number = sum.CoerceToFloat64(key.desc.NumberKind())
} else if l, ok := value.aggregator.(aggregation.LastValue); ok {
value = sum.CoerceToFloat64(key.desc.NumberKind())
} else if l, ok := entry.aggregator.(aggregation.LastValue); ok {
last, _, _ := l.LastValue()
number = last.CoerceToFloat64(key.desc.NumberKind())
value = last.CoerceToFloat64(key.desc.NumberKind())
} else if l, ok := entry.aggregator.(aggregation.Points); ok {
pts, _ := l.Points()
var sum number.Number
for _, s := range pts {
sum.AddNumber(key.desc.NumberKind(), s.Number)
}
value = sum.CoerceToFloat64(key.desc.NumberKind())
} else {
panic(fmt.Sprintf("Unhandled aggregator type: %T", value.aggregator))
panic(fmt.Sprintf("Unhandled aggregator type: %T", entry.aggregator))
}
name := fmt.Sprint(key.desc.Name(), "/", encoded, "/", rencoded)
r[name] = number
r[name] = value
}
return nil
})

View File

@ -17,8 +17,7 @@ package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple"
import (
"go.opentelemetry.io/otel/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
@ -28,9 +27,6 @@ import (
type (
selectorInexpensive struct{}
selectorExact struct{}
selectorSketch struct {
config *ddsketch.Config
}
selectorHistogram struct {
boundaries []float64
}
@ -38,44 +34,31 @@ type (
var (
_ export.AggregatorSelector = selectorInexpensive{}
_ export.AggregatorSelector = selectorSketch{}
_ export.AggregatorSelector = selectorExact{}
_ export.AggregatorSelector = selectorHistogram{}
)
// NewWithInexpensiveDistribution returns a simple aggregation selector
// that uses counter, minmaxsumcount and minmaxsumcount aggregators
// for the three kinds of metric. This selector is faster and uses
// less memory than the others because minmaxsumcount does not
// aggregate quantile information.
// NewWithInexpensiveDistribution returns a simple aggregator selector
// that uses minmaxsumcount aggregators for `ValueRecorder`
// instruments. This selector is faster and uses less memory than the
// others in this package because minmaxsumcount aggregators maintain
// the least information about the distribution among these choices.
func NewWithInexpensiveDistribution() export.AggregatorSelector {
return selectorInexpensive{}
}
// NewWithSketchDistribution returns a simple aggregation selector that
// uses counter, ddsketch, and ddsketch aggregators for the three
// kinds of metric. This selector uses more cpu and memory than the
// NewWithInexpensiveDistribution because it uses one DDSketch per distinct
// instrument and label set.
func NewWithSketchDistribution(config *ddsketch.Config) export.AggregatorSelector {
return selectorSketch{
config: config,
}
}
// NewWithExactDistribution returns a simple aggregation selector that uses
// counter, array, and array aggregators for the three kinds of metric.
// This selector uses more memory than the NewWithSketchDistribution
// because it aggregates an array of all values, therefore is able to
// compute exact quantiles.
// NewWithExactDistribution returns a simple aggregator selector that
// uses exact aggregators for `ValueRecorder` instruments. This
// selector uses more memory than the others in this package because
// exact aggregators maintain the most information about the
// distribution among these choices.
func NewWithExactDistribution() export.AggregatorSelector {
return selectorExact{}
}
// NewWithHistogramDistribution returns a simple aggregation selector that uses counter,
// histogram, and histogram aggregators for the three kinds of metric. This
// selector uses more memory than the NewWithInexpensiveDistribution because it
// uses a counter per bucket.
// NewWithHistogramDistribution returns a simple aggregator selector
// that uses histogram aggregators for `ValueRecorder` instruments.
// This selector is a good default choice for most metric exporters.
func NewWithHistogramDistribution(boundaries []float64) export.AggregatorSelector {
return selectorHistogram{boundaries: boundaries}
}
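
To make the trade-off between the three remaining constructors concrete, a hedged caller-side sketch: the constructor signatures and the export.AggregatorSelector type are the ones in this file, while the function name chooseSelector, the switch keys, and the bucket boundaries are placeholders.

package selectorsketch // illustrative package, not part of this change

import (
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

// chooseSelector picks one of the remaining selectors by a caller-defined key.
func chooseSelector(kind string) export.AggregatorSelector {
	switch kind {
	case "inexpensive":
		// Cheapest: ValueRecorders keep only min/max/sum/count.
		return simple.NewWithInexpensiveDistribution()
	case "exact":
		// Most detailed: ValueRecorders keep every (value, timestamp) point.
		return simple.NewWithExactDistribution()
	default:
		// Reasonable general-purpose default; boundaries here are placeholders.
		return simple.NewWithHistogramDistribution([]float64{10, 100, 1000})
	}
}

Whichever constructor is chosen, the selector is consumed through AggregatorFor(descriptor, &aggPtr), exactly as the selectorExact case below does.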
@ -108,26 +91,12 @@ func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor, aggPtrs
}
}
func (s selectorSketch) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
switch descriptor.InstrumentKind() {
case metric.ValueObserverInstrumentKind:
lastValueAggs(aggPtrs)
case metric.ValueRecorderInstrumentKind:
aggs := ddsketch.New(len(aggPtrs), descriptor, s.config)
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}
default:
sumAggs(aggPtrs)
}
}
func (selectorExact) AggregatorFor(descriptor *metric.Descriptor, aggPtrs ...*export.Aggregator) {
switch descriptor.InstrumentKind() {
case metric.ValueObserverInstrumentKind:
lastValueAggs(aggPtrs)
case metric.ValueRecorderInstrumentKind:
aggs := array.New(len(aggPtrs))
aggs := exact.New(len(aggPtrs))
for i := range aggPtrs {
*aggPtrs[i] = &aggs[i]
}

View File

@ -22,8 +22,7 @@ import (
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/number"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/exact"
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
@ -60,15 +59,9 @@ func TestInexpensiveDistribution(t *testing.T) {
testFixedSelectors(t, inex)
}
func TestSketchDistribution(t *testing.T) {
sk := simple.NewWithSketchDistribution(ddsketch.NewDefaultConfig())
require.IsType(t, (*ddsketch.Aggregator)(nil), oneAgg(sk, &testValueRecorderDesc))
testFixedSelectors(t, sk)
}
func TestExactDistribution(t *testing.T) {
ex := simple.NewWithExactDistribution()
require.IsType(t, (*array.Aggregator)(nil), oneAgg(ex, &testValueRecorderDesc))
require.IsType(t, (*exact.Aggregator)(nil), oneAgg(ex, &testValueRecorderDesc))
testFixedSelectors(t, ex)
}