2020-03-24 07:41:10 +02:00
|
|
|
// Copyright The OpenTelemetry Authors
|
2019-10-29 22:27:22 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package metric_test
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"math/rand"
|
|
|
|
"testing"
|
|
|
|
|
2020-07-28 19:47:08 +02:00
|
|
|
"go.opentelemetry.io/otel/api/global"
|
2020-05-14 01:06:03 +02:00
|
|
|
"go.opentelemetry.io/otel/api/kv"
|
2020-04-23 21:10:58 +02:00
|
|
|
"go.opentelemetry.io/otel/api/label"
|
2019-11-06 20:54:36 +02:00
|
|
|
"go.opentelemetry.io/otel/api/metric"
|
2019-11-05 23:08:55 +02:00
|
|
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
2019-11-01 20:40:29 +02:00
|
|
|
sdk "go.opentelemetry.io/otel/sdk/metric"
|
2020-06-23 21:00:15 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/metric/processor/test"
|
2019-10-29 22:27:22 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// benchFixture wires an SDK Accumulator to an API Meter for benchmarking.
// It embeds an AggregatorSelector and serves as its own Processor and
// MeterProvider, discarding every accumulation so benchmarks measure only
// the record/accumulate path.
type benchFixture struct {
	meter       metric.Meter     // API meter backed by accumulator
	accumulator *sdk.Accumulator // SDK implementation under test
	B           *testing.B
	export.AggregatorSelector // chooses aggregators for new instruments
}
|
|
|
|
|
2020-03-24 18:30:12 +02:00
|
|
|
// newFixture builds a benchFixture around b, enabling allocation
// reporting for the benchmark.
func newFixture(b *testing.B) *benchFixture {
	b.ReportAllocs()
	bf := &benchFixture{
		B:                  b,
		AggregatorSelector: test.AggregatorSelector(),
	}
	// The fixture itself is the Processor passed to the Accumulator.
	bf.accumulator = sdk.NewAccumulator(bf)
	bf.meter = metric.WrapMeterImpl(bf.accumulator, "benchmarks")
	return bf
}
|
|
|
|
|
2020-06-18 19:16:33 +02:00
|
|
|
// Process implements export.Processor. Accumulations are discarded so
// that export cost does not appear in the benchmarks.
func (f *benchFixture) Process(export.Accumulation) error {
	return nil
}
|
|
|
|
|
2020-07-28 19:47:08 +02:00
|
|
|
// Meter implements the meter-provider interface, returning the fixture's
// meter regardless of the requested name or options.
func (f *benchFixture) Meter(_ string, _ ...metric.MeterOption) metric.Meter {
	return f.meter
}
|
|
|
|
|
|
|
|
// meterMust returns a MeterMust wrapper that panics on instrument
// creation errors, keeping benchmark bodies free of error handling.
func (f *benchFixture) meterMust() metric.MeterMust {
	return metric.Must(f.meter)
}
|
|
|
|
|
2020-05-14 01:06:03 +02:00
|
|
|
func makeManyLabels(n int) [][]kv.KeyValue {
|
|
|
|
r := make([][]kv.KeyValue, n)
|
2019-11-06 20:54:36 +02:00
|
|
|
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
r[i] = makeLabels(1)
|
|
|
|
}
|
|
|
|
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
2020-05-14 01:06:03 +02:00
|
|
|
func makeLabels(n int) []kv.KeyValue {
|
2019-10-29 22:27:22 +02:00
|
|
|
used := map[string]bool{}
|
2020-05-14 01:06:03 +02:00
|
|
|
l := make([]kv.KeyValue, n)
|
2019-10-29 22:27:22 +02:00
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
var k string
|
|
|
|
for {
|
|
|
|
k = fmt.Sprint("k", rand.Intn(1000000000))
|
|
|
|
if !used[k] {
|
|
|
|
used[k] = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
2020-05-14 01:21:23 +02:00
|
|
|
l[i] = kv.Key(k).String(fmt.Sprint("v", rand.Intn(1000000000)))
|
2019-10-29 22:27:22 +02:00
|
|
|
}
|
|
|
|
return l
|
|
|
|
}
|
|
|
|
|
|
|
|
// benchmarkLabels measures a direct counter Add carrying n labels.
func benchmarkLabels(b *testing.B, n int) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(n)
	cnt := fix.meterMust().NewInt64Counter("int64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs...)
	}
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// BenchmarkInt64CounterAddWithLabels_1 benchmarks Add with 1 label.
func BenchmarkInt64CounterAddWithLabels_1(b *testing.B) {
	benchmarkLabels(b, 1)
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// BenchmarkInt64CounterAddWithLabels_2 benchmarks Add with 2 labels.
func BenchmarkInt64CounterAddWithLabels_2(b *testing.B) {
	benchmarkLabels(b, 2)
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// BenchmarkInt64CounterAddWithLabels_4 benchmarks Add with 4 labels.
func BenchmarkInt64CounterAddWithLabels_4(b *testing.B) {
	benchmarkLabels(b, 4)
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// BenchmarkInt64CounterAddWithLabels_8 benchmarks Add with 8 labels.
func BenchmarkInt64CounterAddWithLabels_8(b *testing.B) {
	benchmarkLabels(b, 8)
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// BenchmarkInt64CounterAddWithLabels_16 benchmarks Add with 16 labels.
func BenchmarkInt64CounterAddWithLabels_16(b *testing.B) {
	benchmarkLabels(b, 16)
}
|
|
|
|
|
|
|
|
// Note: performance does not depend on label set size for the
|
2020-03-27 23:06:48 +02:00
|
|
|
// benchmarks below--all are benchmarked for a single label.
|
2019-10-29 22:27:22 +02:00
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// BenchmarkAcquireNewHandle measures binding a counter to a label set
// never seen before — one fresh set per iteration.
func BenchmarkAcquireNewHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meterMust().NewInt64Counter("int64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...)
	}
}
|
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// BenchmarkAcquireExistingHandle measures re-binding label sets that
// were previously bound and released, so the records already exist.
func BenchmarkAcquireExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meterMust().NewInt64Counter("int64.counter")

	// Warm up: create (and release) every record before timing begins.
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...)
	}
}
|
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// BenchmarkAcquireReleaseExistingHandle measures a full bind+unbind
// cycle against records that already exist.
func BenchmarkAcquireReleaseExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meterMust().NewInt64Counter("int64.counter")

	// Warm up: materialize every record before timing begins.
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}
}
|
|
|
|
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
// Iterators
|
|
|
|
|
2020-05-14 01:06:03 +02:00
|
|
|
// benchmarkIteratorVar is a package-level sink that keeps the compiler
// from eliminating the label loads in benchmarkIterator.
var benchmarkIteratorVar kv.KeyValue
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
|
|
|
|
// benchmarkIterator measures iterating over a label.Set of n labels.
func benchmarkIterator(b *testing.B, n int) {
	labels := label.NewSet(makeLabels(n)...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		iter := labels.Iter()
		for iter.Next() {
			// Store into the package-level sink so the load is not
			// optimized away.
			benchmarkIteratorVar = iter.Label()
		}
	}
}
|
|
|
|
|
|
|
|
// BenchmarkIterator_0 benchmarks iterating an empty label set.
func BenchmarkIterator_0(b *testing.B) {
	benchmarkIterator(b, 0)
}
|
|
|
|
|
|
|
|
// BenchmarkIterator_1 benchmarks iterating a 1-label set.
func BenchmarkIterator_1(b *testing.B) {
	benchmarkIterator(b, 1)
}
|
|
|
|
|
|
|
|
// BenchmarkIterator_2 benchmarks iterating a 2-label set.
func BenchmarkIterator_2(b *testing.B) {
	benchmarkIterator(b, 2)
}
|
|
|
|
|
|
|
|
// BenchmarkIterator_4 benchmarks iterating a 4-label set.
func BenchmarkIterator_4(b *testing.B) {
	benchmarkIterator(b, 4)
}
|
|
|
|
|
|
|
|
// BenchmarkIterator_8 benchmarks iterating an 8-label set.
func BenchmarkIterator_8(b *testing.B) {
	benchmarkIterator(b, 8)
}
|
|
|
|
|
|
|
|
// BenchmarkIterator_16 benchmarks iterating a 16-label set.
func BenchmarkIterator_16(b *testing.B) {
	benchmarkIterator(b, 16)
}
|
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// Counters
|
|
|
|
|
2020-07-28 19:47:08 +02:00
|
|
|
func BenchmarkGlobalInt64CounterAddWithSDK(b *testing.B) {
|
|
|
|
// Compare with BenchmarkInt64CounterAdd() to see overhead of global
|
|
|
|
// package. This is in the SDK to avoid the API from depending on the
|
|
|
|
// SDK.
|
|
|
|
ctx := context.Background()
|
|
|
|
fix := newFixture(b)
|
|
|
|
|
|
|
|
sdk := global.Meter("test")
|
|
|
|
global.SetMeterProvider(fix)
|
|
|
|
|
|
|
|
labs := []kv.KeyValue{kv.String("A", "B")}
|
|
|
|
cnt := Must(sdk).NewInt64Counter("int64.counter")
|
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
|
|
|
cnt.Add(ctx, 1, labs...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// BenchmarkInt64CounterAdd measures a direct int64 counter Add with one
// label (no handle, no global indirection).
func BenchmarkInt64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meterMust().NewInt64Counter("int64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs...)
	}
}
|
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// BenchmarkInt64CounterHandleAdd measures Add through a pre-bound
// handle, skipping per-call label processing.
func BenchmarkInt64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meterMust().NewInt64Counter("int64.counter")
	handle := cnt.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1)
	}
}
|
|
|
|
|
2019-11-06 20:54:36 +02:00
|
|
|
// BenchmarkFloat64CounterAdd measures a direct float64 counter Add with
// one label.
func BenchmarkFloat64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meterMust().NewFloat64Counter("float64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1.1, labs...)
	}
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64CounterHandleAdd measures float64 counter Add through
// a pre-bound handle.
func BenchmarkFloat64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meterMust().NewFloat64Counter("float64.counter")
	handle := cnt.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1.1)
	}
}
|
|
|
|
|
2020-03-11 01:00:37 +02:00
|
|
|
// LastValue
|
2019-10-29 22:27:22 +02:00
|
|
|
|
2020-03-11 01:00:37 +02:00
|
|
|
// BenchmarkInt64LastValueAdd measures direct Record on an int64 value
// recorder aggregated as last-value.
func BenchmarkInt64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewInt64ValueRecorder("int64.lastvalue")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}
|
|
|
|
|
2020-03-11 01:00:37 +02:00
|
|
|
// BenchmarkInt64LastValueHandleAdd measures Record through a pre-bound
// handle on an int64 last-value recorder.
func BenchmarkInt64LastValueHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewInt64ValueRecorder("int64.lastvalue")
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}
|
|
|
|
|
2020-03-11 01:00:37 +02:00
|
|
|
// BenchmarkFloat64LastValueAdd measures direct Record on a float64
// last-value recorder.
func BenchmarkFloat64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewFloat64ValueRecorder("float64.lastvalue")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}
|
|
|
|
|
2020-03-11 01:00:37 +02:00
|
|
|
// BenchmarkFloat64LastValueHandleAdd measures Record through a pre-bound
// handle on a float64 last-value recorder.
func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewFloat64ValueRecorder("float64.lastvalue")
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}
|
|
|
|
|
2020-05-16 07:11:12 +02:00
|
|
|
// ValueRecorders
|
2019-10-29 22:27:22 +02:00
|
|
|
|
2020-05-16 07:11:12 +02:00
|
|
|
// benchmarkInt64ValueRecorderAdd measures direct Record on an int64
// value recorder; name selects the aggregator via the selector.
func benchmarkInt64ValueRecorderAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewInt64ValueRecorder(name)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}
|
|
|
|
|
2020-05-16 07:11:12 +02:00
|
|
|
// benchmarkInt64ValueRecorderHandleAdd measures Record through a bound
// handle on an int64 value recorder named to select an aggregator.
func benchmarkInt64ValueRecorderHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewInt64ValueRecorder(name)
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}
|
|
|
|
|
2020-05-16 07:11:12 +02:00
|
|
|
// benchmarkFloat64ValueRecorderAdd measures direct Record on a float64
// value recorder; name selects the aggregator via the selector.
func benchmarkFloat64ValueRecorderAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewFloat64ValueRecorder(name)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}
|
|
|
|
|
2020-05-16 07:11:12 +02:00
|
|
|
// benchmarkFloat64ValueRecorderHandleAdd measures Record through a bound
// handle on a float64 value recorder named to select an aggregator.
func benchmarkFloat64ValueRecorderHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meterMust().NewFloat64ValueRecorder(name)
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}
|
|
|
|
|
2020-03-05 22:15:30 +02:00
|
|
|
// Observers
|
|
|
|
|
|
|
|
// BenchmarkObserverRegistration measures the cost of registering a new
// value observer instrument. Names are pre-generated so formatting is
// excluded from the timed region.
func BenchmarkObserverRegistration(b *testing.B) {
	fix := newFixture(b)
	names := make([]string, 0, b.N)
	for i := 0; i < b.N; i++ {
		names = append(names, fmt.Sprintf("test.valueobserver.%d", i))
	}
	cb := func(_ context.Context, result metric.Int64ObserverResult) {}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		fix.meterMust().NewInt64ValueObserver(names[i], cb)
	}
}
|
|
|
|
|
2020-05-18 20:03:43 +02:00
|
|
|
// BenchmarkValueObserverObservationInt64 measures per-observation cost:
// the callback performs all b.N observations during the single Collect
// call, so each iteration's work is one Observe.
func BenchmarkValueObserverObservationInt64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meterMust().NewInt64ValueObserver("test.valueobserver", func(_ context.Context, result metric.Int64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe((int64)(i), labs...)
		}
	})

	b.ResetTimer()

	// Collect triggers the callback above, running the timed loop.
	fix.accumulator.Collect(ctx)
}
|
|
|
|
|
2020-05-18 20:03:43 +02:00
|
|
|
// BenchmarkValueObserverObservationFloat64 measures per-observation
// cost for a float64 observer; all b.N observations happen inside the
// callback during the single Collect call.
func BenchmarkValueObserverObservationFloat64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meterMust().NewFloat64ValueObserver("test.valueobserver", func(_ context.Context, result metric.Float64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe((float64)(i), labs...)
		}
	})

	b.ResetTimer()

	// Collect triggers the callback above, running the timed loop.
	fix.accumulator.Collect(ctx)
}
|
|
|
|
|
2019-10-29 22:27:22 +02:00
|
|
|
// MaxSumCount
|
|
|
|
|
|
|
|
// BenchmarkInt64MaxSumCountAdd benchmarks Record with the
// minmaxsumcount aggregator (selected by instrument name).
func BenchmarkInt64MaxSumCountAdd(b *testing.B) {
	benchmarkInt64ValueRecorderAdd(b, "int64.minmaxsumcount")
}
|
|
|
|
|
|
|
|
// BenchmarkInt64MaxSumCountHandleAdd benchmarks handle Record with the
// minmaxsumcount aggregator.
func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkInt64ValueRecorderHandleAdd(b, "int64.minmaxsumcount")
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64MaxSumCountAdd benchmarks Record with the float64
// minmaxsumcount aggregator.
func BenchmarkFloat64MaxSumCountAdd(b *testing.B) {
	benchmarkFloat64ValueRecorderAdd(b, "float64.minmaxsumcount")
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64MaxSumCountHandleAdd benchmarks handle Record with
// the float64 minmaxsumcount aggregator.
func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkFloat64ValueRecorderHandleAdd(b, "float64.minmaxsumcount")
}
|
|
|
|
|
|
|
|
// DDSketch
|
|
|
|
|
|
|
|
// BenchmarkInt64DDSketchAdd benchmarks Record with the DDSketch
// aggregator (selected by instrument name).
func BenchmarkInt64DDSketchAdd(b *testing.B) {
	benchmarkInt64ValueRecorderAdd(b, "int64.ddsketch")
}
|
|
|
|
|
|
|
|
// BenchmarkInt64DDSketchHandleAdd benchmarks handle Record with the
// DDSketch aggregator.
func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
	benchmarkInt64ValueRecorderHandleAdd(b, "int64.ddsketch")
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64DDSketchAdd benchmarks Record with the float64
// DDSketch aggregator.
func BenchmarkFloat64DDSketchAdd(b *testing.B) {
	benchmarkFloat64ValueRecorderAdd(b, "float64.ddsketch")
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64DDSketchHandleAdd benchmarks handle Record with the
// float64 DDSketch aggregator.
func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
	benchmarkFloat64ValueRecorderHandleAdd(b, "float64.ddsketch")
}
|
2019-11-05 00:24:01 +02:00
|
|
|
|
|
|
|
// Array
|
|
|
|
|
|
|
|
// BenchmarkInt64ArrayAdd benchmarks Record with the exact (array)
// aggregator (selected by instrument name).
func BenchmarkInt64ArrayAdd(b *testing.B) {
	benchmarkInt64ValueRecorderAdd(b, "int64.array")
}
|
|
|
|
|
|
|
|
// BenchmarkInt64ArrayHandleAdd benchmarks handle Record with the array
// aggregator.
func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
	benchmarkInt64ValueRecorderHandleAdd(b, "int64.array")
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64ArrayAdd benchmarks Record with the float64 array
// aggregator.
func BenchmarkFloat64ArrayAdd(b *testing.B) {
	benchmarkFloat64ValueRecorderAdd(b, "float64.array")
}
|
|
|
|
|
|
|
|
// BenchmarkFloat64ArrayHandleAdd benchmarks handle Record with the
// float64 array aggregator.
func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
	benchmarkFloat64ValueRecorderHandleAdd(b, "float64.array")
}
|
2020-03-25 17:57:40 +02:00
|
|
|
|
|
|
|
// BatchRecord
|
|
|
|
|
|
|
|
// benchmarkBatchRecord8Labels measures RecordBatch with 8 labels and
// numInst counter measurements per batch.
func benchmarkBatchRecord8Labels(b *testing.B, numInst int) {
	const numLabels = 8
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(numLabels)
	var meas []metric.Measurement

	// Build one measurement per instrument, all recorded in each batch.
	for i := 0; i < numInst; i++ {
		inst := fix.meterMust().NewInt64Counter(fmt.Sprint("int64.counter.", i))
		meas = append(meas, inst.Measurement(1))
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		fix.accumulator.RecordBatch(ctx, labs, meas...)
	}
}
|
|
|
|
|
|
|
|
// BenchmarkBatchRecord8Labels_1Instrument benchmarks an 8-label batch
// carrying 1 measurement.
func BenchmarkBatchRecord8Labels_1Instrument(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 1)
}
|
|
|
|
|
|
|
|
// BenchmarkBatchRecord_8Labels_2Instruments benchmarks an 8-label batch
// carrying 2 measurements.
func BenchmarkBatchRecord_8Labels_2Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 2)
}
|
|
|
|
|
|
|
|
// BenchmarkBatchRecord_8Labels_4Instruments benchmarks an 8-label batch
// carrying 4 measurements.
func BenchmarkBatchRecord_8Labels_4Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 4)
}
|
|
|
|
|
|
|
|
// BenchmarkBatchRecord_8Labels_8Instruments benchmarks an 8-label batch
// carrying 8 measurements.
func BenchmarkBatchRecord_8Labels_8Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 8)
}
|
2020-04-21 05:29:46 +02:00
|
|
|
|
2020-04-22 05:23:15 +02:00
|
|
|
// Record creation
|
|
|
|
|
|
|
|
// BenchmarkRepeatedDirectCalls measures the record-creation path: each
// iteration performs one Add followed by a full Collect, so records are
// re-created every cycle rather than reused.
func BenchmarkRepeatedDirectCalls(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)

	c := fix.meterMust().NewInt64Counter("int64.counter")
	k := kv.String("bench", "true")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		c.Add(ctx, 1, k)
		fix.accumulator.Collect(ctx)
	}
}
|