// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric_test

import (
	"context"
	"fmt"
	"math/rand"
	"strings"
	"testing"

	"go.opentelemetry.io/otel/api/core"
	"go.opentelemetry.io/otel/api/key"
	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	sdk "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
)

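// processFunc is the signature of the per-record callback a benchmark can
// install on the fixture to inspect records during collection.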
type processFunc func(context.Context, export.Record) error

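// benchFixture is both the benchmark harness and the export pipeline for the
// SDK under test: the methods below (AggregatorFor, Process, CheckpointSet,
// FinishedCollection) let the SDK export directly back into the fixture.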
type benchFixture struct {
	meter metric.MeterMust
	sdk   *sdk.SDK
	B     *testing.B
	pcb   processFunc
}

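// newFixture constructs an SDK that exports into the fixture itself and
// enables allocation reporting for the benchmark.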
func newFixture(b *testing.B) *benchFixture {
	b.ReportAllocs()
	bf := &benchFixture{
		B: b,
	}
	bf.sdk = sdk.New(bf)
	bf.meter = metric.Must(metric.WrapMeterImpl(bf.sdk, "benchmarks"))
	return bf
}

func (f *benchFixture) setProcessCallback(cb processFunc) {
	f.pcb = cb
}

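// AggregatorFor selects an aggregator implementation based on the instrument
// name suffix, so each benchmark can exercise a specific aggregator.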
func (*benchFixture) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
	name := descriptor.Name()
	switch {
	case strings.HasSuffix(name, "counter"):
		return sum.New()
	case strings.HasSuffix(name, "lastvalue"):
		return lastvalue.New()
	default:
		if strings.HasSuffix(name, "minmaxsumcount") {
			return minmaxsumcount.New(descriptor)
		} else if strings.HasSuffix(name, "ddsketch") {
			return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
		} else if strings.HasSuffix(name, "array") {
			return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
		}
	}
	return nil
}

func (f *benchFixture) Process(ctx context.Context, rec export.Record) error {
	if f.pcb == nil {
		return nil
	}
	return f.pcb(ctx, rec)
}

func (*benchFixture) CheckpointSet() export.CheckpointSet {
	return nil
}

func (*benchFixture) FinishedCollection() {
}

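// makeManyLabels returns n independently generated single-label sets, so the
// handle benchmarks can bind a different label set on every iteration.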
func makeManyLabels(n int) [][]core.KeyValue {
	r := make([][]core.KeyValue, n)

	for i := 0; i < n; i++ {
		r[i] = makeLabels(1)
	}

	return r
}

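// makeLabels returns n labels with unique random keys and random values.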
func makeLabels(n int) []core.KeyValue {
	used := map[string]bool{}
	l := make([]core.KeyValue, n)
	for i := 0; i < n; i++ {
		var k string
		for {
			k = fmt.Sprint("k", rand.Intn(1000000000))
			if !used[k] {
				used[k] = true
				break
			}
		}
		l[i] = key.New(k).String(fmt.Sprint("v", rand.Intn(1000000000)))
	}
	return l
}

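// benchmarkLabels measures a direct counter Add with an n-label label set
// passed on every call.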
func benchmarkLabels(b *testing.B, n int) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(n)
	cnt := fix.meter.NewInt64Counter("int64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs...)
	}
}

func BenchmarkInt64CounterAddWithLabels_1(b *testing.B) {
	benchmarkLabels(b, 1)
}

func BenchmarkInt64CounterAddWithLabels_2(b *testing.B) {
	benchmarkLabels(b, 2)
}

func BenchmarkInt64CounterAddWithLabels_4(b *testing.B) {
	benchmarkLabels(b, 4)
}

func BenchmarkInt64CounterAddWithLabels_8(b *testing.B) {
	benchmarkLabels(b, 8)
}

func BenchmarkInt64CounterAddWithLabels_16(b *testing.B) {
	benchmarkLabels(b, 16)
}

// Note: performance does not depend on label set size for the
// benchmarks below--all are benchmarked for a single label.

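// BenchmarkAcquireNewHandle measures Bind when every iteration uses a label
// set that has not been seen before, so each Bind is likely to create a new
// record.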
func BenchmarkAcquireNewHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meter.NewInt64Counter("int64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...)
	}
}

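// BenchmarkAcquireExistingHandle first binds and unbinds every label set, so
// the timed loop measures re-acquiring records that already exist.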
func BenchmarkAcquireExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meter.NewInt64Counter("int64.counter")

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...)
	}
}

func BenchmarkAcquireReleaseExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meter.NewInt64Counter("int64.counter")

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}
}

// Iterators

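// benchmarkIteratorVar is a package-level sink for the last label read in the
// iterator benchmarks, keeping the loop body from being optimized away.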
var benchmarkIteratorVar core.KeyValue

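// benchmarkIterator times label iteration from inside the Process callback:
// Collect drives a single export, and the callback copies the iterator and
// reads the first label b.N times between StartTimer and StopTimer.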
func benchmarkIterator(b *testing.B, n int) {
	fix := newFixture(b)
	fix.setProcessCallback(func(ctx context.Context, rec export.Record) error {
		var kv core.KeyValue
		li := rec.Labels().Iter()
		fix.B.StartTimer()
		for i := 0; i < fix.B.N; i++ {
			iter := li
			// test getting only the first element
			if iter.Next() {
				kv = iter.Label()
			}
		}
		fix.B.StopTimer()
		benchmarkIteratorVar = kv
		return nil
	})
	cnt := fix.meter.NewInt64Counter("int64.counter")
	ctx := context.Background()
	cnt.Add(ctx, 1, makeLabels(n)...)

	b.ResetTimer()
	fix.sdk.Collect(ctx)
}

func BenchmarkIterator_0(b *testing.B) {
	benchmarkIterator(b, 0)
}

func BenchmarkIterator_1(b *testing.B) {
	benchmarkIterator(b, 1)
}

func BenchmarkIterator_2(b *testing.B) {
	benchmarkIterator(b, 2)
}

func BenchmarkIterator_4(b *testing.B) {
	benchmarkIterator(b, 4)
}

func BenchmarkIterator_8(b *testing.B) {
	benchmarkIterator(b, 8)
}

func BenchmarkIterator_16(b *testing.B) {
	benchmarkIterator(b, 16)
}

// Counters

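// The *Add benchmarks pass the label set on every call; the *HandleAdd
// variants bind the labels once and update through the bound handle.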
func BenchmarkInt64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewInt64Counter("int64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs...)
	}
}

func BenchmarkInt64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	handle := cnt.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1)
	}
}

func BenchmarkFloat64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewFloat64Counter("float64.counter")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1.1, labs...)
	}
}

func BenchmarkFloat64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewFloat64Counter("float64.counter")
	handle := cnt.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1.1)
	}
}

// LastValue

func BenchmarkInt64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure("int64.lastvalue")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}

func BenchmarkInt64LastValueHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure("int64.lastvalue")
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}

func BenchmarkFloat64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure("float64.lastvalue")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}

func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure("float64.lastvalue")
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}

// Measures

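// The measure helpers are parameterized by instrument name; the name suffix
// selects the aggregator in AggregatorFor (minmaxsumcount, ddsketch, array).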
func benchmarkInt64MeasureAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure(name)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}

func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure(name)
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}

func benchmarkFloat64MeasureAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure(name)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}

func benchmarkFloat64MeasureHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure(name)
	handle := mea.Bind(labs...)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}

|
|
|
|
|
2020-03-05 22:15:30 +02:00
|
|
|
// Observers
|
|
|
|
|
|
|
|
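// Observer benchmarks: registration is timed per instrument, while the
// observation benchmarks run b.N Observe calls inside a single Collect.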
func BenchmarkObserverRegistration(b *testing.B) {
	fix := newFixture(b)
	names := make([]string, 0, b.N)
	for i := 0; i < b.N; i++ {
		names = append(names, fmt.Sprintf("test.observer.%d", i))
	}
	cb := func(result metric.Int64ObserverResult) {}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		fix.meter.RegisterInt64Observer(names[i], cb)
	}
}

func BenchmarkObserverObservationInt64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meter.RegisterInt64Observer("test.observer", func(result metric.Int64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe(int64(i), labs...)
		}
	})

	b.ResetTimer()

	fix.sdk.Collect(ctx)
}

func BenchmarkObserverObservationFloat64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meter.RegisterFloat64Observer("test.observer", func(result metric.Float64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe(float64(i), labs...)
		}
	})

	b.ResetTimer()

	fix.sdk.Collect(ctx)
}

// MaxSumCount

func BenchmarkInt64MaxSumCountAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.minmaxsumcount")
}

func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.minmaxsumcount")
}

func BenchmarkFloat64MaxSumCountAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.minmaxsumcount")
}

func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.minmaxsumcount")
}

// DDSketch

func BenchmarkInt64DDSketchAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.ddsketch")
}

func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.ddsketch")
}

func BenchmarkFloat64DDSketchAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.ddsketch")
}

func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.ddsketch")
}

// Array

func BenchmarkInt64ArrayAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.array")
}

func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.array")
}

func BenchmarkFloat64ArrayAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.array")
}

func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.array")
}

// BatchRecord

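// benchmarkBatchRecord8Labels records one batch of numInst counter
// measurements per iteration, all sharing a single 8-label set.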
func benchmarkBatchRecord8Labels(b *testing.B, numInst int) {
	const numLabels = 8
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(numLabels)
	var meas []metric.Measurement

	for i := 0; i < numInst; i++ {
		inst := fix.meter.NewInt64Counter(fmt.Sprint("int64.counter.", i))
		meas = append(meas, inst.Measurement(1))
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		fix.sdk.RecordBatch(ctx, labs, meas...)
	}
}

func BenchmarkBatchRecord8Labels_1Instrument(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 1)
}

func BenchmarkBatchRecord_8Labels_2Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 2)
}

func BenchmarkBatchRecord_8Labels_4Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 4)
}

func BenchmarkBatchRecord_8Labels_8Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 8)
}

// Record creation

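// BenchmarkRepeatedDirectCalls measures a direct Add followed by a full
// Collect on every iteration, including label encoding in the process
// callback.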
func BenchmarkRepeatedDirectCalls(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)

	encoder := export.NewDefaultLabelEncoder()
	fix.pcb = func(_ context.Context, rec export.Record) error {
		_ = rec.Labels().Encoded(encoder)
		return nil
	}

	c := fix.meter.NewInt64Counter("int64.counter")
	k := key.String("bench", "true")

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		c.Add(ctx, 1, k)
		fix.sdk.Collect(ctx)
	}
}

// LabelIterator

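// BenchmarkLabelIterator walks the label iterator of a 1024-label record,
// restarting it when exhausted; the `if false` block reads val so the
// variable counts as used and the loop assignment is not eliminated.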
func BenchmarkLabelIterator(b *testing.B) {
	const labelCount = 1024
	ctx := context.Background()
	fix := newFixture(b)

	var rec export.Record
	fix.pcb = func(_ context.Context, processRec export.Record) error {
		rec = processRec
		return nil
	}

	keyValues := makeLabels(labelCount)
	counter := fix.meter.NewInt64Counter("test.counter")
	counter.Add(ctx, 1, keyValues...)

	fix.sdk.Collect(ctx)

	b.ResetTimer()

	labels := rec.Labels()
	iter := labels.Iter()
	var val core.KeyValue
	for i := 0; i < b.N; i++ {
		if !iter.Next() {
			iter = labels.Iter()
			iter.Next()
		}
		val = iter.Label()
	}
	if false {
		fmt.Println(val)
	}
}