// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric_test

import (
"context"
"fmt"
"math/rand"
"strings"
"testing"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
)
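
// processFunc is the callback invoked by the fixture's Process method for
// each record exported during a collection pass.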
type processFunc func(context.Context, export.Record) error
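
// benchFixture serves as the export backend handed to sdk.New: it selects
// aggregators by instrument name suffix and forwards processed records to an
// optional callback so benchmarks can inspect them.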
type benchFixture struct {
	meter metric.MeterMust
	sdk   *sdk.SDK
	B     *testing.B
	pcb   processFunc
}
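
// newFixture enables allocation reporting and builds an SDK plus a must-style
// meter backed by the fixture.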
func newFixture(b *testing.B) *benchFixture {
	b.ReportAllocs()
	bf := &benchFixture{
		B: b,
	}
	bf.sdk = sdk.New(bf)
	bf.meter = metric.Must(metric.WrapMeterImpl(bf.sdk, "benchmarks"))
	return bf
}
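
// setProcessCallback installs the callback run by Process for each exported
// record.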
func (f *benchFixture) setProcessCallback(cb processFunc) {
	f.pcb = cb
}
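
// AggregatorFor picks an aggregator from the instrument name suffix, so each
// benchmark chooses the aggregator under test through its instrument name.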
func (*benchFixture) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
	name := descriptor.Name()
	switch {
	case strings.HasSuffix(name, "counter"):
		return sum.New()
	case strings.HasSuffix(name, "lastvalue"):
		return lastvalue.New()
	default:
		if strings.HasSuffix(descriptor.Name(), "minmaxsumcount") {
			return minmaxsumcount.New(descriptor)
		} else if strings.HasSuffix(descriptor.Name(), "ddsketch") {
			return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
		} else if strings.HasSuffix(descriptor.Name(), "array") {
			return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
		}
	}
	return nil
}
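
// Process forwards the exported record to the registered callback, if any.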
func (f *benchFixture) Process(ctx context.Context, rec export.Record) error {
	if f.pcb == nil {
		return nil
	}
	return f.pcb(ctx, rec)
}
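
// CheckpointSet is unused by these benchmarks.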
func (*benchFixture) CheckpointSet() export.CheckpointSet {
	return nil
}
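
// FinishedCollection is a no-op; the benchmarks inspect records directly in
// Process.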
func (*benchFixture) FinishedCollection() {
}
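
// makeManyLabels returns n distinct single-label sets.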
func makeManyLabels(n int) [][]core.KeyValue {
	r := make([][]core.KeyValue, n)
	for i := 0; i < n; i++ {
		r[i] = makeLabels(1)
	}
	return r
}
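
// makeLabels returns n key/value pairs with random, non-repeating keys.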
func makeLabels(n int) []core.KeyValue {
	used := map[string]bool{}
	l := make([]core.KeyValue, n)
	for i := 0; i < n; i++ {
		var k string
		for {
			k = fmt.Sprint("k", rand.Intn(1000000000))
			if !used[k] {
				used[k] = true
				break
			}
		}
		l[i] = key.New(k).String(fmt.Sprint("v", rand.Intn(1000000000)))
	}
	return l
}
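
// benchmarkLabels measures unbound counter Add calls with an n-label set.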
func benchmarkLabels(b *testing.B, n int) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(n)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs...)
	}
}

func BenchmarkInt64CounterAddWithLabels_1(b *testing.B) {
	benchmarkLabels(b, 1)
}

func BenchmarkInt64CounterAddWithLabels_2(b *testing.B) {
	benchmarkLabels(b, 2)
}

func BenchmarkInt64CounterAddWithLabels_4(b *testing.B) {
	benchmarkLabels(b, 4)
}

func BenchmarkInt64CounterAddWithLabels_8(b *testing.B) {
	benchmarkLabels(b, 8)
}

func BenchmarkInt64CounterAddWithLabels_16(b *testing.B) {
	benchmarkLabels(b, 16)
}

// Note: performance does not depend on label set size for the
// benchmarks below--all are benchmarked for a single label.
func BenchmarkAcquireNewHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...)
	}
}

func BenchmarkAcquireExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...)
	}
}

func BenchmarkAcquireReleaseExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeManyLabels(b.N)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Bind(labelSets[i]...).Unbind()
	}
}

// Iterators
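
// benchmarkIteratorVar sinks the last label read so the compiler cannot
// eliminate the iteration loop below.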
var benchmarkIteratorVar core.KeyValue
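
// benchmarkIterator times reading the first label from a collected record's
// label iterator; the benchmark timer runs only inside the Process callback.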
func benchmarkIterator(b *testing.B, n int) {
	fix := newFixture(b)
	fix.setProcessCallback(func(ctx context.Context, rec export.Record) error {
		var kv core.KeyValue
		li := rec.Labels().Iter()
		fix.B.StartTimer()
		for i := 0; i < fix.B.N; i++ {
			iter := li
			// test getting only the first element
			if iter.Next() {
				kv = iter.Label()
			}
		}
		fix.B.StopTimer()
		benchmarkIteratorVar = kv
		return nil
	})
	cnt := fix.meter.NewInt64Counter("int64.counter")
	ctx := context.Background()
	cnt.Add(ctx, 1, makeLabels(n)...)
	b.ResetTimer()
	fix.sdk.Collect(ctx)
}

func BenchmarkIterator_0(b *testing.B) {
	benchmarkIterator(b, 0)
}

func BenchmarkIterator_1(b *testing.B) {
	benchmarkIterator(b, 1)
}

func BenchmarkIterator_2(b *testing.B) {
	benchmarkIterator(b, 2)
}

func BenchmarkIterator_4(b *testing.B) {
	benchmarkIterator(b, 4)
}

func BenchmarkIterator_8(b *testing.B) {
	benchmarkIterator(b, 8)
}

func BenchmarkIterator_16(b *testing.B) {
	benchmarkIterator(b, 16)
}

// Counters
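//
// The Handle variants bind the label set once and reuse the bound instrument,
// so they exclude the per-call label handling measured by the unbound Add
// benchmarks.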
func BenchmarkInt64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs...)
	}
}

func BenchmarkInt64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewInt64Counter("int64.counter")
	handle := cnt.Bind(labs...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1)
	}
}

func BenchmarkFloat64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewFloat64Counter("float64.counter")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1.1, labs...)
	}
}

func BenchmarkFloat64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	cnt := fix.meter.NewFloat64Counter("float64.counter")
	handle := cnt.Bind(labs...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1.1)
	}
}

// LastValue
func BenchmarkInt64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure("int64.lastvalue")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}

func BenchmarkInt64LastValueHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure("int64.lastvalue")
	handle := mea.Bind(labs...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}

func BenchmarkFloat64LastValueAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure("float64.lastvalue")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}

func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure("float64.lastvalue")
	handle := mea.Bind(labs...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}

// Measures
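//
// The instrument names used below end in the suffixes recognized by
// AggregatorFor, so each benchmark exercises a specific aggregator.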
func benchmarkInt64MeasureAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure(name)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs...)
	}
}

func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewInt64Measure(name)
	handle := mea.Bind(labs...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}

func benchmarkFloat64MeasureAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure(name)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs...)
	}
}

func benchmarkFloat64MeasureHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	mea := fix.meter.NewFloat64Measure(name)
	handle := mea.Bind(labs...)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}

// Observers
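//
// Observer callbacks run during Collect, so the b.N loop executes within a
// single collection pass after ResetTimer.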
func BenchmarkObserverRegistration(b *testing.B) {
	fix := newFixture(b)
	names := make([]string, 0, b.N)
	for i := 0; i < b.N; i++ {
		names = append(names, fmt.Sprintf("test.observer.%d", i))
	}
	cb := func(result metric.Int64ObserverResult) {}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fix.meter.RegisterInt64Observer(names[i], cb)
	}
}

func BenchmarkObserverObservationInt64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meter.RegisterInt64Observer("test.observer", func(result metric.Int64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe((int64)(i), labs...)
		}
	})
	b.ResetTimer()
	fix.sdk.Collect(ctx)
}

func BenchmarkObserverObservationFloat64(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(1)
	_ = fix.meter.RegisterFloat64Observer("test.observer", func(result metric.Float64ObserverResult) {
		for i := 0; i < b.N; i++ {
			result.Observe((float64)(i), labs...)
		}
	})
	b.ResetTimer()
	fix.sdk.Collect(ctx)
}

// MaxSumCount
func BenchmarkInt64MaxSumCountAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.minmaxsumcount")
}

func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.minmaxsumcount")
}

func BenchmarkFloat64MaxSumCountAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.minmaxsumcount")
}

func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.minmaxsumcount")
}

// DDSketch
func BenchmarkInt64DDSketchAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.ddsketch")
}

func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.ddsketch")
}

func BenchmarkFloat64DDSketchAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.ddsketch")
}

func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.ddsketch")
}

// Array
func BenchmarkInt64ArrayAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.array")
}

func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.array")
}

func BenchmarkFloat64ArrayAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.array")
}

func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.array")
}

// BatchRecord
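// benchmarkBatchRecord8Labels records numInst counter measurements per
// iteration through a single RecordBatch call sharing one 8-label set.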
func benchmarkBatchRecord8Labels(b *testing.B, numInst int) {
	const numLabels = 8
	ctx := context.Background()
	fix := newFixture(b)
	labs := makeLabels(numLabels)
	var meas []metric.Measurement
	for i := 0; i < numInst; i++ {
		inst := fix.meter.NewInt64Counter(fmt.Sprint("int64.counter.", i))
		meas = append(meas, inst.Measurement(1))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fix.sdk.RecordBatch(ctx, labs, meas...)
	}
}

func BenchmarkBatchRecord8Labels_1Instrument(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 1)
}

func BenchmarkBatchRecord_8Labels_2Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 2)
}

func BenchmarkBatchRecord_8Labels_4Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 4)
}

func BenchmarkBatchRecord_8Labels_8Instruments(b *testing.B) {
	benchmarkBatchRecord8Labels(b, 8)
}