opentelemetry-go/sdk/metric/benchmark_test.go
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric_test

import (
	"context"
	"fmt"
	"math/rand"
	"strings"
	"testing"

	"go.opentelemetry.io/otel/api/core"
	"go.opentelemetry.io/otel/api/key"
	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	sdk "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/counter"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/ddsketch"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/gauge"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount"
)
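
// benchFixture stands in for the SDK's batcher: AggregatorFor selects the
// aggregator under test, and the remaining callbacks are no-ops so that the
// benchmarks measure only the instrument hot path, not collection or export.
//
// The benchmarks can be run from the repository root with, for example:
//
//	go test -bench=. -benchmem ./sdk/metric/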
type benchFixture struct {
	sdk *sdk.SDK
	B   *testing.B
}
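
// newFixture wires a new SDK to the fixture and enables allocation reporting
// for the benchmark.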
func newFixture(b *testing.B) *benchFixture {
	b.ReportAllocs()
	bf := &benchFixture{
		B: b,
	}
	bf.sdk = sdk.New(bf, sdk.NewDefaultLabelEncoder())
	return bf
}
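
// AggregatorFor selects an aggregator by metric kind. For measures, the
// instrument name suffix ("maxsumcount", "ddsketch", "array") chooses the
// implementation being benchmarked; note that the "array" suffix reuses the
// ddsketch aggregator in this fixture.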
func (*benchFixture) AggregatorFor(descriptor *export.Descriptor) export.Aggregator {
	switch descriptor.MetricKind() {
	case export.CounterKind:
		return counter.New()
	case export.GaugeKind:
		return gauge.New()
	case export.MeasureKind:
		if strings.HasSuffix(descriptor.Name(), "maxsumcount") {
			return maxsumcount.New(descriptor)
		} else if strings.HasSuffix(descriptor.Name(), "ddsketch") {
			return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
		} else if strings.HasSuffix(descriptor.Name(), "array") {
			return ddsketch.New(ddsketch.NewDefaultConfig(), descriptor)
		}
	}
	return nil
}
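
// Process, CheckpointSet, and FinishedCollection are deliberate no-ops: the
// benchmarks never run a collection pass, so only instrument updates are
// measured.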
func (*benchFixture) Process(context.Context, export.Record) error {
	return nil
}

func (*benchFixture) CheckpointSet() export.CheckpointSet {
	return nil
}

func (*benchFixture) FinishedCollection() {
}
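
// makeLabelSets builds n independent single-label sets, one per benchmark
// iteration.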
func makeLabelSets(n int) [][]core.KeyValue {
	r := make([][]core.KeyValue, n)
	for i := 0; i < n; i++ {
		r[i] = makeLabels(1)
	}
	return r
}
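
// makeLabels returns n key/value pairs with unique, randomly generated keys
// so that successive label sets do not collide.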
func makeLabels(n int) []core.KeyValue {
	used := map[string]bool{}
	l := make([]core.KeyValue, n)
	for i := 0; i < n; i++ {
		var k string
		for {
			k = fmt.Sprint("k", rand.Intn(1000000000))
			if !used[k] {
				used[k] = true
				break
			}
		}
		l[i] = key.New(k).String(fmt.Sprint("v", rand.Intn(1000000000)))
	}
	return l
}
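
// benchmarkLabels measures the cost of constructing a LabelSet of n labels
// through the SDK.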
func benchmarkLabels(b *testing.B, n int) {
	fix := newFixture(b)
	labs := makeLabels(n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		fix.sdk.Labels(labs...)
	}
}
func BenchmarkLabels_1(b *testing.B) {
	benchmarkLabels(b, 1)
}

func BenchmarkLabels_2(b *testing.B) {
	benchmarkLabels(b, 2)
}

func BenchmarkLabels_4(b *testing.B) {
	benchmarkLabels(b, 4)
}

func BenchmarkLabels_8(b *testing.B) {
	benchmarkLabels(b, 8)
}

func BenchmarkLabels_16(b *testing.B) {
	benchmarkLabels(b, 16)
}
// Note: performance does not depend on label set size for the
// benchmarks below.
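
// The three benchmarks below separate the cost of binding a label set for
// the first time (AcquireNewHandle), re-acquiring a previously bound handle
// (AcquireExistingHandle), and a full acquire/release cycle
// (AcquireReleaseExistingHandle).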
func BenchmarkAcquireNewHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeLabelSets(b.N)
	cnt := fix.sdk.NewInt64Counter("int64.counter")
	labels := make([]metric.LabelSet, b.N)
	for i := 0; i < b.N; i++ {
		labels[i] = fix.sdk.Labels(labelSets[i]...)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.AcquireHandle(labels[i])
	}
}

func BenchmarkAcquireExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeLabelSets(b.N)
	cnt := fix.sdk.NewInt64Counter("int64.counter")
	labels := make([]metric.LabelSet, b.N)
	for i := 0; i < b.N; i++ {
		labels[i] = fix.sdk.Labels(labelSets[i]...)
		cnt.AcquireHandle(labels[i]).Release()
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.AcquireHandle(labels[i])
	}
}

func BenchmarkAcquireReleaseExistingHandle(b *testing.B) {
	fix := newFixture(b)
	labelSets := makeLabelSets(b.N)
	cnt := fix.sdk.NewInt64Counter("int64.counter")
	labels := make([]metric.LabelSet, b.N)
	for i := 0; i < b.N; i++ {
		labels[i] = fix.sdk.Labels(labelSets[i]...)
		cnt.AcquireHandle(labels[i]).Release()
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.AcquireHandle(labels[i]).Release()
	}
}
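
// Each instrument kind below is benchmarked two ways: recording directly
// through the instrument with an explicit LabelSet (the ...Add benchmarks)
// and recording through a pre-acquired handle (the ...HandleAdd benchmarks).
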
// Counters
func BenchmarkInt64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	cnt := fix.sdk.NewInt64Counter("int64.counter")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1, labs)
	}
}

func BenchmarkInt64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	cnt := fix.sdk.NewInt64Counter("int64.counter")
	handle := cnt.AcquireHandle(labs)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1)
	}
}

func BenchmarkFloat64CounterAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	cnt := fix.sdk.NewFloat64Counter("float64.counter")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cnt.Add(ctx, 1.1, labs)
	}
}

func BenchmarkFloat64CounterHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	cnt := fix.sdk.NewFloat64Counter("float64.counter")
	handle := cnt.AcquireHandle(labs)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Add(ctx, 1.1)
	}
}
// Gauges
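// The gauge benchmarks record via Set; the ...Add suffix simply mirrors the
// counter benchmark names.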
func BenchmarkInt64GaugeAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	gau := fix.sdk.NewInt64Gauge("int64.gauge")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		gau.Set(ctx, int64(i), labs)
	}
}

func BenchmarkInt64GaugeHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	gau := fix.sdk.NewInt64Gauge("int64.gauge")
	handle := gau.AcquireHandle(labs)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Set(ctx, int64(i))
	}
}

func BenchmarkFloat64GaugeAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	gau := fix.sdk.NewFloat64Gauge("float64.gauge")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		gau.Set(ctx, float64(i), labs)
	}
}

func BenchmarkFloat64GaugeHandleAdd(b *testing.B) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	gau := fix.sdk.NewFloat64Gauge("float64.gauge")
	handle := gau.AcquireHandle(labs)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Set(ctx, float64(i))
	}
}
// Measures
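// The measure helpers take the instrument name as a parameter so that the
// suffix-based selection in AggregatorFor determines which aggregator is
// exercised.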
func benchmarkInt64MeasureAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	mea := fix.sdk.NewInt64Measure(name)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, int64(i), labs)
	}
}

func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	mea := fix.sdk.NewInt64Measure(name)
	handle := mea.AcquireHandle(labs)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Record(ctx, int64(i))
	}
}

func benchmarkFloat64MeasureAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	mea := fix.sdk.NewFloat64Measure(name)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mea.Record(ctx, float64(i), labs)
	}
}

func benchmarkFloat64MeasureHandleAdd(b *testing.B, name string) {
	ctx := context.Background()
	fix := newFixture(b)
	labs := fix.sdk.Labels(makeLabels(1)...)
	mea := fix.sdk.NewFloat64Measure(name)
	handle := mea.AcquireHandle(labs)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		handle.Record(ctx, float64(i))
	}
}
// MaxSumCount
func BenchmarkInt64MaxSumCountAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.maxsumcount")
}

func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.maxsumcount")
}

func BenchmarkFloat64MaxSumCountAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.maxsumcount")
}

func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.maxsumcount")
}

// DDSketch
func BenchmarkInt64DDSketchAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.ddsketch")
}

func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.ddsketch")
}

func BenchmarkFloat64DDSketchAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.ddsketch")
}

func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.ddsketch")
}

// Array
func BenchmarkInt64ArrayAdd(b *testing.B) {
	benchmarkInt64MeasureAdd(b, "int64.array")
}

func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
	benchmarkInt64MeasureHandleAdd(b, "int64.array")
}

func BenchmarkFloat64ArrayAdd(b *testing.B) {
	benchmarkFloat64MeasureAdd(b, "float64.array")
}

func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
	benchmarkFloat64MeasureHandleAdd(b, "float64.array")
}