* Do not expose a slice of labels in export.Record. This is really an inconvenient implementation detail leak - we may want to store labels in a different way. Replace it with an iterator - it does not force us to use a slice of key values as storage in the long run.
* Add Len to LabelIterator. It may come in handy in several situations where we don't have access to the export.Labels object, but only to the label iterator.
* Use the reflect value label iterator for the fixed labels.
* Add a reset operation to the iterator. Makes my life easier when writing a benchmark. Might also be an alternative to cloning the iterator.
* Add benchmarks for iterators.
* Add import comment.
* Add a clone operation to the label iterator.
* Move iterator tests to a separate package.
* Add tests for cloning iterators.
* Pass the label iterator to export labels.
* Use non-addressable array reflect values. By not using the value created by `reflect.New()`, but rather by `reflect.ValueOf()`, we get a non-addressable array in the value, which does not incur an allocation cost when getting an element from the array.
* Drop the zero iterator. This can be substituted by a reflect value iterator that goes over a value with a zero-sized array.
* Add a simple iterator that implements the label iterator. In the long run this will completely replace the LabelIterator interface.
* Replace the reflect value iterator with the simple iterator.
* Pass label storage to the new export labels, not the label iterator.
* Drop the label iterator interface, rename the storage iterator to label iterator.
* Drop the clone operation from the iterator. It's a leftover from the interface times and now it's pointless - the iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from the label iterator. The sole purpose of Reset was benchmarking convenience. Now we can just copy the iterator cheaply, so there is no more need for Reset.
* Drop the noop iterator tests.
* Move the iterator tests back to the export package.
* Eagerly get the reflect value of ordered labels, so we won't get into problems when several goroutines want to iterate the same labels at the same time. Not sure if this would be a big deal, since every goroutine would compute the same reflect.Value, but concurrent writes to the same memory are bad anyway. And it doesn't cost us any extra allocations.
* Replace NewSliceLabelIterator() with a method of LabelSlice.
* Add some documentation.
* Documentation fixes.
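To make the new iterator contract concrete, here is a minimal sketch of walking a label set through the iterator alone, never touching the underlying storage. The Iter/Next/Label pattern mirrors Process() in the file below, and Len comes from the change notes above; the type name export.LabelIterator and the helper keysOf are illustrative assumptions, not verbatim SDK API.

package example

import (
	"go.opentelemetry.io/otel/api/core"
	export "go.opentelemetry.io/otel/sdk/export/metric"
)

// keysOf drains a label iterator and returns only the keys. It relies on the
// Next/Label/Len methods described in the change notes above; the helper
// itself is illustrative and not part of the SDK.
func keysOf(iter export.LabelIterator) []core.Key {
	keys := make([]core.Key, 0, iter.Len())
	for iter.Next() {
		keys = append(keys, iter.Label().Key)
	}
	return keys
}

A caller would obtain the iterator from an export record, e.g. keysOf(record.Labels().Iter()), exactly as Process does in the file below.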
169 lines
4.6 KiB
Go
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

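// Package defaultkeys provides a metric Batcher that groups records by each
// descriptor's recommended ("default") keys: recorded label values outside a
// descriptor's recommended keys are dropped, and unspecified recommended keys
// are exported with empty string values.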
package defaultkeys // import "go.opentelemetry.io/otel/sdk/metric/batcher/defaultkeys"

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel/api/core"
	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)

type (
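	// Batcher implements export.Batcher. It accumulates one aggregation
	// checkpoint per (descriptor, encoded recommended-key label set) pair.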
	Batcher struct {
		selector      export.AggregationSelector
		labelEncoder  export.LabelEncoder
		stateful      bool
		descKeyIndex  descKeyIndexMap
		aggCheckpoint aggCheckpointMap
	}

	// descKeyIndexMap is a mapping, for each Descriptor, from the
	// Key to the position in the descriptor's recommended keys.
	descKeyIndexMap map[*metric.Descriptor]map[core.Key]int

	// batchKey describes a unique metric descriptor and encoded label set.
	batchKey struct {
		descriptor *metric.Descriptor
		encoded    string
	}

	// aggCheckpointMap is a mapping from batchKey to current
	// export record. If the batcher is stateful, this map is
	// never cleared.
	aggCheckpointMap map[batchKey]export.Record

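	// checkpointSet implements export.CheckpointSet over the Batcher's
	// current map of aggregation checkpoints.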
	checkpointSet struct {
		aggCheckpointMap aggCheckpointMap
		labelEncoder     export.LabelEncoder
	}
)

var _ export.Batcher = &Batcher{}
var _ export.CheckpointSet = &checkpointSet{}

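// New returns a Batcher that groups records by each descriptor's recommended
// keys, using labelEncoder to compute the grouping key. When stateful is
// true, checkpoints are retained across collection intervals; otherwise they
// are cleared by FinishedCollection.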
func New(selector export.AggregationSelector, labelEncoder export.LabelEncoder, stateful bool) *Batcher {
	return &Batcher{
		selector:      selector,
		labelEncoder:  labelEncoder,
		descKeyIndex:  descKeyIndexMap{},
		aggCheckpoint: aggCheckpointMap{},
		stateful:      stateful,
	}
}

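// AggregatorFor returns an Aggregator for the given descriptor, as chosen by
// the configured AggregationSelector.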
func (b *Batcher) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
	return b.selector.AggregatorFor(descriptor)
}

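// Process projects the record's labels onto the descriptor's recommended
// keys and merges the record's Aggregator into the checkpoint held for the
// resulting (descriptor, encoded label set) pair.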
func (b *Batcher) Process(_ context.Context, record export.Record) error {
	desc := record.Descriptor()
	keys := desc.Keys()

	// Cache the mapping from Descriptor->Key->Index
	ki, ok := b.descKeyIndex[desc]
	if !ok {
		ki = map[core.Key]int{}
		b.descKeyIndex[desc] = ki

		for i, k := range keys {
			ki[k] = i
		}
	}

	// Compute the value list. Note: Unspecified values become
	// empty strings. TODO: pin this down, we have no appropriate
	// Value constructor.
	outputLabels := make([]core.KeyValue, len(keys))

	for i, key := range keys {
		outputLabels[i] = key.String("")
	}

	// Note also the possibility to speed this computation of
	// "encoded" via "outputLabels" in the form of a (Descriptor,
	// LabelSet)->(Labels, Encoded) cache.
	iter := record.Labels().Iter()
	for iter.Next() {
		kv := iter.Label()
		pos, ok := ki[kv.Key]
		if !ok {
			continue
		}
		outputLabels[pos].Value = kv.Value
	}

	// Compute an encoded lookup key.
	encoded := b.labelEncoder.Encode(export.LabelSlice(outputLabels).Iter())

	// Merge this aggregator with all preceding aggregators that
	// map to the same set of `outputLabels` labels.
	agg := record.Aggregator()
	key := batchKey{
		descriptor: record.Descriptor(),
		encoded:    encoded,
	}
	rag, ok := b.aggCheckpoint[key]
	if ok {
		// Combine the input aggregator with the current
		// checkpoint state.
		return rag.Aggregator().Merge(agg, desc)
	}
	// If this Batcher is stateful, create a copy of the
	// Aggregator for long-term storage. Otherwise the
	// Meter implementation will checkpoint the aggregator
	// again, overwriting the long-lived state.
	if b.stateful {
		tmp := agg
		// Note: the call to AggregatorFor() followed by Merge
		// is effectively a Clone() operation.
		agg = b.AggregatorFor(desc)
		if err := agg.Merge(tmp, desc); err != nil {
			return err
		}
	}
	b.aggCheckpoint[key] = export.NewRecord(
		desc,
		export.NewLabels(export.LabelSlice(outputLabels), encoded, b.labelEncoder),
		agg,
	)
	return nil
}

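// CheckpointSet returns the current set of aggregation checkpoints.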
func (b *Batcher) CheckpointSet() export.CheckpointSet {
	return &checkpointSet{
		aggCheckpointMap: b.aggCheckpoint,
		labelEncoder:     b.labelEncoder,
	}
}

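// FinishedCollection clears the checkpoint map unless the Batcher is
// stateful.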
func (b *Batcher) FinishedCollection() {
	if !b.stateful {
		b.aggCheckpoint = aggCheckpointMap{}
	}
}

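// ForEach calls f for each checkpointed record, stopping at the first error
// other than aggregator.ErrNoData.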
func (p *checkpointSet) ForEach(f func(export.Record) error) error {
	for _, entry := range p.aggCheckpointMap {
		if err := f(entry); err != nil && !errors.Is(err, aggregator.ErrNoData) {
			return err
		}
	}
	return nil
}