2020-03-25 23:47:17 +02:00
|
|
|
// Copyright The OpenTelemetry Authors
|
2020-03-13 20:42:20 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
// Package transform provides translations for opentelemetry-go concepts and
|
|
|
|
// structures to otlp structures.
|
|
|
|
package transform
|
|
|
|
|
|
|
|
import (
|
2020-04-15 21:04:44 +02:00
|
|
|
"context"
|
2020-03-13 20:42:20 +02:00
|
|
|
"errors"
|
2020-04-15 21:04:44 +02:00
|
|
|
"fmt"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
2020-03-13 20:42:20 +02:00
|
|
|
|
2020-07-16 22:59:14 +02:00
|
|
|
commonpb "go.opentelemetry.io/otel/internal/opentelemetry-proto-gen/common/v1"
|
|
|
|
metricpb "go.opentelemetry.io/otel/internal/opentelemetry-proto-gen/metrics/v1"
|
|
|
|
resourcepb "go.opentelemetry.io/otel/internal/opentelemetry-proto-gen/resource/v1"
|
2020-03-13 20:42:20 +02:00
|
|
|
|
2020-04-23 21:10:58 +02:00
|
|
|
"go.opentelemetry.io/otel/api/label"
|
2020-03-19 21:02:46 +02:00
|
|
|
"go.opentelemetry.io/otel/api/metric"
|
|
|
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
2020-06-10 07:53:30 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
2020-06-12 18:11:17 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
2020-04-15 21:04:44 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/resource"
|
2020-03-13 20:42:20 +02:00
|
|
|
)
|
|
|
|
|
2020-04-15 21:04:44 +02:00
|
|
|
var (
	// ErrUnimplementedAgg is returned when a transformation of an unimplemented
	// aggregator is attempted.
	ErrUnimplementedAgg = errors.New("unimplemented aggregator")

	// ErrUnknownValueType is returned when a transformation of an unknown value
	// is attempted.
	ErrUnknownValueType = errors.New("invalid value type")

	// ErrContextCanceled is returned when a context cancellation halts a
	// transformation.
	ErrContextCanceled = errors.New("context canceled")

	// ErrTransforming is returned when an unexpected error is encountered transforming.
	ErrTransforming = errors.New("transforming failed")
)
|
|
|
|
|
|
|
|
// result is the product of transforming Records into OTLP Metrics.
type result struct {
	// Resource is the Resource the transformed Record was produced under.
	Resource *resource.Resource
	// InstrumentationLibrary identifies the instrumentation that produced
	// the Record.
	InstrumentationLibrary instrumentation.Library
	// Metric is the transformed OTLP Metric; nil when Err is non-nil.
	Metric *metricpb.Metric
	// Err is any error encountered transforming the Record.
	Err error
}
|
|
|
|
|
|
|
|
// CheckpointSet transforms all records contained in a checkpoint into
|
|
|
|
// batched OTLP ResourceMetrics.
|
2020-06-23 07:59:51 +02:00
|
|
|
func CheckpointSet(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
|
|
|
|
records, errc := source(ctx, exportSelector, cps)
|
2020-04-15 21:04:44 +02:00
|
|
|
|
|
|
|
// Start a fixed number of goroutines to transform records.
|
|
|
|
transformed := make(chan result)
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(int(numWorkers))
|
|
|
|
for i := uint(0); i < numWorkers; i++ {
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
2020-05-19 02:44:28 +02:00
|
|
|
transformer(ctx, records, transformed)
|
2020-04-15 21:04:44 +02:00
|
|
|
}()
|
|
|
|
}
|
|
|
|
go func() {
|
|
|
|
wg.Wait()
|
|
|
|
close(transformed)
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Synchronously collect the transformed records and transmit.
|
|
|
|
rms, err := sink(ctx, transformed)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// source is complete, check for any errors.
|
|
|
|
if err := <-errc; err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return rms, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// source starts a goroutine that sends each one of the Records yielded by
|
|
|
|
// the CheckpointSet on the returned chan. Any error encoutered will be sent
|
|
|
|
// on the returned error chan after seeding is complete.
|
2020-06-23 07:59:51 +02:00
|
|
|
func source(ctx context.Context, exportSelector export.ExportKindSelector, cps export.CheckpointSet) (<-chan export.Record, <-chan error) {
|
2020-04-15 21:04:44 +02:00
|
|
|
errc := make(chan error, 1)
|
|
|
|
out := make(chan export.Record)
|
|
|
|
// Seed records into process.
|
|
|
|
go func() {
|
|
|
|
defer close(out)
|
|
|
|
// No select is needed since errc is buffered.
|
2020-06-23 07:59:51 +02:00
|
|
|
errc <- cps.ForEach(exportSelector, func(r export.Record) error {
|
2020-04-15 21:04:44 +02:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ErrContextCanceled
|
|
|
|
case out <- r:
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}()
|
|
|
|
return out, errc
|
|
|
|
}
|
|
|
|
|
|
|
|
// transformer transforms records read from the passed in chan into
|
|
|
|
// OTLP Metrics which are sent on the out chan.
|
2020-05-19 02:44:28 +02:00
|
|
|
func transformer(ctx context.Context, in <-chan export.Record, out chan<- result) {
|
2020-04-15 21:04:44 +02:00
|
|
|
for r := range in {
|
|
|
|
m, err := Record(r)
|
|
|
|
// Propagate errors, but do not send empty results.
|
|
|
|
if err == nil && m == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
res := result{
|
2020-05-19 02:44:28 +02:00
|
|
|
Resource: r.Resource(),
|
2020-06-12 18:11:17 +02:00
|
|
|
InstrumentationLibrary: instrumentation.Library{
|
|
|
|
Name: r.Descriptor().InstrumentationName(),
|
|
|
|
Version: r.Descriptor().InstrumentationVersion(),
|
|
|
|
},
|
|
|
|
Metric: m,
|
|
|
|
Err: err,
|
2020-04-15 21:04:44 +02:00
|
|
|
}
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case out <- res:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// sink collects transformed Records and batches them.
//
// Any errors encountered transforming input will be reported with an
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
// caller to handle any incorrect data in these ResourceMetrics.
//
// NOTE(review): ctx is currently unused here; cancellation is handled
// upstream by source and transformer.
func sink(ctx context.Context, in <-chan result) ([]*metricpb.ResourceMetrics, error) {
	var errStrings []string

	type resourceBatch struct {
		Resource *resourcepb.Resource
		// Group by instrumentation library name and then the MetricDescriptor.
		InstrumentationLibraryBatches map[instrumentation.Library]map[string]*metricpb.Metric
	}

	// group by unique Resource string.
	grouped := make(map[label.Distinct]resourceBatch)
	for res := range in {
		// Collect errors rather than failing fast; remaining results are
		// still batched and returned alongside the joined error below.
		if res.Err != nil {
			errStrings = append(errStrings, res.Err.Error())
			continue
		}

		// Equivalent returns a comparable identity for the Resource so it
		// can be used as a map key.
		rID := res.Resource.Equivalent()
		rb, ok := grouped[rID]
		if !ok {
			rb = resourceBatch{
				Resource:                      Resource(res.Resource),
				InstrumentationLibraryBatches: make(map[instrumentation.Library]map[string]*metricpb.Metric),
			}
			grouped[rID] = rb
		}

		mb, ok := rb.InstrumentationLibraryBatches[res.InstrumentationLibrary]
		if !ok {
			mb = make(map[string]*metricpb.Metric)
			rb.InstrumentationLibraryBatches[res.InstrumentationLibrary] = mb
		}

		// The serialized descriptor identifies metrics that should be
		// merged into a single OTLP Metric.
		mID := res.Metric.GetMetricDescriptor().String()
		m, ok := mb[mID]
		if !ok {
			mb[mID] = res.Metric
			continue
		}
		// A metric with this descriptor is already batched: merge this
		// result's data points into it (m is a pointer into mb).
		if len(res.Metric.Int64DataPoints) > 0 {
			m.Int64DataPoints = append(m.Int64DataPoints, res.Metric.Int64DataPoints...)
		}
		if len(res.Metric.DoubleDataPoints) > 0 {
			m.DoubleDataPoints = append(m.DoubleDataPoints, res.Metric.DoubleDataPoints...)
		}
		if len(res.Metric.HistogramDataPoints) > 0 {
			m.HistogramDataPoints = append(m.HistogramDataPoints, res.Metric.HistogramDataPoints...)
		}
		if len(res.Metric.SummaryDataPoints) > 0 {
			m.SummaryDataPoints = append(m.SummaryDataPoints, res.Metric.SummaryDataPoints...)
		}
	}

	// Nothing was batched (all input errored or in was empty).
	if len(grouped) == 0 {
		return nil, nil
	}

	// Flatten the grouped batches into the OTLP wire structures.
	var rms []*metricpb.ResourceMetrics
	for _, rb := range grouped {
		rm := &metricpb.ResourceMetrics{Resource: rb.Resource}
		for il, mb := range rb.InstrumentationLibraryBatches {
			ilm := &metricpb.InstrumentationLibraryMetrics{
				Metrics: make([]*metricpb.Metric, 0, len(mb)),
			}
			// Only set the library when one was recorded; the zero value
			// means no instrumentation library was known.
			if il != (instrumentation.Library{}) {
				ilm.InstrumentationLibrary = &commonpb.InstrumentationLibrary{
					Name:    il.Name,
					Version: il.Version,
				}
			}
			for _, m := range mb {
				ilm.Metrics = append(ilm.Metrics, m)
			}
			rm.InstrumentationLibraryMetrics = append(rm.InstrumentationLibraryMetrics, ilm)
		}
		rms = append(rms, rm)
	}

	// Report any transform errors.
	if len(errStrings) > 0 {
		return rms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
	}
	return rms, nil
}
|
2020-03-13 20:42:20 +02:00
|
|
|
|
|
|
|
// Record transforms a Record into an OTLP Metric. An ErrUnimplementedAgg
// error is returned if the Record Aggregator is not supported.
func Record(r export.Record) (*metricpb.Metric, error) {
	switch a := r.Aggregation().(type) {
	// Case order matters: a MinMaxSumCount aggregation also provides a Sum
	// method (see minMaxSumCountValues), so it must be matched before the
	// plain Sum case to get the richer transformation.
	case aggregation.MinMaxSumCount:
		return minMaxSumCount(r, a)
	case aggregation.Sum:
		return sum(r, a)
	default:
		return nil, fmt.Errorf("%w: %v", ErrUnimplementedAgg, a)
	}
}
|
|
|
|
|
|
|
|
// sum transforms a Sum Aggregator into an OTLP Metric.
|
2020-06-18 19:16:33 +02:00
|
|
|
func sum(record export.Record, a aggregation.Sum) (*metricpb.Metric, error) {
|
|
|
|
desc := record.Descriptor()
|
|
|
|
labels := record.Labels()
|
2020-03-13 20:42:20 +02:00
|
|
|
sum, err := a.Sum()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
m := &metricpb.Metric{
|
|
|
|
MetricDescriptor: &metricpb.MetricDescriptor{
|
|
|
|
Name: desc.Name(),
|
|
|
|
Description: desc.Description(),
|
|
|
|
Unit: string(desc.Unit()),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
switch n := desc.NumberKind(); n {
|
Eliminate Uint64NumberKind from API (#864)
fixes #851
This includes all of the associated methods, such as
AsUint64, AsUint64Atomic, AsUint64Ptr, CoerceToUint64, SetUint64
SetUint64Atomic, SwapUint64, SwapUint64Atomic, AddUint64,
AddUint64Atomic, CompamreAndSwapUint64, CompareUint64
Only significant change as a result was converting the histogram
aggregator's `count` state field into an int64 from a `metric.Number`.
Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
2020-06-24 01:28:04 +02:00
|
|
|
case metric.Int64NumberKind:
|
2020-06-24 23:02:13 +02:00
|
|
|
m.MetricDescriptor.Type = metricpb.MetricDescriptor_INT64
|
2020-03-17 01:43:54 +02:00
|
|
|
m.Int64DataPoints = []*metricpb.Int64DataPoint{
|
2020-06-18 19:16:33 +02:00
|
|
|
{
|
|
|
|
Value: sum.CoerceToInt64(n),
|
2020-06-24 23:02:13 +02:00
|
|
|
Labels: stringKeyValues(labels.Iter()),
|
2020-06-18 19:16:33 +02:00
|
|
|
StartTimeUnixNano: uint64(record.StartTime().UnixNano()),
|
|
|
|
TimeUnixNano: uint64(record.EndTime().UnixNano()),
|
|
|
|
},
|
2020-03-13 20:42:20 +02:00
|
|
|
}
|
2020-05-11 08:44:42 +02:00
|
|
|
case metric.Float64NumberKind:
|
2020-06-24 23:02:13 +02:00
|
|
|
m.MetricDescriptor.Type = metricpb.MetricDescriptor_DOUBLE
|
2020-03-17 01:43:54 +02:00
|
|
|
m.DoubleDataPoints = []*metricpb.DoubleDataPoint{
|
2020-06-18 19:16:33 +02:00
|
|
|
{
|
|
|
|
Value: sum.CoerceToFloat64(n),
|
2020-06-24 23:02:13 +02:00
|
|
|
Labels: stringKeyValues(labels.Iter()),
|
2020-06-18 19:16:33 +02:00
|
|
|
StartTimeUnixNano: uint64(record.StartTime().UnixNano()),
|
|
|
|
TimeUnixNano: uint64(record.EndTime().UnixNano()),
|
|
|
|
},
|
2020-03-13 20:42:20 +02:00
|
|
|
}
|
2020-04-15 21:04:44 +02:00
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
|
2020-03-13 20:42:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return m, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// minMaxSumCountValue returns the values of the MinMaxSumCount Aggregator
|
|
|
|
// as discret values.
|
2020-06-10 07:53:30 +02:00
|
|
|
func minMaxSumCountValues(a aggregation.MinMaxSumCount) (min, max, sum metric.Number, count int64, err error) {
|
2020-03-13 20:42:20 +02:00
|
|
|
if min, err = a.Min(); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if max, err = a.Max(); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if sum, err = a.Sum(); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if count, err = a.Count(); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
|
2020-06-18 19:16:33 +02:00
|
|
|
func minMaxSumCount(record export.Record, a aggregation.MinMaxSumCount) (*metricpb.Metric, error) {
|
|
|
|
desc := record.Descriptor()
|
|
|
|
labels := record.Labels()
|
2020-03-13 20:42:20 +02:00
|
|
|
min, max, sum, count, err := minMaxSumCountValues(a)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
numKind := desc.NumberKind()
|
|
|
|
return &metricpb.Metric{
|
|
|
|
MetricDescriptor: &metricpb.MetricDescriptor{
|
|
|
|
Name: desc.Name(),
|
|
|
|
Description: desc.Description(),
|
|
|
|
Unit: string(desc.Unit()),
|
|
|
|
Type: metricpb.MetricDescriptor_SUMMARY,
|
|
|
|
},
|
2020-03-17 01:43:54 +02:00
|
|
|
SummaryDataPoints: []*metricpb.SummaryDataPoint{
|
2020-03-13 20:42:20 +02:00
|
|
|
{
|
2020-06-24 23:02:13 +02:00
|
|
|
Labels: stringKeyValues(labels.Iter()),
|
|
|
|
Count: uint64(count),
|
|
|
|
Sum: sum.CoerceToFloat64(numKind),
|
2020-03-13 20:42:20 +02:00
|
|
|
PercentileValues: []*metricpb.SummaryDataPoint_ValueAtPercentile{
|
|
|
|
{
|
|
|
|
Percentile: 0.0,
|
|
|
|
Value: min.CoerceToFloat64(numKind),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Percentile: 100.0,
|
|
|
|
Value: max.CoerceToFloat64(numKind),
|
|
|
|
},
|
|
|
|
},
|
2020-06-18 19:16:33 +02:00
|
|
|
StartTimeUnixNano: uint64(record.StartTime().UnixNano()),
|
|
|
|
TimeUnixNano: uint64(record.EndTime().UnixNano()),
|
2020-03-13 20:42:20 +02:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
// stringKeyValues transforms a label iterator into an OTLP StringKeyValues.
|
2020-04-23 21:10:58 +02:00
|
|
|
func stringKeyValues(iter label.Iterator) []*commonpb.StringKeyValue {
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
l := iter.Len()
|
|
|
|
if l == 0 {
|
2020-03-26 05:17:43 +02:00
|
|
|
return nil
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
}
|
|
|
|
result := make([]*commonpb.StringKeyValue, 0, l)
|
|
|
|
for iter.Next() {
|
|
|
|
kv := iter.Label()
|
2020-03-13 20:42:20 +02:00
|
|
|
result = append(result, &commonpb.StringKeyValue{
|
|
|
|
Key: string(kv.Key),
|
|
|
|
Value: kv.Value.Emit(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|