2020-03-24 07:41:10 +02:00
|
|
|
// Copyright The OpenTelemetry Authors
|
2019-10-29 22:27:22 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package metric
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2019-11-15 23:01:20 +02:00
|
|
|
"fmt"
|
|
|
|
"os"
|
2020-03-11 18:11:27 +02:00
|
|
|
"reflect"
|
2020-02-11 02:20:29 +02:00
|
|
|
"runtime"
|
2019-10-29 22:27:22 +02:00
|
|
|
"sort"
|
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
|
|
|
|
2019-11-01 20:40:29 +02:00
|
|
|
"go.opentelemetry.io/otel/api/core"
|
|
|
|
"go.opentelemetry.io/otel/api/metric"
|
|
|
|
api "go.opentelemetry.io/otel/api/metric"
|
2019-11-05 23:08:55 +02:00
|
|
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
2019-11-15 23:01:20 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
|
2020-03-20 17:58:32 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/resource"
|
2019-10-29 22:27:22 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
type (
	// SDK implements the OpenTelemetry Meter API.  The SDK is
	// bound to a single export.Batcher in `New()`.
	//
	// The SDK supports a Collect() API to gather and export
	// current data.  Collect() should be arranged according to
	// the batcher model.  Push-based batchers will setup a
	// timer to call Collect() periodically.  Pull-based batchers
	// will call Collect() when a pull request arrives.
	SDK struct {
		// current maps `mapkey` to *record.
		current sync.Map

		// asyncInstruments is a set of
		// `*asyncInstrument` instances
		asyncInstruments sync.Map

		// currentEpoch is the current epoch number. It is
		// incremented in `Collect()`.
		currentEpoch int64

		// batcher is the configured batcher+configuration.
		batcher export.Batcher

		// collectLock prevents simultaneous calls to Collect().
		collectLock sync.Mutex

		// errorHandler supports delivering errors to the user.
		errorHandler ErrorHandler

		// resource represents the entity producing telemetry.
		resource resource.Resource

		// asyncSortSlice has a single purpose - as a temporary
		// place for sorting during labels creation to avoid
		// allocation.  It is cleared after use.
		asyncSortSlice sortedLabels
	}

	// syncInstrument implements the synchronous instrument path;
	// it embeds instrument for the meter/descriptor state.
	syncInstrument struct {
		instrument
	}

	// orderedLabels is a variable-size array of core.KeyValue
	// suitable for use as a map key.
	orderedLabels interface{}

	// labels represents an internalized set of labels that have been
	// sorted and deduplicated.
	labels struct {
		// cachedEncoderID needs to be aligned for atomic access
		cachedEncoderID int64
		// cachedEncoded is an encoded version of ordered
		// labels
		cachedEncoded string

		// ordered is the output of sorting and deduplicating
		// the labels, copied into an array of the correct
		// size for use as a map key.
		ordered orderedLabels
	}

	// mapkey uniquely describes a metric instrument in terms of
	// its InstrumentID and the encoded form of its labels.
	mapkey struct {
		descriptor *metric.Descriptor
		ordered    orderedLabels
	}

	// record maintains the state of one metric instrument.  Due to
	// the use of lock-free algorithms, there may be more than one
	// `record` in existence at a time, although at most one can
	// be referenced from the `SDK.current` map.
	record struct {
		// refMapped keeps track of refcounts and the mapping state to the
		// SDK.current map.
		refMapped refcountMapped

		// updateCount is incremented on every Update.
		updateCount int64

		// collectedCount is set to updateCount on collection,
		// supports checking for no updates during a round.
		collectedCount int64

		// labels is the processed label set for this record.
		//
		// labels has to be aligned for 64-bit atomic operations.
		labels labels

		// sortSlice has a single purpose - as a temporary
		// place for sorting during labels creation to avoid
		// allocation.
		sortSlice sortedLabels

		// inst is a pointer to the corresponding instrument.
		inst *syncInstrument

		// recorder implements the actual RecordOne() API,
		// depending on the type of aggregation.  If nil, the
		// metric was disabled by the exporter.
		recorder export.Aggregator
	}

	// instrument holds the state common to both synchronous and
	// asynchronous instruments: the owning SDK and the descriptor.
	instrument struct {
		meter      *SDK
		descriptor metric.Descriptor
	}

	// asyncInstrument implements the asynchronous (observer)
	// instrument path.
	asyncInstrument struct {
		instrument
		// recorders maps ordered labels to the pair of
		// labelset and recorder
		recorders map[orderedLabels]labeledRecorder

		// callback is invoked during collection to observe
		// values; it receives an observe function taking a
		// number and label set.
		callback func(func(core.Number, []core.KeyValue))
	}

	// labeledRecorder pairs an aggregator with the label set and
	// the epoch in which it was last observed.
	labeledRecorder struct {
		observedEpoch int64
		labels        labels
		recorder      export.Aggregator
	}

	// ErrorHandler is a function that handles errors delivered by
	// the SDK (see SetErrorHandler and DefaultErrorHandler).
	ErrorHandler func(error)
)
|
|
|
|
|
|
|
|
var (
	// Compile-time checks that the SDK types implement the API and
	// export interfaces they are used as.
	_ api.MeterImpl     = &SDK{}
	_ api.AsyncImpl     = &asyncInstrument{}
	_ api.SyncImpl      = &syncInstrument{}
	_ api.BoundSyncImpl = &record{}
	_ api.Resourcer     = &SDK{}

	_ export.LabelStorage = &labels{}
	_ export.Labels       = &labels{}

	// kvType is the reflect.Type of core.KeyValue, used when
	// working with the reflect-based ordered label storage.
	kvType = reflect.TypeOf(core.KeyValue{})

	// emptyLabels is the canonical internalized empty label set,
	// backed by a zero-length array so it can be used as a map key.
	emptyLabels = labels{
		ordered: [0]core.KeyValue{},
	}
)
|
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
func (inst *instrument) Descriptor() api.Descriptor {
|
|
|
|
return inst.descriptor
|
2020-03-05 22:15:30 +02:00
|
|
|
}
|
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
func (a *asyncInstrument) Implementation() interface{} {
|
|
|
|
return a
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *syncInstrument) Implementation() interface{} {
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// observe records one asynchronous observation: it range-checks the
// value against the descriptor, locates (or creates) the aggregator
// for this label set, and updates it.  Errors are delivered to the
// SDK's configured error handler rather than returned.
func (a *asyncInstrument) observe(number core.Number, labels []core.KeyValue) {
	if err := aggregator.RangeTest(number, &a.descriptor); err != nil {
		a.meter.errorHandler(err)
		return
	}
	recorder := a.getRecorder(labels)
	if recorder == nil {
		// The instrument is disabled according to the
		// AggregationSelector.
		return
	}
	if err := recorder.Update(context.Background(), number, &a.descriptor); err != nil {
		a.meter.errorHandler(err)
		return
	}
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// getRecorder returns the aggregator for the given label set, creating
// it (and lazily allocating the recorders map) on first use.  If the
// same label set is observed twice within one epoch, the previous
// aggregator is replaced so that the last observation wins.
func (a *asyncInstrument) getRecorder(kvs []core.KeyValue) export.Aggregator {
	// We are in a single-threaded context.  Note: this assumption
	// could be violated if the user added concurrency within
	// their callback.
	labels := a.meter.makeLabels(kvs, &a.meter.asyncSortSlice)

	lrec, ok := a.recorders[labels.ordered]
	if ok {
		if lrec.observedEpoch == a.meter.currentEpoch {
			// last value wins for Observers, so if we see the same labels
			// in the current epoch, we replace the old recorder
			lrec.recorder = a.meter.batcher.AggregatorFor(&a.descriptor)
		} else {
			// First observation of these labels this epoch:
			// keep the existing recorder, refresh the epoch.
			lrec.observedEpoch = a.meter.currentEpoch
		}
		// lrec is a value copy; write the modified entry back.
		a.recorders[labels.ordered] = lrec
		return lrec.recorder
	}
	rec := a.meter.batcher.AggregatorFor(&a.descriptor)
	if a.recorders == nil {
		a.recorders = make(map[orderedLabels]labeledRecorder)
	}
	// This may store nil recorder in the map, thus disabling the
	// asyncInstrument for the labelset for good. This is intentional,
	// but will be revisited later.
	a.recorders[labels.ordered] = labeledRecorder{
		recorder:      rec,
		labels:        labels,
		observedEpoch: a.meter.currentEpoch,
	}
	return rec
}
|
|
|
|
|
2019-11-15 23:01:20 +02:00
|
|
|
// SetErrorHandler installs the function used to deliver SDK errors to
// the user (replacing the handler configured in New).
//
// NOTE(review): this is a plain, unsynchronized assignment — callers
// presumably should set the handler before using the SDK concurrently;
// confirm intended usage.
func (m *SDK) SetErrorHandler(f ErrorHandler) {
	m.errorHandler = f
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// acquireHandle gets or creates a `*record` corresponding to `kvs`,
// the input labels.  The second argument `labels` is passed in to
// support re-use of the orderedLabels computed by a previous
// measurement in the same batch.  This performs two allocations
// in the common case.
func (s *syncInstrument) acquireHandle(kvs []core.KeyValue, lptr *labels) *record {
	var rec *record
	var labels labels

	if lptr == nil || lptr.ordered == nil {
		// This memory allocation may not be used, but it's
		// needed for the `sortSlice` field, to avoid an
		// allocation while sorting.
		rec = &record{}
		labels = s.meter.makeLabels(kvs, &rec.sortSlice)
	} else {
		// Re-use the labels computed by the caller.
		labels = *lptr
	}

	// Create lookup key for sync.Map (one allocation, as this
	// passes through an interface{})
	mk := mapkey{
		descriptor: &s.descriptor,
		ordered:    labels.ordered,
	}

	if actual, ok := s.meter.current.Load(mk); ok {
		// Existing record case.
		existingRec := actual.(*record)
		if existingRec.refMapped.ref() {
			// At this moment it is guaranteed that the entry is in
			// the map and will not be removed.
			return existingRec
		}
		// This entry is no longer mapped, try to add a new entry.
	}

	if rec == nil {
		rec = &record{}
	}
	// value: 2 — presumably one reference for the caller and one for
	// the map; confirm against the refcountMapped implementation.
	rec.refMapped = refcountMapped{value: 2}
	rec.labels = labels
	rec.inst = s
	rec.recorder = s.meter.batcher.AggregatorFor(&s.descriptor)

	for {
		// Load/Store: there's a memory allocation to place `mk` into
		// an interface here.
		if actual, loaded := s.meter.current.LoadOrStore(mk, rec); loaded {
			// Existing record case. Cannot change rec here because if fail
			// will try to add rec again to avoid new allocations.
			oldRec := actual.(*record)
			if oldRec.refMapped.ref() {
				// At this moment it is guaranteed that the entry is in
				// the map and will not be removed.
				return oldRec
			}
			// This loaded entry is marked as unmapped (so Collect will remove
			// it from the map immediately), try again - this is a busy waiting
			// strategy to wait until Collect() removes this entry from the map.
			//
			// This can be improved by having a list of "Unmapped" entries for
			// one time only usages, OR we can make this a blocking path and use
			// a Mutex that protects the delete operation (delete only if the old
			// record is associated with the key).

			// Let collector get work done to remove the entry from the map.
			runtime.Gosched()
			continue
		}
		// The new entry was added to the map, good to go.
		return rec
	}
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
func (s *syncInstrument) Bind(kvs []core.KeyValue) api.BoundSyncImpl {
|
|
|
|
return s.acquireHandle(kvs, nil)
|
2019-10-29 22:27:22 +02:00
|
|
|
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
func (s *syncInstrument) RecordOne(ctx context.Context, number core.Number, kvs []core.KeyValue) {
|
|
|
|
h := s.acquireHandle(kvs, nil)
|
2019-12-28 02:30:19 +02:00
|
|
|
defer h.Unbind()
|
2019-10-29 22:27:22 +02:00
|
|
|
h.RecordOne(ctx, number)
|
|
|
|
}
|
|
|
|
|
2019-11-15 23:01:20 +02:00
|
|
|
// New constructs a new SDK for the given batcher.  This SDK supports
// only a single batcher.
//
// The SDK does not start any background process to collect itself
// periodically, this responsibility lies with the batcher, typically,
// depending on the type of export.  For example, a pull-based
// batcher will call Collect() when it receives a request to scrape
// current metric values.  A push-based batcher should configure its
// own periodic collection.
func New(batcher export.Batcher, opts ...Option) *SDK {
	// Start from defaults, then apply the caller's options.
	c := &Config{ErrorHandler: DefaultErrorHandler}
	for _, opt := range opts {
		opt.Apply(c)
	}

	return &SDK{
		batcher:      batcher,
		errorHandler: c.ErrorHandler,
		resource:     c.Resource,
	}
}
|
|
|
|
|
2019-11-15 23:01:20 +02:00
|
|
|
// DefaultErrorHandler prints the error to os.Stderr.  It is the
// ErrorHandler installed by New when no option overrides it.
func DefaultErrorHandler(err error) {
	fmt.Fprintln(os.Stderr, "Metrics SDK error:", err)
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
// makeLabels returns a `labels` corresponding to the arguments.  Labels
// are sorted and de-duplicated, with last-value-wins semantics.  Note that
// sorting and deduplicating happens in-place to avoid allocation, so the
// passed slice will be modified.  The `sortSlice` argument refers to a memory
// location used temporarily while sorting the slice, to avoid a memory
// allocation.
func (m *SDK) makeLabels(kvs []core.KeyValue, sortSlice *sortedLabels) labels {
	// Check for empty set.
	if len(kvs) == 0 {
		return emptyLabels
	}

	*sortSlice = kvs

	// Sort and de-duplicate.  Note: this use of `sortSlice`
	// avoids an allocation because it is a pointer.
	sort.Stable(sortSlice)

	// Release the temporary reference so the slice is not retained.
	*sortSlice = nil

	// Compact duplicates in place.  `oi` is the output index; the
	// slice is sorted, so duplicates are adjacent.
	oi := 1
	for i := 1; i < len(kvs); i++ {
		if kvs[i-1].Key == kvs[i].Key {
			// Overwrite the value for "last-value wins".
			kvs[oi-1].Value = kvs[i].Value
			continue
		}
		kvs[oi] = kvs[i]
		oi++
	}
	kvs = kvs[0:oi]
	return computeOrderedLabels(kvs)
}
|
2019-10-29 22:27:22 +02:00
|
|
|
|
2020-03-24 18:30:12 +02:00
|
|
|
// NumLabels is a part of an implementation of the export.LabelStorage
// interface.  It reports the number of label key-values stored in the
// `ordered` fixed-size array, obtained via reflection since the array
// length is part of the dynamic type.
func (ls *labels) NumLabels() int {
	return reflect.ValueOf(ls.ordered).Len()
}
|
|
|
|
|
2020-03-24 18:30:12 +02:00
|
|
|
// GetLabel is a part of an implementation of the export.LabelStorage
|
|
|
|
// interface.
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
func (ls *labels) GetLabel(idx int) core.KeyValue {
|
2020-04-21 05:29:46 +02:00
|
|
|
// Note: The Go compiler successfully avoids an allocation for
|
|
|
|
// the interface{} conversion here:
|
|
|
|
return reflect.ValueOf(ls.ordered).Index(idx).Interface().(core.KeyValue)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
}
|
|
|
|
|
2020-03-24 18:30:12 +02:00
|
|
|
// Iter is a part of an implementation of the export.Labels interface.
|
|
|
|
func (ls *labels) Iter() export.LabelIterator {
|
|
|
|
return export.NewLabelIterator(ls)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Encoded is a part of an implementation of the export.Labels
// interface.
//
// It returns this label set encoded by the passed encoder. The
// encoded form of the first well-behaved encoder (positive ID) to
// finish is cached lock-free in ls.cachedEncoded/ls.cachedEncoderID;
// later calls with the same encoder ID hit the cache, while calls
// with a different encoder re-encode every time.
func (ls *labels) Encoded(encoder export.LabelEncoder) string {
	id := encoder.ID()
	if id <= 0 {
		// Punish misbehaving encoders by not even trying to
		// cache them
		return encoder.Encode(ls.Iter())
	}
	cachedID := atomic.LoadInt64(&ls.cachedEncoderID)
	// If cached ID is less than zero, it means that other
	// goroutine is currently caching the encoded labels and the
	// ID of the encoder. Wait until it's done - it's a
	// nonblocking op.
	for cachedID < 0 {
		// Let other goroutine finish its work.
		runtime.Gosched()
		cachedID = atomic.LoadInt64(&ls.cachedEncoderID)
	}
	// At this point, cachedID is either 0 (nothing cached) or
	// some other number.
	//
	// If cached ID is the same as ID of the passed encoder, we've
	// got the fast path.
	if cachedID == id {
		// NOTE(review): reading cachedEncoded here relies on the
		// atomic store of cachedEncoderID below publishing the
		// earlier write to cachedEncoded — presumably sound under
		// the Go memory model; confirm before restructuring.
		return ls.cachedEncoded
	}
	// If we are here, either some other encoder cached its
	// encoded labels or the cache is still for the taking. Either
	// way, we need to compute the encoded labels anyway.
	encoded := encoder.Encode(ls.Iter())
	// If some other encoder took the cache, then we just return
	// our encoded labels. That's a slow path.
	if cachedID > 0 {
		return encoded
	}
	// Try to take the cache for ourselves. This is the place
	// where other encoders may be "blocked".
	if atomic.CompareAndSwapInt64(&ls.cachedEncoderID, 0, -1) {
		// The cache is ours.
		ls.cachedEncoded = encoded
		atomic.StoreInt64(&ls.cachedEncoderID, id)
	}
	return encoded
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
func computeOrderedLabels(kvs []core.KeyValue) labels {
|
|
|
|
var ls labels
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
ls.ordered = computeOrderedFixed(kvs)
|
|
|
|
if ls.ordered == nil {
|
|
|
|
ls.ordered = computeOrderedReflect(kvs)
|
|
|
|
}
|
2020-03-27 23:06:48 +02:00
|
|
|
return ls
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func computeOrderedFixed(kvs []core.KeyValue) orderedLabels {
|
2020-03-11 18:11:27 +02:00
|
|
|
switch len(kvs) {
|
|
|
|
case 1:
|
|
|
|
ptr := new([1]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 2:
|
|
|
|
ptr := new([2]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 3:
|
|
|
|
ptr := new([3]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 4:
|
|
|
|
ptr := new([4]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 5:
|
|
|
|
ptr := new([5]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 6:
|
|
|
|
ptr := new([6]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 7:
|
|
|
|
ptr := new([7]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 8:
|
|
|
|
ptr := new([8]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 9:
|
|
|
|
ptr := new([9]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
case 10:
|
|
|
|
ptr := new([10]core.KeyValue)
|
|
|
|
copy((*ptr)[:], kvs)
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return *ptr
|
2020-03-11 18:11:27 +02:00
|
|
|
default:
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2019-11-14 23:13:42 +02:00
|
|
|
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
func computeOrderedReflect(kvs []core.KeyValue) interface{} {
|
|
|
|
at := reflect.New(reflect.ArrayOf(len(kvs), kvType)).Elem()
|
|
|
|
for i, kv := range kvs {
|
|
|
|
*(at.Index(i).Addr().Interface().(*core.KeyValue)) = kv
|
2020-03-11 18:11:27 +02:00
|
|
|
}
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not infer an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
return at.Interface()
|
2019-10-29 22:27:22 +02:00
|
|
|
}
|
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
func (m *SDK) NewSyncInstrument(descriptor api.Descriptor) (api.SyncImpl, error) {
|
|
|
|
return &syncInstrument{
|
|
|
|
instrument: instrument{
|
|
|
|
descriptor: descriptor,
|
|
|
|
meter: m,
|
|
|
|
},
|
2020-03-11 20:57:57 +02:00
|
|
|
}, nil
|
2020-03-05 22:15:30 +02:00
|
|
|
}
|
|
|
|
|
2020-03-27 23:06:48 +02:00
|
|
|
func (m *SDK) NewAsyncInstrument(descriptor api.Descriptor, callback func(func(core.Number, []core.KeyValue))) (api.AsyncImpl, error) {
|
2020-03-19 21:02:46 +02:00
|
|
|
a := &asyncInstrument{
|
|
|
|
instrument: instrument{
|
|
|
|
descriptor: descriptor,
|
|
|
|
meter: m,
|
|
|
|
},
|
|
|
|
callback: callback,
|
2020-03-05 22:15:30 +02:00
|
|
|
}
|
2020-03-19 21:02:46 +02:00
|
|
|
m.asyncInstruments.Store(a, nil)
|
|
|
|
return a, nil
|
2020-03-05 22:15:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Collect traverses the list of active records and observers and
|
|
|
|
// exports data for each active instrument. Collect() may not be
|
|
|
|
// called concurrently.
|
2019-10-29 22:27:22 +02:00
|
|
|
//
|
2019-11-05 23:08:55 +02:00
|
|
|
// During the collection pass, the export.Batcher will receive
|
2019-10-29 22:27:22 +02:00
|
|
|
// one Export() call per current aggregation.
|
2019-11-15 23:01:20 +02:00
|
|
|
//
|
|
|
|
// Returns the number of records that were checkpointed.
|
|
|
|
func (m *SDK) Collect(ctx context.Context) int {
|
2019-10-29 22:27:22 +02:00
|
|
|
m.collectLock.Lock()
|
|
|
|
defer m.collectLock.Unlock()
|
|
|
|
|
2020-03-05 22:15:30 +02:00
|
|
|
checkpointed := m.collectRecords(ctx)
|
2020-03-19 21:02:46 +02:00
|
|
|
checkpointed += m.collectAsync(ctx)
|
2020-03-05 22:15:30 +02:00
|
|
|
m.currentEpoch++
|
|
|
|
return checkpointed
|
|
|
|
}
|
|
|
|
|
|
|
|
// collectRecords sweeps the map of active synchronous records,
// checkpointing every record updated since the previous collection and
// attempting to unmap records that were not. Returns the number of
// records checkpointed.
func (m *SDK) collectRecords(ctx context.Context) int {
	checkpointed := 0

	m.current.Range(func(key interface{}, value interface{}) bool {
		// Note: always continue to iterate over the entire
		// map by returning `true` in this function.
		inuse := value.(*record)

		// Snapshot the update counter and compare it with the
		// count seen at the last collection.
		mods := atomic.LoadInt64(&inuse.updateCount)
		coll := inuse.collectedCount

		if mods != coll {
			// Updates happened in this interval,
			// checkpoint and continue.
			checkpointed += m.checkpointRecord(ctx, inuse)
			inuse.collectedCount = mods
			return true
		}

		// Having no updates since last collection, try to unmap:
		if unmapped := inuse.refMapped.tryUnmap(); !unmapped {
			// The record is referenced by a binding, continue.
			return true
		}

		// If any other goroutines are now trying to re-insert this
		// entry in the map, they are busy calling Gosched() awaiting
		// this deletion:
		m.current.Delete(inuse.mapkey())

		// There's a potential race between `LoadInt64` and
		// `tryUnmap` in this function. Since this is the
		// last we'll see of this record, checkpoint any
		// update that slipped in between those two calls.
		mods = atomic.LoadInt64(&inuse.updateCount)
		if mods != coll {
			checkpointed += m.checkpointRecord(ctx, inuse)
		}
		return true
	})

	return checkpointed
}
|
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
func (m *SDK) collectAsync(ctx context.Context) int {
|
2020-03-05 22:15:30 +02:00
|
|
|
checkpointed := 0
|
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
m.asyncInstruments.Range(func(key, value interface{}) bool {
|
|
|
|
a := key.(*asyncInstrument)
|
|
|
|
a.callback(a.observe)
|
|
|
|
checkpointed += m.checkpointAsync(ctx, a)
|
2020-03-05 22:15:30 +02:00
|
|
|
return true
|
|
|
|
})
|
|
|
|
|
|
|
|
return checkpointed
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkpointRecord checkpoints a single synchronous record, passing
// its descriptor, aggregator, and labels through to checkpoint().
func (m *SDK) checkpointRecord(ctx context.Context, r *record) int {
	return m.checkpoint(ctx, &r.inst.descriptor, r.recorder, &r.labels)
}
|
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
func (m *SDK) checkpointAsync(ctx context.Context, a *asyncInstrument) int {
|
|
|
|
if len(a.recorders) == 0 {
|
2019-11-15 23:01:20 +02:00
|
|
|
return 0
|
|
|
|
}
|
2020-03-05 22:15:30 +02:00
|
|
|
checkpointed := 0
|
2020-03-19 21:02:46 +02:00
|
|
|
for encodedLabels, lrec := range a.recorders {
|
2020-03-27 23:06:48 +02:00
|
|
|
lrec := lrec
|
2020-04-22 05:23:15 +02:00
|
|
|
epochDiff := m.currentEpoch - lrec.observedEpoch
|
2020-03-05 22:15:30 +02:00
|
|
|
if epochDiff == 0 {
|
2020-03-27 23:06:48 +02:00
|
|
|
checkpointed += m.checkpoint(ctx, &a.descriptor, lrec.recorder, &lrec.labels)
|
2020-03-05 22:15:30 +02:00
|
|
|
} else if epochDiff > 1 {
|
|
|
|
// This is second collection cycle with no
|
|
|
|
// observations for this labelset. Remove the
|
|
|
|
// recorder.
|
2020-03-19 21:02:46 +02:00
|
|
|
delete(a.recorders, encodedLabels)
|
2020-03-05 22:15:30 +02:00
|
|
|
}
|
|
|
|
}
|
2020-03-19 21:02:46 +02:00
|
|
|
if len(a.recorders) == 0 {
|
|
|
|
a.recorders = nil
|
2020-03-05 22:15:30 +02:00
|
|
|
}
|
|
|
|
return checkpointed
|
|
|
|
}
|
2019-11-15 23:01:20 +02:00
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
func (m *SDK) checkpoint(ctx context.Context, descriptor *metric.Descriptor, recorder export.Aggregator, labels *labels) int {
|
2020-03-05 22:15:30 +02:00
|
|
|
if recorder == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
recorder.Checkpoint(ctx, descriptor)
|
2020-03-11 18:11:27 +02:00
|
|
|
|
2020-03-24 18:30:12 +02:00
|
|
|
exportRecord := export.NewRecord(descriptor, labels, recorder)
|
2020-03-05 22:15:30 +02:00
|
|
|
err := m.batcher.Process(ctx, exportRecord)
|
2019-11-15 23:01:20 +02:00
|
|
|
if err != nil {
|
|
|
|
m.errorHandler(err)
|
2019-10-29 22:27:22 +02:00
|
|
|
}
|
2019-11-15 23:01:20 +02:00
|
|
|
return 1
|
2019-10-29 22:27:22 +02:00
|
|
|
}
|
|
|
|
|
2020-03-20 17:58:32 +02:00
|
|
|
// Resource returns the Resource this SDK was created with, describing
// the entity for which it creates instruments.
//
// Resource means that the SDK implements the Resourcer interface and
// therefore all metric instruments it creates will inherit its
// Resource by default unless explicitly overwritten.
func (m *SDK) Resource() resource.Resource {
	return m.resource
}
|
|
|
|
|
2019-10-29 22:27:22 +02:00
|
|
|
// RecordBatch enters a batch of metric events. Every measurement is
// recorded against the same label set kvs; each acquired handle is
// unbound (via defer) only when RecordBatch returns.
func (m *SDK) RecordBatch(ctx context.Context, kvs []core.KeyValue, measurements ...api.Measurement) {
	// Labels will be computed the first time acquireHandle is
	// called. Subsequent calls to acquireHandle will re-use the
	// previously computed value instead of recomputing the
	// ordered labels.
	var labels labels
	for i, meas := range measurements {
		s := meas.SyncImpl().(*syncInstrument)

		h := s.acquireHandle(kvs, &labels)

		// Re-use labels for the next measurement.
		if i == 0 {
			labels = h.labels
		}

		// Deliberately deferred to function exit: handles stay
		// bound until the whole batch has been recorded.
		defer h.Unbind()
		h.RecordOne(ctx, meas.Number())
	}
}
|
|
|
|
|
|
|
|
// RecordOne applies a single measurement to this record's aggregator,
// validating the value against the instrument descriptor first. Errors
// are reported through the meter's error handler and the measurement
// is dropped.
func (r *record) RecordOne(ctx context.Context, number core.Number) {
	if r.recorder == nil {
		// The instrument is disabled according to the AggregationSelector.
		return
	}
	if err := aggregator.RangeTest(number, &r.inst.descriptor); err != nil {
		// Value rejected by the range test: report and drop.
		r.inst.meter.errorHandler(err)
		return
	}
	if err := r.recorder.Update(ctx, number, &r.inst.descriptor); err != nil {
		r.inst.meter.errorHandler(err)
		return
	}
	// Record was modified, inform the Collect() that things need
	// to be collected while the record is still mapped.
	atomic.AddInt64(&r.updateCount, 1)
}
|
|
|
|
|
2019-12-28 02:30:19 +02:00
|
|
|
// Unbind releases this binding's reference on the record, allowing
// collectRecords to eventually unmap it once no references remain.
func (r *record) Unbind() {
	r.refMapped.unref()
}
|
|
|
|
|
|
|
|
// mapkey returns the key under which this record is stored in the
// SDK's record map: the instrument descriptor plus the ordered
// (comparable) form of its labels.
func (r *record) mapkey() mapkey {
	return mapkey{
		descriptor: &r.inst.descriptor,
		ordered:    r.labels.ordered,
	}
}
|