// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/export/metric"

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel/api/label"
	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/sdk/resource"
)

// Integrator is responsible for deciding which kind of aggregation to
// use (via AggregationSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregationSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The Process method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the integrator to build the set of metrics
// currently being exported.
type Integrator interface {
	// AggregationSelector is responsible for selecting the
	// concrete type of Aggregator used for a metric in the SDK.
	//
	// This may be a static decision based on fields of the
	// Descriptor, or it could use an external configuration
	// source to customize the treatment of each metric
	// instrument.
	//
	// The result from AggregatorSelector.AggregatorFor should be
	// the same type for a given Descriptor or else nil. The same
	// type should be returned for a given descriptor, because
	// Aggregators only know how to Merge with their own type. If
	// the result is nil, the metric instrument will be disabled.
	//
	// Note that the SDK only calls AggregatorFor when new records
	// require an Aggregator. This does not provide a way to
	// disable metrics with active records.
	AggregationSelector

	// Process is called by the SDK once per internal record,
	// passing the export Record (a Descriptor, the corresponding
	// Labels, and the checkpointed Aggregator). This call has no
	// Context argument because it is expected to perform only
	// computation. An SDK is not expected to call exporters from
	// within Process; use a controller for that (see
	// ./controllers/{pull,push}).
	Process(record Record) error
}

// AggregationSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregationSelector interface {
	// AggregatorFor returns the kind of aggregator suited to the
	// requested export. Returning `nil` indicates to ignore this
	// metric instrument. This must return a consistent type to
	// avoid confusion in later stages of the metrics export
	// process, i.e., when Merging multiple aggregators for a
	// specific instrument.
	//
	// Note: This is context-free because the aggregator should
	// not relate to the incoming context. This call should not
	// block.
	AggregatorFor(*metric.Descriptor) Aggregator
}

// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument. Sum-only
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (ValueRecorder, ValueObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a Sum aggregator to a ValueRecorder instrument or a
// MinMaxSumCount aggregator to a Counter instrument.
type Aggregator interface {
	// Update receives a new measured value and incorporates it
	// into the aggregation. Update() calls may arrive
	// concurrently as the SDK does not provide synchronization.
	//
	// Descriptor.NumberKind() should be consulted to determine
	// whether the provided number is an int64 or float64.
	//
	// The Context argument comes from user-level code and could be
	// inspected for distributed or span context.
	Update(context.Context, metric.Number, *metric.Descriptor) error

	// Checkpoint is called during collection to finish one period
	// of aggregation by atomically saving the current value.
	// Checkpoint() is called concurrently with Update().
	// Checkpoint should reset the current state to the empty
	// state, in order to begin computing a new delta for the next
	// collection period.
	//
	// After the checkpoint is taken, the current value may be
	// accessed by converting to one of the suitable interface
	// types in the `aggregator` sub-package.
	//
	// This call has no Context argument because it is expected to
	// perform only computation.
	Checkpoint(*metric.Descriptor)

	// Merge combines the checkpointed state from the argument
	// aggregator into this aggregator's checkpointed state.
	// Merge() is called in a single-threaded context, no locking
	// is required.
	Merge(Aggregator, *metric.Descriptor) error
}

// Exporter handles presentation of the checkpoint of aggregate
// metrics. This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
	// Export is called immediately after completing a collection
	// pass in the SDK.
	//
	// The Context comes from the controller that initiated
	// collection.
	//
	// The CheckpointSet interface refers to the Integrator that just
	// completed collection.
	Export(context.Context, CheckpointSet) error
}

// CheckpointSet allows a controller to access a complete checkpoint of
// aggregated metrics from the Integrator. This is passed to the
// Exporter which may then use ForEach to iterate over the collection
// of aggregated metrics.
type CheckpointSet interface {
	// ForEach iterates over aggregated checkpoints for all
	// metrics that were updated during the last collection
	// period. Each aggregated checkpoint returned by the
	// function parameter may return an error.
	//
	// ForEach tolerates ErrNoData silently, as this is
	// expected from the Meter implementation. Any other kind
	// of error will immediately halt ForEach and return
	// the error to the caller.
	ForEach(func(Record) error) error

	// Locker supports locking the checkpoint set. Collection
	// into the checkpoint set cannot take place (in case of a
	// stateful integrator) while it is locked.
	//
	// The Integrator attached to the Accumulator MUST be called
	// with the lock held.
	sync.Locker

	// RLock acquires a read lock corresponding to this Locker.
	RLock()
	// RUnlock releases a read lock corresponding to this Locker.
	RUnlock()
}

// Record contains the exported data for a single metric instrument
// and label set.
type Record struct {
	// descriptor identifies the metric instrument that produced
	// this data.
	descriptor *metric.Descriptor
	// labels is the label set under which the data was
	// aggregated.
	labels *label.Set
	// resource describes common attributes of the entity that
	// produced this metric event.
	resource *resource.Resource
	// aggregator holds the checkpointed aggregation state.
	aggregator Aggregator
}

2020-05-11 19:23:06 +02:00
|
|
|
// NewRecord allows Integrator implementations to construct export
|
2019-11-15 23:01:20 +02:00
|
|
|
// records. The Descriptor, Labels, and Aggregator represent
|
|
|
|
// aggregate metric events received over a single collection period.
|
2020-05-19 02:44:28 +02:00
|
|
|
func NewRecord(descriptor *metric.Descriptor, labels *label.Set, resource *resource.Resource, aggregator Aggregator) Record {
|
2019-11-15 23:01:20 +02:00
|
|
|
return Record{
|
|
|
|
descriptor: descriptor,
|
|
|
|
labels: labels,
|
2020-05-19 02:44:28 +02:00
|
|
|
resource: resource,
|
2019-11-15 23:01:20 +02:00
|
|
|
aggregator: aggregator,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Aggregator returns the checkpointed aggregator. It is safe to
|
|
|
|
// access the checkpointed state without locking.
|
|
|
|
func (r Record) Aggregator() Aggregator {
|
|
|
|
return r.aggregator
|
|
|
|
}
|
|
|
|
|
|
|
|
// Descriptor describes the metric instrument being exported.
|
2020-03-19 21:02:46 +02:00
|
|
|
func (r Record) Descriptor() *metric.Descriptor {
|
2019-11-15 23:01:20 +02:00
|
|
|
return r.descriptor
|
|
|
|
}
|
|
|
|
|
|
|
|
// Labels describes the labels associated with the instrument and the
|
|
|
|
// aggregated data.
|
2020-04-23 21:10:58 +02:00
|
|
|
func (r Record) Labels() *label.Set {
|
2019-11-15 23:01:20 +02:00
|
|
|
return r.labels
|
|
|
|
}
|
2020-05-19 02:44:28 +02:00
|
|
|
|
|
|
|
// Resource contains common attributes that apply to this metric event.
|
|
|
|
func (r Record) Resource() *resource.Resource {
|
|
|
|
return r.resource
|
|
|
|
}
|