2020-03-25 23:47:17 +02:00
|
|
|
// Copyright The OpenTelemetry Authors
|
2019-10-29 22:27:22 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2020-06-23 07:59:51 +02:00
|
|
|
//go:generate stringer -type=ExportKind
|
|
|
|
|
Replace `Ordered` with an iterator in `export.Labels`. (#567)
* Do not expose a slice of labels in export.Record
This is really an inconvenient implementation detail leak - we may
want to store labels in a different way. Replace it with an iterator -
it does not force us to use slice of key values as a storage in the
long run.
* Add Len to LabelIterator
It may come in handy in several situations, where we don't have access
to export.Labels object, but only to the label iterator.
* Use reflect value label iterator for the fixed labels
* add reset operation to iterator
Makes my life easier when writing a benchmark. Might also be an
alternative to cloning the iterator.
* Add benchmarks for iterators
* Add import comment
* Add clone operation to label iterator
* Move iterator tests to a separate package
* Add tests for cloning iterators
* Pass label iterator to export labels
* Use non-addressable array reflect values
By not using the value created by `reflect.New()`, but rather by
`reflect.ValueOf()`, we get a non-addressable array in the value,
which does not incur an allocation cost when getting an element from
the array.
* Drop zero iterator
This can be substituted by a reflect value iterator that goes over a
value with a zero-sized array.
* Add a simple iterator that implements label iterator
In the long run this will completely replace the LabelIterator
interface.
* Replace reflect value iterator with simple iterator
* Pass label storage to new export labels, not label iterator
* Drop label iterator interface, rename storage iterator to label iterator
* Drop clone operation from iterator
It's a leftover from interface times and now it's pointless - the
iterator is a simple struct, so cloning it is a simple copy.
* Drop Reset from label iterator
The sole existence of Reset was actually for benchmarking convenience.
Now we can just copy the iterator cheaply, so a need for Reset is no
more.
* Drop noop iterator tests
* Move back iterator tests to export package
* Eagerly get the reflect value of ordered labels
So we won't get into problems when several goroutines want to iterate
the same labels at the same time. Not sure if this would be a big
deal, since every goroutine would compute the same reflect.Value, but
concurrent write to the same memory is bad anyway. And it doesn't cost
us any extra allocations anyway.
* Replace NewSliceLabelIterator() with a method of LabelSlice
* Add some documentation
* Documentation fixes
2020-03-20 00:01:34 +02:00
|
|
|
package metric // import "go.opentelemetry.io/otel/sdk/export/metric"
|
2019-11-15 23:01:20 +02:00
|
|
|
|
2019-10-29 22:27:22 +02:00
|
|
|
import (
|
|
|
|
"context"
|
2020-05-19 03:37:41 +02:00
|
|
|
"sync"
|
2020-06-18 19:16:33 +02:00
|
|
|
"time"
|
2019-10-29 22:27:22 +02:00
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
"go.opentelemetry.io/otel/api/metric"
|
2020-08-18 05:25:03 +02:00
|
|
|
"go.opentelemetry.io/otel/label"
|
2020-06-18 19:16:33 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
2020-04-24 18:44:21 +02:00
|
|
|
"go.opentelemetry.io/otel/sdk/resource"
|
2019-10-29 22:27:22 +02:00
|
|
|
)
|
|
|
|
|
// Processor is responsible for deciding which kind of aggregation to
// use (via AggregatorSelector), gathering exported results from the
// SDK during collection, and deciding over which dimensions to group
// the exported data.
//
// The SDK supports binding only one of these interfaces, as it has
// the sole responsibility of determining which Aggregator to use for
// each record.
//
// The embedded AggregatorSelector interface is called (concurrently)
// in instrumentation context to select the appropriate Aggregator for
// an instrument.
//
// The `Process` method is called during collection in a
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the processor to build the set of metrics
// currently being exported.
type Processor interface {
	// AggregatorSelector is responsible for selecting the
	// concrete type of Aggregator used for a metric in the SDK.
	//
	// This may be a static decision based on fields of the
	// Descriptor, or it could use an external configuration
	// source to customize the treatment of each metric
	// instrument.
	//
	// The result from AggregatorSelector.AggregatorFor should be
	// the same type for a given Descriptor or else nil.  The same
	// type should be returned for a given descriptor, because
	// Aggregators only know how to Merge with their own type.  If
	// the result is nil, the metric instrument will be disabled.
	//
	// Note that the SDK only calls AggregatorFor when new records
	// require an Aggregator.  This does not provide a way to
	// disable metrics with active records.
	AggregatorSelector

	// Process is called by the SDK once per internal record,
	// passing the export Accumulation (a Descriptor, the corresponding
	// Labels, and the checkpointed Aggregator).  This call has no
	// Context argument because it is expected to perform only
	// computation.  An SDK is not expected to call exporters from
	// within Process; use a controller for that (see
	// ./controllers/{pull,push}).
	Process(Accumulation) error
}
|
|
|
|
|
// AggregatorSelector supports selecting the kind of Aggregator to
// use at runtime for a specific metric instrument.
type AggregatorSelector interface {
	// AggregatorFor allocates a variable number of aggregators of
	// a kind suitable for the requested export.  This method
	// initializes a `...*Aggregator`, to support making a single
	// allocation.
	//
	// When the call returns without initializing the *Aggregator
	// to a non-nil value, the metric instrument is explicitly
	// disabled.
	//
	// This must return a consistent type to avoid confusion in
	// later stages of the metrics export process, i.e., when
	// Merging or Checkpointing aggregators for a specific
	// instrument.
	//
	// Note: This is context-free because the aggregator should
	// not relate to the incoming context.  This call should not
	// block.
	AggregatorFor(*metric.Descriptor, ...*Aggregator)
}
|
|
|
|
|
// Checkpointer is the interface used by a Controller to coordinate
// the Processor with Accumulator(s) and Exporter(s).  The
// StartCollection() and FinishCollection() methods start and finish a
// collection interval.  Controllers call the Accumulator(s) during
// collection to process Accumulations.
type Checkpointer interface {
	// Processor processes metric data for export.  The Process
	// method is bracketed by StartCollection and FinishCollection
	// calls.  The embedded AggregatorSelector can be called at
	// any time.
	Processor

	// CheckpointSet returns the current data set.  This may be
	// called before and after collection.  The
	// implementation is required to return the same value
	// throughout its lifetime, since CheckpointSet exposes a
	// sync.Locker interface.  The caller is responsible for
	// locking the CheckpointSet before initiating collection.
	CheckpointSet() CheckpointSet

	// StartCollection begins a collection interval.
	StartCollection()

	// FinishCollection ends a collection interval.
	FinishCollection() error
}
|
|
|
|
|
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to an instrument.  Sum-only
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (ValueRecorder, ValueObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation.  It is possible
// to attach a Sum aggregator to a ValueRecorder instrument or a
// MinMaxSumCount aggregator to a Counter instrument.
type Aggregator interface {
	// Aggregation returns an Aggregation interface to access the
	// current state of this Aggregator.  The caller is
	// responsible for synchronization and must not call any of the
	// other methods in this interface concurrently while using
	// the Aggregation.
	Aggregation() aggregation.Aggregation

	// Update receives a new measured value and incorporates it
	// into the aggregation.  Update() calls may be called
	// concurrently.
	//
	// Descriptor.NumberKind() should be consulted to determine
	// whether the provided number is an int64 or float64.
	//
	// The Context argument comes from user-level code and could be
	// inspected for a `correlation.Map` or `trace.SpanContext`.
	Update(context.Context, metric.Number, *metric.Descriptor) error

	// SynchronizedMove is called during collection to finish one
	// period of aggregation by atomically saving the
	// currently-updating state into the argument Aggregator AND
	// resetting the current value to the zero state.
	//
	// SynchronizedMove() is called concurrently with Update().  These
	// two methods must be synchronized with respect to each
	// other, for correctness.
	//
	// After saving a synchronized copy, the Aggregator can be converted
	// into one or more of the interfaces in the `aggregation` sub-package,
	// according to kind of Aggregator that was selected.
	//
	// This method will return an InconsistentAggregatorError if
	// this Aggregator cannot be copied into the destination due
	// to an incompatible type.
	//
	// This call has no Context argument because it is expected to
	// perform only computation.
	SynchronizedMove(destination Aggregator, descriptor *metric.Descriptor) error

	// Merge combines the checkpointed state from the argument
	// Aggregator into this Aggregator.  Merge is not synchronized
	// with respect to Update or SynchronizedMove.
	//
	// The owner of an Aggregator being merged is responsible for
	// synchronization of both Aggregator states.
	Merge(Aggregator, *metric.Descriptor) error
}
|
|
|
|
|
// Subtractor is an optional interface implemented by some
// Aggregators.  An Aggregator must support `Subtract()` in order to
// be configured for a Precomputed-Sum instrument (SumObserver,
// UpDownSumObserver) using a DeltaExporter.
type Subtractor interface {
	// Subtract subtracts the `operand` from this Aggregator and
	// outputs the value in `result`.
	Subtract(operand, result Aggregator, descriptor *metric.Descriptor) error
}
|
|
|
|
|
// Exporter handles presentation of the checkpoint of aggregate
// metrics.  This is the final stage of a metrics export pipeline,
// where metric data are formatted for a specific system.
type Exporter interface {
	// Export is called immediately after completing a collection
	// pass in the SDK.
	//
	// The Context comes from the controller that initiated
	// collection.
	//
	// The CheckpointSet interface refers to the Processor that just
	// completed collection.
	Export(context.Context, CheckpointSet) error

	// ExportKindSelector is an interface used by the Processor
	// in deciding whether to compute Delta or Cumulative
	// Aggregations when passing Records to this Exporter.
	ExportKindSelector
}
|
|
|
|
|
|
|
|
// ExportKindSelector is a sub-interface of Exporter used to indicate
// whether the Processor should compute Delta or Cumulative
// Aggregations.
type ExportKindSelector interface {
	// ExportKindFor should return the correct ExportKind that
	// should be used when exporting data for the given metric
	// instrument and Aggregator kind.
	ExportKindFor(*metric.Descriptor, aggregation.Kind) ExportKind
}
|
|
|
|
|
|
|
|
// CheckpointSet allows a controller to access a complete checkpoint of
// aggregated metrics from the Processor.  This is passed to the
// Exporter which may then use ForEach to iterate over the collection
// of aggregated metrics.
type CheckpointSet interface {
	// ForEach iterates over aggregated checkpoints for all
	// metrics that were updated during the last collection
	// period.  Each aggregated checkpoint returned by the
	// function parameter may return an error.
	//
	// The ExportKindSelector argument is used to determine
	// whether the Record is computed using Delta or Cumulative
	// aggregation.
	//
	// ForEach tolerates ErrNoData silently, as this is
	// expected from the Meter implementation.  Any other kind
	// of error will immediately halt ForEach and return
	// the error to the caller.
	ForEach(ExportKindSelector, func(Record) error) error

	// Locker supports locking the checkpoint set.  Collection
	// into the checkpoint set cannot take place (in case of a
	// stateful processor) while it is locked.
	//
	// The Processor attached to the Accumulator MUST be called
	// with the lock held.
	sync.Locker

	// RLock acquires a read lock corresponding to this Locker.
	RLock()
	// RUnlock releases a read lock corresponding to this Locker.
	RUnlock()
}
|
2019-10-31 07:15:27 +02:00
|
|
|
|
// Metadata contains the common elements for exported metric data that
// are shared by the Accumulator->Processor and Processor->Exporter
// steps.
type Metadata struct {
	// descriptor identifies the metric instrument being exported.
	descriptor *metric.Descriptor
	// labels is the label set associated with the aggregated data.
	labels *label.Set
	// resource holds common attributes that apply to this metric event.
	resource *resource.Resource
}
|
|
|
|
|
|
|
|
// Accumulation contains the exported data for a single metric instrument
// and label set, as prepared by an Accumulator for the Processor.
type Accumulation struct {
	Metadata
	// aggregator holds the checkpointed state for one collection period.
	aggregator Aggregator
}
|
|
|
|
|
// Record contains the exported data for a single metric instrument
// and label set, as prepared by the Processor for the Exporter.
// This includes the effective start and end time for the aggregation.
type Record struct {
	Metadata
	// aggregation is the read-only view of the aggregated data.
	aggregation aggregation.Aggregation
	// start and end bound the interval covered by this aggregation.
	start time.Time
	end   time.Time
}
|
|
|
|
|
|
|
|
// Descriptor describes the metric instrument being exported.
|
|
|
|
func (m Metadata) Descriptor() *metric.Descriptor {
|
|
|
|
return m.descriptor
|
|
|
|
}
|
|
|
|
|
|
|
|
// Labels describes the labels associated with the instrument and the
|
|
|
|
// aggregated data.
|
|
|
|
func (m Metadata) Labels() *label.Set {
|
|
|
|
return m.labels
|
|
|
|
}
|
|
|
|
|
|
|
|
// Resource contains common attributes that apply to this metric event.
|
|
|
|
func (m Metadata) Resource() *resource.Resource {
|
|
|
|
return m.resource
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewAccumulation allows Accumulator implementations to construct new
|
2020-06-23 21:00:15 +02:00
|
|
|
// Accumulations to send to Processors. The Descriptor, Labels, Resource,
|
2020-06-18 19:16:33 +02:00
|
|
|
// and Aggregator represent aggregate metric events received over a single
|
|
|
|
// collection period.
|
|
|
|
func NewAccumulation(descriptor *metric.Descriptor, labels *label.Set, resource *resource.Resource, aggregator Aggregator) Accumulation {
|
|
|
|
return Accumulation{
|
|
|
|
Metadata: Metadata{
|
|
|
|
descriptor: descriptor,
|
|
|
|
labels: labels,
|
|
|
|
resource: resource,
|
|
|
|
},
|
2019-11-15 23:01:20 +02:00
|
|
|
aggregator: aggregator,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Aggregator returns the checkpointed aggregator. It is safe to
|
|
|
|
// access the checkpointed state without locking.
|
2020-06-18 19:16:33 +02:00
|
|
|
func (r Accumulation) Aggregator() Aggregator {
|
2019-11-15 23:01:20 +02:00
|
|
|
return r.aggregator
|
|
|
|
}
|
|
|
|
|
2020-06-23 21:00:15 +02:00
|
|
|
// NewRecord allows Processor implementations to construct export
|
2020-06-18 19:16:33 +02:00
|
|
|
// records. The Descriptor, Labels, and Aggregator represent
|
|
|
|
// aggregate metric events received over a single collection period.
|
|
|
|
func NewRecord(descriptor *metric.Descriptor, labels *label.Set, resource *resource.Resource, aggregation aggregation.Aggregation, start, end time.Time) Record {
|
|
|
|
return Record{
|
|
|
|
Metadata: Metadata{
|
|
|
|
descriptor: descriptor,
|
|
|
|
labels: labels,
|
|
|
|
resource: resource,
|
|
|
|
},
|
|
|
|
aggregation: aggregation,
|
|
|
|
start: start,
|
|
|
|
end: end,
|
|
|
|
}
|
2019-11-15 23:01:20 +02:00
|
|
|
}
|
|
|
|
|
2020-06-18 19:16:33 +02:00
|
|
|
// Aggregation returns the aggregation, an interface to the record and
|
|
|
|
// its aggregator, dependent on the kind of both the input and exporter.
|
|
|
|
func (r Record) Aggregation() aggregation.Aggregation {
|
|
|
|
return r.aggregation
|
2019-11-15 23:01:20 +02:00
|
|
|
}
|
2020-05-19 02:44:28 +02:00
|
|
|
|
2020-06-18 19:16:33 +02:00
|
|
|
// StartTime is the start time of the interval covered by this aggregation.
|
|
|
|
func (r Record) StartTime() time.Time {
|
|
|
|
return r.start
|
|
|
|
}
|
|
|
|
|
|
|
|
// EndTime is the end time of the interval covered by this aggregation.
|
|
|
|
func (r Record) EndTime() time.Time {
|
|
|
|
return r.end
|
2020-05-19 02:44:28 +02:00
|
|
|
}
|
2020-06-23 07:59:51 +02:00
|
|
|
|
|
|
|
// ExportKind indicates the kind of data exported by an exporter.
// These bits may be OR-d together when multiple exporters are in use.
type ExportKind int

const (
	// CumulativeExporter indicates that the Exporter expects a
	// Cumulative Aggregation.
	CumulativeExporter ExportKind = 1 << iota // e.g., Prometheus

	// DeltaExporter indicates that the Exporter expects a
	// Delta Aggregation.
	DeltaExporter // e.g., StatsD

	// PassThroughExporter indicates that the Exporter expects
	// either a Cumulative or a Delta Aggregation, whichever does
	// not require maintaining state for the given instrument.
	PassThroughExporter // e.g., OTLP
)

// Includes tests whether `kind` includes a specific kind of
// exporter.
func (kind ExportKind) Includes(has ExportKind) bool {
	// A non-empty bitwise intersection means the kind is present.
	return has&kind != 0
}
|
|
|
|
|
|
|
|
// ExportKindFor returns a constant, as an implementation of ExportKindSelector.
|
|
|
|
func (kind ExportKind) ExportKindFor(_ *metric.Descriptor, _ aggregation.Kind) ExportKind {
|
|
|
|
return kind
|
|
|
|
}
|
|
|
|
|
|
|
|
// MemoryRequired returns whether an exporter of this kind requires
|
|
|
|
// memory to export correctly.
|
|
|
|
func (kind ExportKind) MemoryRequired(mkind metric.Kind) bool {
|
|
|
|
switch mkind {
|
|
|
|
case metric.ValueRecorderKind, metric.ValueObserverKind,
|
|
|
|
metric.CounterKind, metric.UpDownCounterKind:
|
|
|
|
// Delta-oriented instruments:
|
|
|
|
return kind.Includes(CumulativeExporter)
|
|
|
|
|
|
|
|
case metric.SumObserverKind, metric.UpDownSumObserverKind:
|
|
|
|
// Cumulative-oriented instruments:
|
|
|
|
return kind.Includes(DeltaExporter)
|
|
|
|
}
|
|
|
|
// Something unexpected is happening--we could panic. This
|
|
|
|
// will become an error when the exporter tries to access a
|
|
|
|
// checkpoint, presumably, so let it be.
|
|
|
|
return false
|
|
|
|
}
|