// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"context"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"

	"go.opentelemetry.io/otel"
	api "go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/global"
	internal "go.opentelemetry.io/otel/internal/metric"
	"go.opentelemetry.io/otel/label"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregator"
	"go.opentelemetry.io/otel/sdk/resource"
)

type (
	// Accumulator implements the OpenTelemetry Meter API. The
	// Accumulator is bound to a single export.Processor in
	// `NewAccumulator()`.
	//
	// The Accumulator supports a Collect() API to gather and export
	// current data. Collect() should be arranged according to
	// the processor model. Push-based processors will set up a
	// timer to call Collect() periodically. Pull-based processors
	// will call Collect() when a pull request arrives.
	Accumulator struct {
		// current maps `mapkey` to *record.
		current sync.Map

		// asyncInstruments is a set of
		// `*asyncInstrument` instances
		asyncLock        sync.Mutex
		asyncInstruments *internal.AsyncInstrumentState

		// currentEpoch is the current epoch number. It is
		// incremented in `Collect()`.
		currentEpoch int64

		// processor is the configured processor+configuration.
		processor export.Processor

		// collectLock prevents simultaneous calls to Collect().
		collectLock sync.Mutex

		// asyncSortSlice has a single purpose - as a temporary
		// place for sorting during labels creation to avoid
		// allocation. It is cleared after use.
		asyncSortSlice label.Sortable

		// resource is applied to all records in this Accumulator.
		resource *resource.Resource
	}

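	// syncInstrument implements api.SyncImpl; measurements are
	// aggregated per distinct label set via acquireHandle().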
	syncInstrument struct {
		instrument
	}

	// mapkey uniquely describes a metric instrument in terms of
	// its InstrumentID and the encoded form of its labels.
	mapkey struct {
		descriptor *otel.Descriptor
		ordered    label.Distinct
	}

	// record maintains the state of one metric instrument. Due to
	// the use of lock-free algorithms, there may be more than one
	// `record` in existence at a time, although at most one can
	// be referenced from the `Accumulator.current` map.
	record struct {
		// refMapped keeps track of refcounts and the mapping state to the
		// Accumulator.current map.
		refMapped refcountMapped

		// updateCount is incremented on every Update.
		updateCount int64

		// collectedCount is set to updateCount on collection,
		// supports checking for no updates during a round.
		collectedCount int64

		// storage is the stored label set for this record,
		// except in cases where a label set is shared due to
		// batch recording.
		storage label.Set

		// labels is the processed label set for this record.
		// This may refer to the `storage` field in another
		// record if this label set is shared as a result of
		// `RecordBatch`.
		labels *label.Set

		// sortSlice has a single purpose - as a temporary
		// place for sorting during labels creation to avoid
		// allocation.
		sortSlice label.Sortable

		// inst is a pointer to the corresponding instrument.
		inst *syncInstrument

		// current implements the actual RecordOne() API,
		// depending on the type of aggregation. If nil, the
		// metric was disabled by the exporter.
		current    export.Aggregator
		checkpoint export.Aggregator
	}

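	// instrument holds the state common to synchronous and
	// asynchronous instruments: the owning Accumulator and the
	// instrument's Descriptor.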
	instrument struct {
		meter      *Accumulator
		descriptor otel.Descriptor
	}

	asyncInstrument struct {
		instrument
		// recorders maps ordered labels to the pair of
		// labelset and recorder
		recorders map[label.Distinct]*labeledRecorder
	}

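	// labeledRecorder pairs one label set with its aggregator and
	// records the epoch in which it was last observed, so that stale
	// entries can be removed during checkpointAsync().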
	labeledRecorder struct {
		observedEpoch int64
		labels        *label.Set
		observed      export.Aggregator
	}
)

var (
	_ api.MeterImpl     = &Accumulator{}
	_ api.AsyncImpl     = &asyncInstrument{}
	_ api.SyncImpl      = &syncInstrument{}
	_ api.BoundSyncImpl = &record{}

	ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument")
)

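// Descriptor returns the Descriptor this instrument was created with.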
func (inst *instrument) Descriptor() api.Descriptor {
	return inst.descriptor
}

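// Implementation returns the asyncInstrument itself, allowing the SDK
// to recover the concrete type from an api.AsyncImpl (see fromAsync).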
func (a *asyncInstrument) Implementation() interface{} {
	return a
}

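// Implementation returns the syncInstrument itself, allowing the SDK
// to recover the concrete type from an api.SyncImpl (see fromSync).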
func (s *syncInstrument) Implementation() interface{} {
	return s
}

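// observe records a single asynchronous observation: the value is
// range-checked against the descriptor and fed to the aggregator
// selected for this label set.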
func (a *asyncInstrument) observe(number api.Number, labels *label.Set) {
	if err := aggregator.RangeTest(number, &a.descriptor); err != nil {
		global.Handle(err)
		return
	}
	recorder := a.getRecorder(labels)
	if recorder == nil {
		// The instrument is disabled according to the
		// AggregatorSelector.
		return
	}
	if err := recorder.Update(context.Background(), number, &a.descriptor); err != nil {
		global.Handle(err)
		return
	}
}

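// getRecorder returns the aggregator to use for the given label set,
// creating a labeledRecorder entry on first use. A nil return means
// the AggregatorSelector disabled this instrument for this label set.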
func (a *asyncInstrument) getRecorder(labels *label.Set) export.Aggregator {
	lrec, ok := a.recorders[labels.Equivalent()]
	if ok {
		if lrec.observedEpoch == a.meter.currentEpoch {
			// Last value wins for Observers, so if we see the same labels
			// in the current epoch, we replace the old recorder.
			a.meter.processor.AggregatorFor(&a.descriptor, &lrec.observed)
		} else {
			lrec.observedEpoch = a.meter.currentEpoch
		}
		a.recorders[labels.Equivalent()] = lrec
		return lrec.observed
	}
	var rec export.Aggregator
	a.meter.processor.AggregatorFor(&a.descriptor, &rec)
	if a.recorders == nil {
		a.recorders = make(map[label.Distinct]*labeledRecorder)
	}
	// This may store a nil recorder in the map, thus disabling the
	// asyncInstrument for the labelset for good. This is intentional,
	// but will be revisited later.
	a.recorders[labels.Equivalent()] = &labeledRecorder{
		observed:      rec,
		labels:        labels,
		observedEpoch: a.meter.currentEpoch,
	}
	return rec
}

// acquireHandle gets or creates a `*record` corresponding to `kvs`,
// the input labels. The second argument `labelPtr` is passed in to
// support re-use of the ordered labels computed by a previous
// measurement in the same batch. This performs two allocations
// in the common case.
func (s *syncInstrument) acquireHandle(kvs []label.KeyValue, labelPtr *label.Set) *record {
	var rec *record
	var equiv label.Distinct

	if labelPtr == nil {
		// This memory allocation may not be used, but it's
		// needed for the `sortSlice` field, to avoid an
		// allocation while sorting.
		rec = &record{}
		rec.storage = label.NewSetWithSortable(kvs, &rec.sortSlice)
		rec.labels = &rec.storage
		equiv = rec.storage.Equivalent()
	} else {
		equiv = labelPtr.Equivalent()
	}

	// Create lookup key for sync.Map (one allocation, as this
	// passes through an interface{})
	mk := mapkey{
		descriptor: &s.descriptor,
		ordered:    equiv,
	}

	if actual, ok := s.meter.current.Load(mk); ok {
		// Existing record case.
		existingRec := actual.(*record)
		if existingRec.refMapped.ref() {
			// At this moment it is guaranteed that the entry is in
			// the map and will not be removed.
			return existingRec
		}
		// This entry is no longer mapped, try to add a new entry.
	}

	if rec == nil {
		rec = &record{}
		rec.labels = labelPtr
	}
	rec.refMapped = refcountMapped{value: 2}
	rec.inst = s

	s.meter.processor.AggregatorFor(&s.descriptor, &rec.current, &rec.checkpoint)

	for {
		// Load/Store: there's a memory allocation to place `mk` into
		// an interface here.
		if actual, loaded := s.meter.current.LoadOrStore(mk, rec); loaded {
			// Existing record case. We cannot change rec here because,
			// if this ref fails, we will try to add rec again to avoid
			// new allocations.
			oldRec := actual.(*record)
			if oldRec.refMapped.ref() {
				// At this moment it is guaranteed that the entry is in
				// the map and will not be removed.
				return oldRec
			}
			// This loaded entry is marked as unmapped (so Collect will remove
			// it from the map immediately), try again - this is a busy waiting
			// strategy to wait until Collect() removes this entry from the map.
			//
			// This can be improved by having a list of "Unmapped" entries for
			// one time only usages, OR we can make this a blocking path and use
			// a Mutex that protects the delete operation (delete only if the old
			// record is associated with the key).

			// Let collector get work done to remove the entry from the map.
			runtime.Gosched()
			continue
		}
		// The new entry was added to the map, good to go.
		return rec
	}
}

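// Bind implements api.SyncImpl, returning a bound instrument (a
// *record) for the given label set.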
func (s *syncInstrument) Bind(kvs []label.KeyValue) api.BoundSyncImpl {
	return s.acquireHandle(kvs, nil)
}

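// RecordOne implements api.SyncImpl: it acquires a handle for the
// label set, records a single value, and releases the handle.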
func (s *syncInstrument) RecordOne(ctx context.Context, number api.Number, kvs []label.KeyValue) {
	h := s.acquireHandle(kvs, nil)
	defer h.Unbind()
	h.RecordOne(ctx, number)
}

// NewAccumulator constructs a new Accumulator for the given
// processor. This Accumulator supports only a single processor.
//
// The Accumulator does not start any background process to collect itself
// periodically; this responsibility lies with the processor, typically
// depending on the type of export. For example, a pull-based
// processor will call Collect() when it receives a request to scrape
// current metric values. A push-based processor should configure its
// own periodic collection.
func NewAccumulator(processor export.Processor, resource *resource.Resource) *Accumulator {
	return &Accumulator{
		processor:        processor,
		asyncInstruments: internal.NewAsyncInstrumentState(),
		resource:         resource,
	}
}

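// A minimal collection-loop sketch for an Accumulator follows; it is
// illustrative only: `processor`, `res`, `ticker`, and `ctx` are assumed
// to be supplied by a surrounding controller and are not defined here.
//
//	accum := NewAccumulator(processor, res)
//	// Instruments are created through the api.MeterImpl methods below.
//	for range ticker.C {
//		checkpointed := accum.Collect(ctx)
//		_ = checkpointed // number of records checkpointed this pass
//	}
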
// NewSyncInstrument implements api.MeterImpl.
func (m *Accumulator) NewSyncInstrument(descriptor api.Descriptor) (api.SyncImpl, error) {
	return &syncInstrument{
		instrument: instrument{
			descriptor: descriptor,
			meter:      m,
		},
	}, nil
}

// NewAsyncInstrument implements api.MeterImpl.
func (m *Accumulator) NewAsyncInstrument(descriptor api.Descriptor, runner otel.AsyncRunner) (api.AsyncImpl, error) {
	a := &asyncInstrument{
		instrument: instrument{
			descriptor: descriptor,
			meter:      m,
		},
	}
	m.asyncLock.Lock()
	defer m.asyncLock.Unlock()
	m.asyncInstruments.Register(a, runner)
	return a, nil
}

// Collect traverses the list of active records and observers and
// exports data for each active instrument. Collect() may not be
// called concurrently.
//
// During the collection pass, the export.Processor will receive
// one Process() call per current aggregation.
//
// Returns the number of records that were checkpointed.
func (m *Accumulator) Collect(ctx context.Context) int {
	m.collectLock.Lock()
	defer m.collectLock.Unlock()

	checkpointed := m.observeAsyncInstruments(ctx)
	checkpointed += m.collectSyncInstruments()
	m.currentEpoch++

	return checkpointed
}

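// collectSyncInstruments checkpoints every synchronous record that has
// been updated since the previous collection and unmaps records that
// are idle and unreferenced. It returns the number of checkpointed
// records.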
func (m *Accumulator) collectSyncInstruments() int {
	checkpointed := 0

	m.current.Range(func(key interface{}, value interface{}) bool {
		// Note: always continue to iterate over the entire
		// map by returning `true` in this function.
		inuse := value.(*record)

		mods := atomic.LoadInt64(&inuse.updateCount)
		coll := inuse.collectedCount

		if mods != coll {
			// Updates happened in this interval,
			// checkpoint and continue.
			checkpointed += m.checkpointRecord(inuse)
			inuse.collectedCount = mods
			return true
		}

		// Having no updates since last collection, try to unmap:
		if unmapped := inuse.refMapped.tryUnmap(); !unmapped {
			// The record is referenced by a binding, continue.
			return true
		}

		// If any other goroutines are now trying to re-insert this
		// entry in the map, they are busy calling Gosched() awaiting
		// this deletion:
		m.current.Delete(inuse.mapkey())

		// There's a potential race between `LoadInt64` and
		// `tryUnmap` in this function. Since this is the
		// last we'll see of this record, checkpoint it.
		mods = atomic.LoadInt64(&inuse.updateCount)
		if mods != coll {
			checkpointed += m.checkpointRecord(inuse)
		}
		return true
	})

	return checkpointed
}

// CollectAsync implements internal.AsyncCollector.
func (m *Accumulator) CollectAsync(kv []label.KeyValue, obs ...otel.Observation) {
	labels := label.NewSetWithSortable(kv, &m.asyncSortSlice)

	for _, ob := range obs {
		if a := m.fromAsync(ob.AsyncImpl()); a != nil {
			a.observe(ob.Number(), &labels)
		}
	}
}

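// observeAsyncInstruments runs the registered asynchronous runners and
// checkpoints each asynchronous instrument, returning the number of
// checkpointed aggregations.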
func (m *Accumulator) observeAsyncInstruments(ctx context.Context) int {
	m.asyncLock.Lock()
	defer m.asyncLock.Unlock()

	asyncCollected := 0

	// TODO: change this to `ctx` (in a separate PR, with tests)
	m.asyncInstruments.Run(context.Background(), m)

	for _, inst := range m.asyncInstruments.Instruments() {
		if a := m.fromAsync(inst); a != nil {
			asyncCollected += m.checkpointAsync(a)
		}
	}

	return asyncCollected
}

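// checkpointRecord moves the record's current aggregation into its
// checkpoint and forwards the resulting accumulation to the processor.
// It returns 1 if the record was checkpointed, 0 if it has no
// aggregator or the move fails.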
func (m *Accumulator) checkpointRecord(r *record) int {
	if r.current == nil {
		return 0
	}
	err := r.current.SynchronizedMove(r.checkpoint, &r.inst.descriptor)
	if err != nil {
		global.Handle(err)
		return 0
	}

	a := export.NewAccumulation(&r.inst.descriptor, r.labels, m.resource, r.checkpoint)
	err = m.processor.Process(a)
	if err != nil {
		global.Handle(err)
	}
	return 1
}

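// checkpointAsync forwards every aggregation observed for this
// asynchronous instrument in the current epoch to the processor and
// prunes recorders that have not been observed for more than one
// epoch.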
func (m *Accumulator) checkpointAsync(a *asyncInstrument) int {
	if len(a.recorders) == 0 {
		return 0
	}
	checkpointed := 0
	for encodedLabels, lrec := range a.recorders {
		lrec := lrec
		epochDiff := m.currentEpoch - lrec.observedEpoch
		if epochDiff == 0 {
			if lrec.observed != nil {
				a := export.NewAccumulation(&a.descriptor, lrec.labels, m.resource, lrec.observed)
				err := m.processor.Process(a)
				if err != nil {
					global.Handle(err)
				}
				checkpointed++
			}
		} else if epochDiff > 1 {
			// This is the second collection cycle with no
			// observations for this labelset. Remove the
			// recorder.
			delete(a.recorders, encodedLabels)
		}
	}
	if len(a.recorders) == 0 {
		a.recorders = nil
	}
	return checkpointed
}

// RecordBatch enters a batch of metric events.
func (m *Accumulator) RecordBatch(ctx context.Context, kvs []label.KeyValue, measurements ...api.Measurement) {
	// Labels will be computed the first time acquireHandle is
	// called. Subsequent calls to acquireHandle will re-use the
	// previously computed value instead of recomputing the
	// ordered labels.
	var labelsPtr *label.Set
	for i, meas := range measurements {
		s := m.fromSync(meas.SyncImpl())
		if s == nil {
			continue
		}
		h := s.acquireHandle(kvs, labelsPtr)

		// Re-use labels for the next measurement.
		if i == 0 {
			labelsPtr = h.labels
		}

		defer h.Unbind()
		h.RecordOne(ctx, meas.Number())
	}
}

// RecordOne implements api.BoundSyncImpl.
func (r *record) RecordOne(ctx context.Context, number api.Number) {
	if r.current == nil {
		// The instrument is disabled according to the AggregatorSelector.
		return
	}
	if err := aggregator.RangeTest(number, &r.inst.descriptor); err != nil {
		global.Handle(err)
		return
	}
	if err := r.current.Update(ctx, number, &r.inst.descriptor); err != nil {
		global.Handle(err)
		return
	}
	// Record was modified, inform the Collect() that things need
	// to be collected while the record is still mapped.
	atomic.AddInt64(&r.updateCount, 1)
}

// Unbind implements api.BoundSyncImpl.
func (r *record) Unbind() {
	r.refMapped.unref()
}

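// mapkey returns the lookup key for this record in the
// Accumulator.current map.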
func (r *record) mapkey() mapkey {
	return mapkey{
		descriptor: &r.inst.descriptor,
		ordered:    r.labels.Equivalent(),
	}
}

// fromSync gets a sync implementation object, checking for
// uninitialized instruments and instruments created by another SDK.
func (m *Accumulator) fromSync(sync otel.SyncImpl) *syncInstrument {
	if sync != nil {
		if inst, ok := sync.Implementation().(*syncInstrument); ok {
			return inst
		}
	}
	global.Handle(ErrUninitializedInstrument)
	return nil
}

// fromAsync gets an async implementation object, checking for
// uninitialized instruments and instruments created by another SDK.
func (m *Accumulator) fromAsync(async otel.AsyncImpl) *asyncInstrument {
	if async != nil {
		if inst, ok := async.Implementation().(*asyncInstrument); ok {
			return inst
		}
	}
	global.Handle(ErrUninitializedInstrument)
	return nil
}