2020-03-23 22:41:10 -07:00
|
|
|
// Copyright The OpenTelemetry Authors
|
2019-11-15 13:01:20 -08:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2020-05-11 10:23:06 -07:00
|
|
|
package simple // import "go.opentelemetry.io/otel/sdk/metric/integrator/simple"
|
2019-11-15 13:01:20 -08:00
|
|
|
|
|
|
|
import (
|
2020-03-16 16:28:33 -07:00
|
|
|
"errors"
|
2020-06-18 10:16:33 -07:00
|
|
|
"fmt"
|
2020-05-18 18:37:41 -07:00
|
|
|
"sync"
|
2020-06-18 10:16:33 -07:00
|
|
|
"time"
|
2019-11-15 13:01:20 -08:00
|
|
|
|
2020-04-23 12:10:58 -07:00
|
|
|
"go.opentelemetry.io/otel/api/label"
|
2020-03-19 12:02:46 -07:00
|
|
|
"go.opentelemetry.io/otel/api/metric"
|
2019-11-15 13:01:20 -08:00
|
|
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
2020-06-09 22:53:30 -07:00
|
|
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregation"
|
2020-05-18 17:44:28 -07:00
|
|
|
"go.opentelemetry.io/otel/sdk/resource"
|
2019-11-15 13:01:20 -08:00
|
|
|
)
|
|
|
|
|
|
|
|
type (
	// Integrator is a basic integrator that maintains a batch of
	// accumulations between collection intervals.  It implements the
	// export.Integrator interface (see the interface check below).
	Integrator struct {
		// AggregationSelector chooses Aggregators for new instruments.
		export.AggregationSelector
		// stateful controls whether accumulated values are kept
		// across collection intervals; when false the batch is
		// reset in StartCollection.
		stateful bool
		batch
	}

	// batchKey identifies one accumulation stream: an instrument
	// descriptor combined with a distinct label set and resource.
	batchKey struct {
		descriptor *metric.Descriptor
		distinct   label.Distinct
		resource   label.Distinct
	}

	// batchValue carries an aggregator together with the label set
	// and resource it was accumulated against.
	batchValue struct {
		aggregator export.Aggregator
		labels     *label.Set
		resource   *resource.Resource
	}

	// batch stores the processed accumulations for an interval and
	// implements the export.CheckpointSet interface.
	batch struct {
		// RWMutex implements locking for the `CheckpointSet` interface.
		sync.RWMutex
		values map[batchKey]batchValue

		// Note: the timestamp logic currently assumes all
		// exports are deltas.

		intervalStart time.Time
		intervalEnd   time.Time

		// startedCollection and finishedCollection are the
		// number of StartCollection() and FinishCollection()
		// calls, used to ensure that the sequence of starts
		// and finishes are correctly balanced.

		startedCollection  int64
		finishedCollection int64
	}
)
|
|
|
|
|
2020-05-11 10:23:06 -07:00
|
|
|
var _ export.Integrator = &Integrator{}
|
2020-05-18 18:37:41 -07:00
|
|
|
var _ export.CheckpointSet = &batch{}
|
2020-06-18 10:16:33 -07:00
|
|
|
var ErrInconsistentState = fmt.Errorf("inconsistent integrator state")
|
2019-11-15 13:01:20 -08:00
|
|
|
|
2020-05-11 10:23:06 -07:00
|
|
|
func New(selector export.AggregationSelector, stateful bool) *Integrator {
|
|
|
|
return &Integrator{
|
2020-05-18 18:37:41 -07:00
|
|
|
AggregationSelector: selector,
|
|
|
|
stateful: stateful,
|
|
|
|
batch: batch{
|
2020-06-18 10:16:33 -07:00
|
|
|
values: map[batchKey]batchValue{},
|
|
|
|
intervalStart: time.Now(),
|
2020-05-18 18:37:41 -07:00
|
|
|
},
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-18 10:16:33 -07:00
|
|
|
func (b *Integrator) Process(accumulation export.Accumulation) error {
|
|
|
|
if b.startedCollection != b.finishedCollection+1 {
|
|
|
|
return ErrInconsistentState
|
|
|
|
}
|
|
|
|
|
|
|
|
desc := accumulation.Descriptor()
|
2019-11-15 13:01:20 -08:00
|
|
|
key := batchKey{
|
|
|
|
descriptor: desc,
|
2020-06-18 10:16:33 -07:00
|
|
|
distinct: accumulation.Labels().Equivalent(),
|
|
|
|
resource: accumulation.Resource().Equivalent(),
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|
2020-06-18 10:16:33 -07:00
|
|
|
agg := accumulation.Aggregator()
|
2020-05-18 18:37:41 -07:00
|
|
|
value, ok := b.batch.values[key]
|
2019-11-15 13:01:20 -08:00
|
|
|
if ok {
|
2019-12-23 16:38:35 -08:00
|
|
|
// Note: The call to Merge here combines only
|
2020-06-18 10:16:33 -07:00
|
|
|
// identical accumulations. It is required even for a
|
|
|
|
// stateless Integrator because such identical accumulations
|
2019-12-23 16:38:35 -08:00
|
|
|
// may arise in the Meter implementation due to race
|
|
|
|
// conditions.
|
2019-11-15 13:01:20 -08:00
|
|
|
return value.aggregator.Merge(agg, desc)
|
|
|
|
}
|
2020-05-11 10:23:06 -07:00
|
|
|
// If this integrator is stateful, create a copy of the
|
2019-11-15 13:01:20 -08:00
|
|
|
// Aggregator for long-term storage. Otherwise the
|
|
|
|
// Meter implementation will checkpoint the aggregator
|
|
|
|
// again, overwriting the long-lived state.
|
|
|
|
if b.stateful {
|
|
|
|
tmp := agg
|
2019-12-23 16:38:35 -08:00
|
|
|
// Note: the call to AggregatorFor() followed by Merge
|
|
|
|
// is effectively a Clone() operation.
|
2020-06-13 00:55:01 -07:00
|
|
|
b.AggregatorFor(desc, &agg)
|
2019-11-15 13:01:20 -08:00
|
|
|
if err := agg.Merge(tmp, desc); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2020-05-18 18:37:41 -07:00
|
|
|
b.batch.values[key] = batchValue{
|
2019-11-15 13:01:20 -08:00
|
|
|
aggregator: agg,
|
2020-06-18 10:16:33 -07:00
|
|
|
labels: accumulation.Labels(),
|
|
|
|
resource: accumulation.Resource(),
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-05-11 10:23:06 -07:00
|
|
|
func (b *Integrator) CheckpointSet() export.CheckpointSet {
|
2020-05-18 18:37:41 -07:00
|
|
|
return &b.batch
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|
|
|
|
|
2020-06-18 10:16:33 -07:00
|
|
|
func (b *Integrator) StartCollection() {
|
|
|
|
if b.startedCollection != 0 {
|
|
|
|
b.intervalStart = b.intervalEnd
|
|
|
|
}
|
|
|
|
b.startedCollection++
|
2019-11-15 13:01:20 -08:00
|
|
|
if !b.stateful {
|
2020-05-18 18:37:41 -07:00
|
|
|
b.batch.values = map[batchKey]batchValue{}
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-18 10:16:33 -07:00
|
|
|
func (b *Integrator) FinishCollection() error {
|
|
|
|
b.finishedCollection++
|
|
|
|
b.intervalEnd = time.Now()
|
|
|
|
if b.startedCollection != b.finishedCollection {
|
|
|
|
return ErrInconsistentState
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-05-18 18:37:41 -07:00
|
|
|
func (b *batch) ForEach(f func(export.Record) error) error {
|
2020-06-18 10:16:33 -07:00
|
|
|
if b.startedCollection != b.finishedCollection {
|
|
|
|
return ErrInconsistentState
|
|
|
|
}
|
|
|
|
|
2020-05-18 18:37:41 -07:00
|
|
|
for key, value := range b.values {
|
2020-03-16 16:28:33 -07:00
|
|
|
if err := f(export.NewRecord(
|
2019-11-15 13:01:20 -08:00
|
|
|
key.descriptor,
|
|
|
|
value.labels,
|
2020-05-18 17:44:28 -07:00
|
|
|
value.resource,
|
2020-06-18 10:16:33 -07:00
|
|
|
value.aggregator.Aggregation(),
|
|
|
|
b.intervalStart,
|
|
|
|
b.intervalEnd,
|
2020-06-09 22:53:30 -07:00
|
|
|
)); err != nil && !errors.Is(err, aggregation.ErrNoData) {
|
2020-03-16 16:28:33 -07:00
|
|
|
return err
|
|
|
|
}
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|
2020-03-16 16:28:33 -07:00
|
|
|
return nil
|
2019-11-15 13:01:20 -08:00
|
|
|
}
|