2020-03-25 23:47:17 +02:00
|
|
|
// Copyright The OpenTelemetry Authors
|
2020-01-21 19:15:09 +02:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package histogram // import "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"sort"
|
2020-04-29 19:08:58 +02:00
|
|
|
"sync"
|
2020-01-21 19:15:09 +02:00
|
|
|
|
2020-03-19 21:02:46 +02:00
|
|
|
"go.opentelemetry.io/otel/api/metric"
|
2020-01-21 19:15:09 +02:00
|
|
|
export "go.opentelemetry.io/otel/sdk/export/metric"
|
|
|
|
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
|
|
|
|
)
|
|
|
|
|
2020-05-18 18:44:33 +02:00
|
|
|
// Note: This code uses a Mutex to govern access to the exclusive
|
|
|
|
// aggregator state. This is in contrast to a lock-free approach
|
|
|
|
// (as in the Go prometheus client) that was reverted here:
|
|
|
|
// https://github.com/open-telemetry/opentelemetry-go/pull/669
|
|
|
|
|
2020-01-21 19:15:09 +02:00
|
|
|
type (
	// Aggregator observe events and counts them in pre-determined buckets.
	// It also calculates the sum and count of all events.
	Aggregator struct {
		// lock protects current and checkpoint.  A Mutex is used
		// instead of lock-free updates; see the note above.
		lock sync.Mutex
		// current accumulates in-flight updates.
		current state
		// checkpoint holds the state captured by the last Checkpoint().
		checkpoint state
		// boundaries are the sorted bucket upper limits used by Update
		// to locate the bucket for each observation.
		boundaries []float64
		// kind is the number kind (int64/float64/uint64) of the
		// instrument this aggregator serves.
		kind metric.NumberKind
	}

	// state represents the state of a histogram, consisting of
	// the sum and counts for all observed values and
	// the per-bucket counts for the pre-determined boundaries.
	state struct {
		// bucketCounts has len(boundaries)+1 entries; bucket i counts
		// values in [boundaries[i-1], boundaries[i]) — a value equal
		// to a boundary lands in the higher bucket (see Update).
		bucketCounts []float64
		// count is the total number of observations.
		count metric.Number
		// sum is the sum of all observed values, in the instrument's kind.
		sum metric.Number
	}
)
|
|
|
|
|
|
|
|
// Compile-time assertions that Aggregator implements the exporter
// aggregator interface and the Sum/Count/Histogram aggregations.
var _ export.Aggregator = &Aggregator{}
var _ aggregator.Sum = &Aggregator{}
var _ aggregator.Count = &Aggregator{}
var _ aggregator.Histogram = &Aggregator{}
|
|
|
|
|
2020-05-16 07:11:12 +02:00
|
|
|
// New returns a new aggregator for computing Histograms.
|
2020-01-21 19:15:09 +02:00
|
|
|
//
|
|
|
|
// A Histogram observe events and counts them in pre-defined buckets.
|
|
|
|
// And also provides the total sum and count of all observations.
|
|
|
|
//
|
|
|
|
// Note that this aggregator maintains each value using independent
|
|
|
|
// atomic operations, which introduces the possibility that
|
|
|
|
// checkpoints are inconsistent.
|
2020-05-21 19:29:03 +02:00
|
|
|
func New(desc *metric.Descriptor, boundaries []float64) *Aggregator {
|
2020-01-21 19:15:09 +02:00
|
|
|
// Boundaries MUST be ordered otherwise the histogram could not
|
|
|
|
// be properly computed.
|
2020-05-21 19:29:03 +02:00
|
|
|
// metric.SortNumbers(desc.NumberKind(), boundaries)
|
|
|
|
// sortedBoundaries := numbers{
|
|
|
|
// numbers: make([]metric.Number, len(boundaries)),
|
|
|
|
// kind: desc.NumberKind(),
|
|
|
|
// }
|
|
|
|
sort.Float64s(boundaries)
|
2020-01-21 19:15:09 +02:00
|
|
|
|
2020-05-21 19:29:03 +02:00
|
|
|
// copy(sortedBoundaries.numbers, boundaries)
|
|
|
|
// sort.Sort(&sortedBoundaries)
|
|
|
|
// boundaries = sortedBoundaries.numbers
|
2020-01-21 19:15:09 +02:00
|
|
|
|
2020-05-18 18:44:33 +02:00
|
|
|
return &Aggregator{
|
2020-01-21 19:15:09 +02:00
|
|
|
kind: desc.NumberKind(),
|
|
|
|
boundaries: boundaries,
|
2020-05-18 18:44:33 +02:00
|
|
|
current: emptyState(boundaries),
|
|
|
|
checkpoint: emptyState(boundaries),
|
2020-01-21 19:15:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sum returns the sum of all values in the checkpoint.
|
2020-05-11 08:44:42 +02:00
|
|
|
func (c *Aggregator) Sum() (metric.Number, error) {
|
2020-03-11 20:12:23 +02:00
|
|
|
c.lock.Lock()
|
|
|
|
defer c.lock.Unlock()
|
2020-04-29 19:08:58 +02:00
|
|
|
return c.checkpoint.sum, nil
|
2020-01-21 19:15:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Count returns the number of values in the checkpoint.
|
|
|
|
func (c *Aggregator) Count() (int64, error) {
|
2020-03-11 20:12:23 +02:00
|
|
|
c.lock.Lock()
|
|
|
|
defer c.lock.Unlock()
|
2020-04-29 19:08:58 +02:00
|
|
|
return int64(c.checkpoint.count), nil
|
2020-01-21 19:15:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Histogram returns the count of events in pre-determined buckets.
|
|
|
|
func (c *Aggregator) Histogram() (aggregator.Buckets, error) {
|
2020-03-11 20:12:23 +02:00
|
|
|
c.lock.Lock()
|
|
|
|
defer c.lock.Unlock()
|
2020-05-18 18:44:33 +02:00
|
|
|
return aggregator.Buckets{
|
|
|
|
Boundaries: c.boundaries,
|
|
|
|
Counts: c.checkpoint.bucketCounts,
|
|
|
|
}, nil
|
2020-01-21 19:15:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Checkpoint saves the current state and resets the current state to
// the empty set.  The swap happens under the aggregator lock, so the
// checkpointed sum, count and bucket counts are mutually consistent.
func (c *Aggregator) Checkpoint(ctx context.Context, desc *metric.Descriptor) {
	c.lock.Lock()
	// Swap in a fresh state; the previous current becomes the checkpoint.
	c.checkpoint, c.current = c.current, emptyState(c.boundaries)
	c.lock.Unlock()
}
|
|
|
|
|
2020-05-21 19:29:03 +02:00
|
|
|
func emptyState(boundaries []float64) state {
|
2020-04-29 19:08:58 +02:00
|
|
|
return state{
|
2020-05-21 19:29:03 +02:00
|
|
|
bucketCounts: make([]float64, len(boundaries)+1),
|
2020-04-29 19:08:58 +02:00
|
|
|
}
|
2020-01-21 19:15:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Update adds the recorded measurement to the current data set.
|
2020-05-11 08:44:42 +02:00
|
|
|
func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metric.Descriptor) error {
|
2020-01-21 19:15:09 +02:00
|
|
|
kind := desc.NumberKind()
|
|
|
|
|
2020-05-21 11:08:08 +02:00
|
|
|
bucketID := sort.Search(len(c.boundaries), func(i int) bool {
|
2020-05-21 19:29:03 +02:00
|
|
|
return number.CoerceToFloat64(kind) < c.boundaries[i]
|
2020-05-21 11:08:08 +02:00
|
|
|
})
|
2020-01-21 19:15:09 +02:00
|
|
|
|
2020-04-29 19:08:58 +02:00
|
|
|
c.lock.Lock()
|
|
|
|
defer c.lock.Unlock()
|
|
|
|
|
|
|
|
c.current.count.AddInt64(1)
|
|
|
|
c.current.sum.AddNumber(kind, number)
|
2020-05-21 19:29:03 +02:00
|
|
|
c.current.bucketCounts[bucketID]++
|
2020-03-11 20:12:23 +02:00
|
|
|
|
2020-01-21 19:15:09 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-03-11 20:12:23 +02:00
|
|
|
// Merge combines two histograms that have the same buckets into a single one.
|
2020-03-19 21:02:46 +02:00
|
|
|
func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error {
|
2020-01-21 19:15:09 +02:00
|
|
|
o, _ := oa.(*Aggregator)
|
|
|
|
if o == nil {
|
|
|
|
return aggregator.NewInconsistentMergeError(c, oa)
|
|
|
|
}
|
|
|
|
|
2020-04-29 19:08:58 +02:00
|
|
|
c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum)
|
2020-05-11 08:44:42 +02:00
|
|
|
c.checkpoint.count.AddNumber(metric.Uint64NumberKind, o.checkpoint.count)
|
2020-01-21 19:15:09 +02:00
|
|
|
|
2020-05-18 18:44:33 +02:00
|
|
|
for i := 0; i < len(c.checkpoint.bucketCounts); i++ {
|
2020-05-21 19:29:03 +02:00
|
|
|
c.checkpoint.bucketCounts[i] += o.checkpoint.bucketCounts[i]
|
2020-01-21 19:15:09 +02:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|