* Add MetricAggregator.Merge() implementations
* Update from feedback
* Type
* Ckpt
* Ckpt
* Add push controller
* Ckpt
* Add aggregator interfaces, stdout encoder
* Modify basic main.go
* Main is working
* Batch stdout output
* Sum update
* Rename stdout
* Add stateless/stateful Batcher options
* Undo a for-loop in the example, remove a done TODO
* Update imports
* Add note
* Rename defaultkeys
* Support variable label encoder to speed OpenMetrics/Statsd export
* Lint
* Doc
* Precommit/lint
* Simplify Aggregator API
* Record->Identifier
* Remove export.Record a.k.a. Identifier
* Checkpoint
* Propagate errors to the SDK, remove a bunch of 'TODO warn'
* Checkpoint
* Introduce export.Labels
* Comments in export/metric.go
* Comment
* More merge
* More doc
* Complete example
* Lint fixes
* Add a testable example
* Lint
* Let Export return an error
* Add a basic stdout exporter test
* Add measure test; fix aggregator APIs
* Use JSON numbers, not strings
* Test stdout exporter error
* Add a test for the call to RangeTest
* Add error handler API to improve correctness test; return errors from RecordOne
* Undo the previous -- do not expose errors
* Add simple selector variations, test
* Repair examples
* Test push controller error handling
* Add SDK label encoder tests
* Add a defaultkeys batcher test
* Add an ungrouped batcher test
* Lint new tests
* Respond to krnowak's feedback
* Undo comment
* Use concrete receivers for export records and labels, since the constructors return structs not pointers
* Bug fix for stateful batchers; clone an aggregator for long term storage
* Remove TODO addressed in #318
* Add errors to all aggregator interfaces
* Handle ErrNoLastValue case in stdout exporter
* Move aggregator API into sdk/export/metric/aggregator
* Update all aggregator exported-method comments
* Document the aggregator APIs
* More aggregator comments
* Add multiple updates to the ungrouped test
* Fixes for feedback from Gustavo and Liz
* Producer->CheckpointSet; add FinishedCollection
* Process takes an export.Record
* ReadCheckpoint->CheckpointSet
* EncodeLabels->Encode
* Format a better inconsistent type error; add more aggregator API tests
* More RangeTest test coverage
* Make benbjohnson/clock a test-only dependency
* Handle ErrNoLastValue in stress_test
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package maxsumcount // import "go.opentelemetry.io/otel/sdk/metric/aggregator/maxsumcount"

import (
	"context"

	"go.opentelemetry.io/otel/api/core"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)

type (
	// Aggregator aggregates measure events, keeping only the max,
	// sum, and count.
	Aggregator struct {
		current    state
		checkpoint state
	}

	state struct {
		count core.Number
		sum   core.Number
		max   core.Number
	}
)

// TODO: The SDK specification says this type should support Min
// values, see #319.

var _ export.Aggregator = &Aggregator{}
var _ aggregator.MaxSumCount = &Aggregator{}

// New returns a new measure aggregator for computing max, sum, and
// count.  It does not compute quantile information other than Max.
//
// Note that this aggregator maintains each value using independent
// atomic operations, which introduces the possibility that
// checkpoints are inconsistent.  For greater consistency and lower
// performance, consider using Array or DDSketch aggregators.
func New() *Aggregator {
	return &Aggregator{}
}

// Sum returns the sum of values in the checkpoint.
func (c *Aggregator) Sum() (core.Number, error) {
	return c.checkpoint.sum, nil
}

// Count returns the number of values in the checkpoint.
func (c *Aggregator) Count() (int64, error) {
	return int64(c.checkpoint.count.AsUint64()), nil
}

// Max returns the maximum value in the checkpoint.
func (c *Aggregator) Max() (core.Number, error) {
	return c.checkpoint.max, nil
}

// Checkpoint saves the current state and resets the current state to
// the empty set.  Since no locks are taken, there is a chance that
// the independent Max, Sum, and Count are not consistent with each
// other.
func (c *Aggregator) Checkpoint(ctx context.Context, _ *export.Descriptor) {
	// N.B. There is no atomic operation that can update all three
	// values at once without a memory allocation.
	//
	// This aggregator is intended to trade this correctness for
	// speed.
	//
	// Therefore, atomically swap fields independently, knowing
	// that individually the three parts of this aggregation could
	// be spread across multiple collections in rare cases.

	c.checkpoint.count.SetUint64(c.current.count.SwapUint64Atomic(0))
	c.checkpoint.sum = c.current.sum.SwapNumberAtomic(core.Number(0))
	c.checkpoint.max = c.current.max.SwapNumberAtomic(core.Number(0))
}

// Update adds the recorded measurement to the current data set.
func (c *Aggregator) Update(_ context.Context, number core.Number, desc *export.Descriptor) error {
	kind := desc.NumberKind()

	c.current.count.AddUint64Atomic(1)
	c.current.sum.AddNumberAtomic(kind, number)

	for {
		current := c.current.max.AsNumberAtomic()

		if number.CompareNumber(kind, current) <= 0 {
			break
		}
		if c.current.max.CompareAndSwapNumber(current, number) {
			break
		}
	}
	return nil
}

// Merge combines two data sets into one.
func (c *Aggregator) Merge(oa export.Aggregator, desc *export.Descriptor) error {
	o, _ := oa.(*Aggregator)
	if o == nil {
		return aggregator.NewInconsistentMergeError(c, oa)
	}

	c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum)
	c.checkpoint.count.AddNumber(core.Uint64NumberKind, o.checkpoint.count)

	if c.checkpoint.max.CompareNumber(desc.NumberKind(), o.checkpoint.max) < 0 {
		c.checkpoint.max.SetNumber(o.checkpoint.max)
	}
	return nil
}
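The Update and Checkpoint comments above describe a deliberate trade-off: count, sum, and max are each maintained with their own atomic operation, so a checkpoint taken while updates are in flight may mix values from slightly different instants. Below is a minimal standalone sketch of that same pattern using only sync/atomic on int64 values; it does not use the SDK's core.Number or export types, and the names msc, Record, and Snapshot are illustrative only, not part of the library.

// Standalone sketch (not part of the SDK): the lock-free max/sum/count
// pattern used by the aggregator above, shown with plain sync/atomic.
package main

import (
	"fmt"
	"sync/atomic"
)

type msc struct {
	count int64
	sum   int64
	max   int64
}

// Record adds one measurement: count and sum need only single atomic
// adds, while max needs a compare-and-swap loop because another
// goroutine may raise the max between our load and our store.
func (m *msc) Record(v int64) {
	atomic.AddInt64(&m.count, 1)
	atomic.AddInt64(&m.sum, v)
	for {
		cur := atomic.LoadInt64(&m.max)
		if v <= cur {
			return // current max already covers this value
		}
		if atomic.CompareAndSwapInt64(&m.max, cur, v) {
			return // we installed the new max
		}
		// Lost the race; reload the max and retry.
	}
}

// Snapshot swaps each field out independently, mirroring Checkpoint:
// fast, but the three values are not guaranteed to describe exactly
// the same set of measurements if Record runs concurrently.
func (m *msc) Snapshot() (count, sum, max int64) {
	count = atomic.SwapInt64(&m.count, 0)
	sum = atomic.SwapInt64(&m.sum, 0)
	max = atomic.SwapInt64(&m.max, 0)
	return
}

func main() {
	var m msc
	for _, v := range []int64{3, 9, 4} {
		m.Record(v)
	}
	c, s, x := m.Snapshot()
	fmt.Printf("count=%d sum=%d max=%d\n", c, s, x) // count=3 sum=16 max=9
}

The sketch also makes the Merge trade-off easier to see: because every field is updated independently, combining two snapshots is just adding the counts and sums and taking the larger max, which is exactly what the aggregator's Merge method does on its checkpoint state.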