
Replace Measure instrument by ValueRecorder instrument (#732)

* Measure->Value recorder and cleanups re: measure

* More edits

* More edits

* Feedback
Authored by Joshua MacDonald on 2020-05-15 22:11:12 -07:00, committed by GitHub
parent 0122b586b7
commit 6bc14ffd2c
43 changed files with 321 additions and 375 deletions

View File

@@ -59,7 +59,7 @@ func (*benchFixture) AggregatorFor(descriptor *metric.Descriptor) export.Aggrega
 	switch descriptor.MetricKind() {
 	case metric.CounterKind:
 		return sum.New()
-	case metric.MeasureKind:
+	case metric.ValueRecorderKind:
 		if strings.HasSuffix(descriptor.Name(), "minmaxsumcount") {
 			return minmaxsumcount.New(descriptor)
 		} else if strings.HasSuffix(descriptor.Name(), "ddsketch") {

View File

@@ -82,9 +82,9 @@ func TestDirect(t *testing.T) {
 	counter.Add(ctx, 1, labels1...)
 	counter.Add(ctx, 1, labels1...)
-	measure := Must(meter1).NewFloat64Measure("test.measure")
-	measure.Record(ctx, 1, labels1...)
-	measure.Record(ctx, 2, labels1...)
+	valuerecorder := Must(meter1).NewFloat64ValueRecorder("test.valuerecorder")
+	valuerecorder.Record(ctx, 1, labels1...)
+	valuerecorder.Record(ctx, 2, labels1...)
 	_ = Must(meter1).RegisterFloat64Observer("test.observer.float", func(result metric.Float64ObserverResult) {
 		result.Observe(1., labels1...)
@@ -96,7 +96,7 @@ func TestDirect(t *testing.T) {
 		result.Observe(2, labels2...)
 	})
-	second := Must(meter2).NewFloat64Measure("test.second")
+	second := Must(meter2).NewFloat64ValueRecorder("test.second")
 	second.Record(ctx, 1, labels3...)
 	second.Record(ctx, 2, labels3...)
@@ -104,7 +104,7 @@ func TestDirect(t *testing.T) {
 	global.SetMeterProvider(provider)
 	counter.Add(ctx, 1, labels1...)
-	measure.Record(ctx, 3, labels1...)
+	valuerecorder.Record(ctx, 3, labels1...)
 	second.Record(ctx, 3, labels3...)
 	mock.RunAsyncInstruments()
@@ -120,7 +120,7 @@ func TestDirect(t *testing.T) {
 			Number: asInt(1),
 		},
 		{
-			Name: "test.measure",
+			Name: "test.valuerecorder",
 			LibraryName: "test1",
 			Labels: asMap(labels1...),
 			Number: asFloat(3),
@@ -174,8 +174,8 @@ func TestBound(t *testing.T) {
 	boundC.Add(ctx, 1)
 	boundC.Add(ctx, 1)
-	measure := Must(glob).NewInt64Measure("test.measure")
-	boundM := measure.Bind(labels1...)
+	valuerecorder := Must(glob).NewInt64ValueRecorder("test.valuerecorder")
+	boundM := valuerecorder.Bind(labels1...)
 	boundM.Record(ctx, 1)
 	boundM.Record(ctx, 2)
@@ -194,7 +194,7 @@ func TestBound(t *testing.T) {
 			Number: asFloat(1),
 		},
 		{
-			Name: "test.measure",
+			Name: "test.valuerecorder",
 			LibraryName: "test",
 			Labels: asMap(labels1...),
 			Number: asInt(3),
@@ -216,8 +216,8 @@ func TestUnbind(t *testing.T) {
 	counter := Must(glob).NewFloat64Counter("test.counter")
 	boundC := counter.Bind(labels1...)
-	measure := Must(glob).NewInt64Measure("test.measure")
-	boundM := measure.Bind(labels1...)
+	valuerecorder := Must(glob).NewInt64ValueRecorder("test.valuerecorder")
+	boundM := valuerecorder.Bind(labels1...)
 	boundC.Unbind()
 	boundM.Unbind()

View File

@@ -36,11 +36,11 @@ var (
 	"counter.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
 		return unwrap(MeterProvider().Meter(libraryName).NewFloat64Counter(name))
 	},
-	"measure.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
-		return unwrap(MeterProvider().Meter(libraryName).NewInt64Measure(name))
+	"valuerecorder.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
+		return unwrap(MeterProvider().Meter(libraryName).NewInt64ValueRecorder(name))
 	},
-	"measure.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
-		return unwrap(MeterProvider().Meter(libraryName).NewFloat64Measure(name))
+	"valuerecorder.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
+		return unwrap(MeterProvider().Meter(libraryName).NewFloat64ValueRecorder(name))
 	},
 	"observer.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
 		return unwrap(MeterProvider().Meter(libraryName).RegisterInt64Observer(name, func(metric.Int64ObserverResult) {}))

View File

@@ -119,29 +119,29 @@ func TestCounter(t *testing.T) {
 	}
 }
-func TestMeasure(t *testing.T) {
+func TestValueRecorder(t *testing.T) {
 	{
 		mockSDK, meter := mockTest.NewMeter()
-		m := Must(meter).NewFloat64Measure("test.measure.float")
+		m := Must(meter).NewFloat64ValueRecorder("test.valuerecorder.float")
 		ctx := context.Background()
 		labels := []kv.KeyValue{}
 		m.Record(ctx, 42, labels...)
 		boundInstrument := m.Bind(labels...)
 		boundInstrument.Record(ctx, 42)
 		meter.RecordBatch(ctx, labels, m.Measurement(42))
-		t.Log("Testing float measure")
+		t.Log("Testing float valuerecorder")
 		checkBatches(t, ctx, labels, mockSDK, metric.Float64NumberKind, m.SyncImpl())
 	}
 	{
 		mockSDK, meter := mockTest.NewMeter()
-		m := Must(meter).NewInt64Measure("test.measure.int")
+		m := Must(meter).NewInt64ValueRecorder("test.valuerecorder.int")
 		ctx := context.Background()
 		labels := []kv.KeyValue{kv.Int("I", 1)}
 		m.Record(ctx, 42, labels...)
 		boundInstrument := m.Bind(labels...)
 		boundInstrument.Record(ctx, 42)
 		meter.RecordBatch(ctx, labels, m.Measurement(42))
-		t.Log("Testing int measure")
+		t.Log("Testing int valuerecorder")
 		checkBatches(t, ctx, labels, mockSDK, metric.Int64NumberKind, m.SyncImpl())
 	}
 }
@@ -309,10 +309,10 @@ func TestWrappedInstrumentError(t *testing.T) {
 	impl := &testWrappedMeter{}
 	meter := metric.WrapMeterImpl(impl, "test")
-	measure, err := meter.NewInt64Measure("test.measure")
+	valuerecorder, err := meter.NewInt64ValueRecorder("test.valuerecorder")
 	require.Equal(t, err, metric.ErrSDKReturnedNilImpl)
-	require.NotNil(t, measure.SyncImpl())
+	require.NotNil(t, valuerecorder.SyncImpl())
 	observer, err := meter.RegisterInt64Observer("test.observer", func(result metric.Int64ObserverResult) {})

View File

@@ -13,57 +13,37 @@
 // limitations under the License.
 // metric package provides an API for reporting diagnostic
-// measurements using four basic kinds of instruments.
-//
-// The three basic kinds are:
-//
-// - counters
-// - measures
-// - observers
-//
-// All instruments report either float64 or int64 values.
-//
-// The primary object that handles metrics is Meter. Meter can be
-// obtained from Provider. The implementations of the Meter and
-// Provider are provided by SDK. Normally, the Meter is used directly
-// only for the instrument creation and batch recording.
-//
-// Counters are instruments that are reporting a quantity or a sum. An
-// example could be bank account balance or bytes downloaded. Counters
-// can be created with either NewFloat64Counter or
-// NewInt64Counter. Counters expect non-negative values by default to
-// be reported. This can be changed with the WithMonotonic option
-// (passing false as a parameter) passed to the Meter.New*Counter
-// function - this allows reporting negative values. To report the new
-// value, use an Add function.
-//
-// Measures are instruments that are reporting values that are
-// recorded separately to figure out some statistical properties from
-// those values (like average). An example could be temperature over
-// time or lines of code in the project over time. Measures can be
-// created with either NewFloat64Measure or NewInt64Measure. Measures
-// by default take only non-negative values. This can be changed with
-// the WithAbsolute option (passing false as a parameter) passed to
-// the New*Measure function - this allows reporting negative values
-// too. To report a new value, use the Record function.
-//
-// Observers are instruments that are reporting a current state of a
-// set of values. An example could be voltage or
-// temperature. Observers can be created with either
-// RegisterFloat64Observer or RegisterInt64Observer. Observers by
-// default have no limitations about reported values - they can be
-// less or greater than the last reported value. This can be changed
-// with the WithMonotonic option passed to the Register*Observer
-// function - this permits the reported values only to go
-// up. Reporting of the new values happens asynchronously, with the
-// use of a callback passed to the Register*Observer function. The
-// callback can report multiple values. There is no unregister function.
-//
-// Counters and measures support creating bound instruments for a
-// potentially more efficient reporting. The bound instruments have
-// the same function names as the instruments (so a Counter bound
-// instrument has Add, and a Measure bound instrument has Record).
-// Bound Instruments can be created with the Bind function of the
-// respective instrument. When done with the bound instrument, call
-// Unbind on it.
+// measurements using instruments categorized as follows:
+//
+// Synchronous instruments are called by the user with a Context.
+// Asynchronous instruments are called by the SDK during collection.
+//
+// Additive instruments are semantically intended for capturing a sum.
+// Non-additive instruments are intended for capturing a distribution.
+//
+// Additive instruments may be monotonic, in which case they are
+// non-decreasing and naturally define a rate.
+//
+// The synchronous instrument names are:
+//
+// Counter: additive, monotonic
+// UpDownCounter: additive
+// ValueRecorder: non-additive
+//
+// and the asynchronous instruments are:
+//
+// SumObserver: additive, monotonic
+// UpDownSumObserver: additive
+// ValueObserver: non-additive
+//
+// All instruments are provided with support for either float64 or
+// int64 input values.
+//
+// The Meter interface supports allocating new instruments as well as
+// interfaces for recording batches of synchronous measurements or
+// asynchronous observations. To obtain a Meter, use a Provider.
+//
+// The Provider interface supports obtaining a named Meter interface.
+// To obtain a Provider implementation, initialize and configure any
+// compatible SDK.
 package metric // import "go.opentelemetry.io/otel/api/metric"
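
As a quick illustration of the taxonomy in the rewritten package comment, the following minimal sketch creates a synchronous additive instrument, a synchronous non-additive instrument, and an asynchronous observer through a Meter. It is not part of the commit: it only uses constructors that appear elsewhere in this diff, the instrument names and label are illustrative, and the Meter is assumed to be supplied by the caller.

package example

import (
    "context"

    "go.opentelemetry.io/otel/api/kv"
    "go.opentelemetry.io/otel/api/metric"
)

// instruments exercises the categories named in the package doc:
// a synchronous additive Counter, a synchronous non-additive
// ValueRecorder, and an asynchronous Observer driven by a callback.
func instruments(ctx context.Context, meter metric.Meter) {
    labels := []kv.KeyValue{kv.String("host", "a")} // illustrative label

    // Synchronous, additive, monotonic: reports a sum.
    requests := metric.Must(meter).NewFloat64Counter("app.requests")
    requests.Add(ctx, 1, labels...)

    // Synchronous, non-additive: records a distribution of values.
    latency := metric.Must(meter).NewFloat64ValueRecorder("app.latency")
    latency.Record(ctx, 3.5, labels...)

    // Asynchronous: the SDK invokes the callback during collection.
    _ = metric.Must(meter).RegisterFloat64Observer("app.queue.depth",
        func(result metric.Float64ObserverResult) {
            result.Observe(7, labels...)
        })
}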

View File

@@ -20,8 +20,8 @@ package metric
 type Kind int8
 const (
-	// MeasureKind indicates a Measure instrument.
-	MeasureKind Kind = iota
+	// ValueRecorderKind indicates a ValueRecorder instrument.
+	ValueRecorderKind Kind = iota
 	// ObserverKind indicates an Observer instrument.
 	ObserverKind
 	// CounterKind indicates a Counter instrument.

View File

@@ -8,14 +8,14 @@ func _() {
 	// An "invalid array index" compiler error signifies that the constant values have changed.
 	// Re-run the stringer command to generate them again.
 	var x [1]struct{}
-	_ = x[MeasureKind-0]
+	_ = x[ValueRecorderKind-0]
 	_ = x[ObserverKind-1]
 	_ = x[CounterKind-2]
 }
-const _Kind_name = "MeasureKindObserverKindCounterKind"
-var _Kind_index = [...]uint8{0, 11, 23, 34}
+const _Kind_name = "ValueRecorderKindObserverKindCounterKind"
+var _Kind_index = [...]uint8{0, 17, 29, 40}
 func (i Kind) String() string {
 	if i < 0 || i >= Kind(len(_Kind_index)-1) {

View File

@@ -82,22 +82,22 @@ func (m Meter) NewFloat64Counter(name string, options ...Option) (Float64Counter
 		m.newSync(name, CounterKind, Float64NumberKind, options))
 }
-// NewInt64Measure creates a new integer Measure instrument with the
+// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
 // given name, customized with options. May return an error if the
 // name is invalid (e.g., empty) or improperly registered (e.g.,
 // duplicate registration).
-func (m Meter) NewInt64Measure(name string, opts ...Option) (Int64Measure, error) {
-	return wrapInt64MeasureInstrument(
-		m.newSync(name, MeasureKind, Int64NumberKind, opts))
+func (m Meter) NewInt64ValueRecorder(name string, opts ...Option) (Int64ValueRecorder, error) {
+	return wrapInt64ValueRecorderInstrument(
+		m.newSync(name, ValueRecorderKind, Int64NumberKind, opts))
 }
-// NewFloat64Measure creates a new floating point Measure with the
+// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
 // given name, customized with options. May return an error if the
 // name is invalid (e.g., empty) or improperly registered (e.g.,
 // duplicate registration).
-func (m Meter) NewFloat64Measure(name string, opts ...Option) (Float64Measure, error) {
-	return wrapFloat64MeasureInstrument(
-		m.newSync(name, MeasureKind, Float64NumberKind, opts))
+func (m Meter) NewFloat64ValueRecorder(name string, opts ...Option) (Float64ValueRecorder, error) {
+	return wrapFloat64ValueRecorderInstrument(
+		m.newSync(name, ValueRecorderKind, Float64NumberKind, opts))
 }
 // RegisterInt64Observer creates a new integer Observer instrument

View File

@@ -53,20 +53,20 @@ func (mm MeterMust) NewFloat64Counter(name string, cos ...Option) Float64Counter
 	}
 }
-// NewInt64Measure calls `Meter.NewInt64Measure` and returns the
+// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
 // instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64Measure(name string, mos ...Option) Int64Measure {
-	if inst, err := mm.meter.NewInt64Measure(name, mos...); err != nil {
+func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...Option) Int64ValueRecorder {
+	if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
 		panic(err)
 	} else {
 		return inst
 	}
 }
-// NewFloat64Measure calls `Meter.NewFloat64Measure` and returns the
+// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
 // instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64Measure(name string, mos ...Option) Float64Measure {
-	if inst, err := mm.meter.NewFloat64Measure(name, mos...); err != nil {
+func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...Option) Float64ValueRecorder {
+	if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
 		panic(err)
 	} else {
 		return inst

View File

@@ -37,11 +37,11 @@ var (
 	"counter.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
 		return unwrap(m.NewFloat64Counter(name))
 	},
-	"measure.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
-		return unwrap(m.NewInt64Measure(name))
+	"valuerecorder.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
+		return unwrap(m.NewInt64ValueRecorder(name))
 	},
-	"measure.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
-		return unwrap(m.NewFloat64Measure(name))
+	"valuerecorder.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
+		return unwrap(m.NewFloat64ValueRecorder(name))
 	},
 	"observer.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
 		return unwrap(m.RegisterInt64Observer(name, func(metric.Int64ObserverResult) {}))

View File

@@ -53,7 +53,7 @@ type InstrumentImpl interface {
 }
 // SyncImpl is the implementation-level interface to a generic
-// synchronous instrument (e.g., Measure and Counter instruments).
+// synchronous instrument (e.g., ValueRecorder and Counter instruments).
 type SyncImpl interface {
 	InstrumentImpl

View File

@@ -174,22 +174,22 @@ func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter,
 	return Float64Counter{syncInstrument: common}, err
 }
-// wrapInt64MeasureInstrument returns an `Int64Measure` from a
+// wrapInt64ValueRecorderInstrument returns an `Int64ValueRecorder` from a
 // `SyncImpl`. An error will be generated if the
 // `SyncImpl` is nil (in which case a No-op is substituted),
 // otherwise the error passes through.
-func wrapInt64MeasureInstrument(syncInst SyncImpl, err error) (Int64Measure, error) {
+func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
 	common, err := checkNewSync(syncInst, err)
-	return Int64Measure{syncInstrument: common}, err
+	return Int64ValueRecorder{syncInstrument: common}, err
 }
-// wrapFloat64MeasureInstrument returns an `Float64Measure` from a
+// wrapFloat64ValueRecorderInstrument returns an `Float64ValueRecorder` from a
 // `SyncImpl`. An error will be generated if the
 // `SyncImpl` is nil (in which case a No-op is substituted),
 // otherwise the error passes through.
-func wrapFloat64MeasureInstrument(syncInst SyncImpl, err error) (Float64Measure, error) {
+func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
 	common, err := checkNewSync(syncInst, err)
-	return Float64Measure{syncInstrument: common}, err
+	return Float64ValueRecorder{syncInstrument: common}, err
 }
 // wrapInt64ObserverInstrument returns an `Int64Observer` from a

View File

@@ -20,78 +20,78 @@ import (
 	"go.opentelemetry.io/otel/api/kv"
 )
-// Float64Measure is a metric that records float64 values.
-type Float64Measure struct {
+// Float64ValueRecorder is a metric that records float64 values.
+type Float64ValueRecorder struct {
 	syncInstrument
 }
-// Int64Measure is a metric that records int64 values.
-type Int64Measure struct {
+// Int64ValueRecorder is a metric that records int64 values.
+type Int64ValueRecorder struct {
 	syncInstrument
 }
-// BoundFloat64Measure is a bound instrument for Float64Measure.
+// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
 //
 // It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64Measure struct {
+type BoundFloat64ValueRecorder struct {
 	syncBoundInstrument
 }
-// BoundInt64Measure is a bound instrument for Int64Measure.
+// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
 //
 // It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64Measure struct {
+type BoundInt64ValueRecorder struct {
 	syncBoundInstrument
 }
-// Bind creates a bound instrument for this measure. The labels are
+// Bind creates a bound instrument for this ValueRecorder. The labels are
 // associated with values recorded via subsequent calls to Record.
-func (c Float64Measure) Bind(labels ...kv.KeyValue) (h BoundFloat64Measure) {
+func (c Float64ValueRecorder) Bind(labels ...kv.KeyValue) (h BoundFloat64ValueRecorder) {
 	h.syncBoundInstrument = c.bind(labels)
 	return
 }
-// Bind creates a bound instrument for this measure. The labels are
+// Bind creates a bound instrument for this ValueRecorder. The labels are
 // associated with values recorded via subsequent calls to Record.
-func (c Int64Measure) Bind(labels ...kv.KeyValue) (h BoundInt64Measure) {
+func (c Int64ValueRecorder) Bind(labels ...kv.KeyValue) (h BoundInt64ValueRecorder) {
 	h.syncBoundInstrument = c.bind(labels)
 	return
 }
 // Measurement creates a Measurement object to use with batch
 // recording.
-func (c Float64Measure) Measurement(value float64) Measurement {
+func (c Float64ValueRecorder) Measurement(value float64) Measurement {
 	return c.float64Measurement(value)
 }
 // Measurement creates a Measurement object to use with batch
 // recording.
-func (c Int64Measure) Measurement(value int64) Measurement {
+func (c Int64ValueRecorder) Measurement(value int64) Measurement {
 	return c.int64Measurement(value)
 }
-// Record adds a new value to the list of measure's records. The
+// Record adds a new value to the list of ValueRecorder's records. The
 // labels should contain the keys and values to be associated with
 // this value.
-func (c Float64Measure) Record(ctx context.Context, value float64, labels ...kv.KeyValue) {
+func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...kv.KeyValue) {
 	c.directRecord(ctx, NewFloat64Number(value), labels)
 }
-// Record adds a new value to the list of measure's records. The
+// Record adds a new value to the ValueRecorder's distribution. The
 // labels should contain the keys and values to be associated with
 // this value.
-func (c Int64Measure) Record(ctx context.Context, value int64, labels ...kv.KeyValue) {
+func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...kv.KeyValue) {
 	c.directRecord(ctx, NewInt64Number(value), labels)
 }
-// Record adds a new value to the list of measure's records using the labels
-// previously bound to the measure via Bind()
-func (b BoundFloat64Measure) Record(ctx context.Context, value float64) {
+// Record adds a new value to the ValueRecorder's distribution using the labels
+// previously bound to the ValueRecorder via Bind().
+func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
 	b.directRecord(ctx, NewFloat64Number(value))
 }
-// Record adds a new value to the list of measure's records using the labels
-// previously bound to the measure via Bind()
-func (b BoundInt64Measure) Record(ctx context.Context, value int64) {
+// Record adds a new value to the ValueRecorder's distribution using the labels
+// previously bound to the ValueRecorder via Bind().
+func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
 	b.directRecord(ctx, NewInt64Number(value))
 }
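
For reference, a minimal sketch of the renamed API defined in this file: a ValueRecorder captures a distribution via Record, either directly, through a bound instrument, or by batch recording on the Meter. The instrument name, label, and helper function are illustrative, and the Meter is assumed to come from the caller.

package example

import (
    "context"

    "go.opentelemetry.io/otel/api/kv"
    "go.opentelemetry.io/otel/api/metric"
)

// recordLatency shows the three recording paths for a ValueRecorder.
func recordLatency(ctx context.Context, meter metric.Meter) {
    // NewFloat64ValueRecorder replaces the former NewFloat64Measure.
    latency := metric.Must(meter).NewFloat64ValueRecorder("request.latency")
    labels := []kv.KeyValue{kv.String("route", "/api")} // illustrative label

    // Direct recording with labels supplied on each call.
    latency.Record(ctx, 12.7, labels...)

    // Bound instrument: labels are bound once, then reused.
    bound := latency.Bind(labels...)
    defer bound.Unbind()
    bound.Record(ctx, 9.3)

    // Batch recording through the Meter.
    meter.RecordBatch(ctx, labels, latency.Measurement(15.1))
}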

View File

@@ -80,7 +80,7 @@ func main() {
 		metric.WithDescription("An observer set to 1.0"),
 	)
-	measureTwo := metric.Must(meter).NewFloat64Measure("ex.com.two")
+	valuerecorderTwo := metric.Must(meter).NewFloat64ValueRecorder("ex.com.two")
 	ctx := context.Background()
@@ -89,8 +89,8 @@ func main() {
 		barKey.String("bar1"),
 	)
-	measure := measureTwo.Bind(commonLabels...)
-	defer measure.Unbind()
+	valuerecorder := valuerecorderTwo.Bind(commonLabels...)
+	defer valuerecorder.Unbind()
 	err := tracer.WithSpan(ctx, "operation", func(ctx context.Context) error {
@@ -103,7 +103,7 @@ func main() {
 			correlation.NewContext(ctx, anotherKey.String("xyz")),
 			commonLabels,
-			measureTwo.Measurement(2.0),
+			valuerecorderTwo.Measurement(2.0),
 		)
 		return tracer.WithSpan(
@@ -114,7 +114,7 @@ func main() {
 			trace.SpanFromContext(ctx).AddEvent(ctx, "Sub span event")
-			measure.Record(ctx, 1.3)
+			valuerecorder.Record(ctx, 1.3)
 			return nil
 		},

View File

@@ -60,11 +60,11 @@ func main() {
 		result.Observe(value, labels...)
 	}
 	_ = metric.Must(meter).RegisterFloat64Observer("ex.com.one", cb,
-		metric.WithDescription("A measure set to 1.0"),
+		metric.WithDescription("An observer set to 1.0"),
 	)
-	measureTwo := metric.Must(meter).NewFloat64Measure("ex.com.two")
-	measureThree := metric.Must(meter).NewFloat64Counter("ex.com.three")
+	valuerecorder := metric.Must(meter).NewFloat64ValueRecorder("ex.com.two")
+	counter := metric.Must(meter).NewFloat64Counter("ex.com.three")
 	commonLabels := []kv.KeyValue{lemonsKey.Int(10), kv.String("A", "1"), kv.String("B", "2"), kv.String("C", "3")}
 	notSoCommonLabels := []kv.KeyValue{lemonsKey.Int(13)}
@@ -78,8 +78,8 @@ func main() {
 	meter.RecordBatch(
 		ctx,
 		commonLabels,
-		measureTwo.Measurement(2.0),
-		measureThree.Measurement(12.0),
+		valuerecorder.Measurement(2.0),
+		counter.Measurement(12.0),
 	)
 	time.Sleep(5 * time.Second)
@@ -91,8 +91,8 @@ func main() {
 	meter.RecordBatch(
 		ctx,
 		notSoCommonLabels,
-		measureTwo.Measurement(2.0),
-		measureThree.Measurement(22.0),
+		valuerecorder.Measurement(2.0),
+		counter.Measurement(22.0),
 	)
 	time.Sleep(5 * time.Second)
@@ -104,8 +104,8 @@ func main() {
 	meter.RecordBatch(
 		ctx,
 		commonLabels,
-		measureTwo.Measurement(12.0),
-		measureThree.Measurement(13.0),
+		valuerecorder.Measurement(12.0),
+		counter.Measurement(13.0),
 	)
 	time.Sleep(100 * time.Second)

View File

@@ -147,7 +147,7 @@ func InstallNewPipeline(config Config) (*push.Controller, http.HandlerFunc, erro
 // NewExportPipeline sets up a complete export pipeline with the recommended setup,
 // chaining a NewRawExporter into the recommended selectors and integrators.
 func NewExportPipeline(config Config, period time.Duration) (*push.Controller, http.HandlerFunc, error) {
-	selector := simple.NewWithHistogramMeasure(config.DefaultHistogramBoundaries)
+	selector := simple.NewWithHistogramDistribution(config.DefaultHistogramBoundaries)
 	exporter, err := NewRawExporter(config)
 	if err != nil {
 		return nil, nil, err
@@ -220,7 +220,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 		}
 	} else if dist, ok := agg.(aggregator.Distribution); ok {
 		// TODO: summaries values are never being resetted.
-		// As measures are recorded, new records starts to have less impact on these summaries.
+		// As measurements are recorded, new records starts to have less impact on these summaries.
 		// We should implement an solution that is similar to the Prometheus Clients
 		// using a rolling window for summaries could be a solution.
 		//

View File

@@ -45,10 +45,10 @@ func TestPrometheusExporter(t *testing.T) {
 		"counter", metric.CounterKind, metric.Float64NumberKind)
 	lastValue := metric.NewDescriptor(
 		"lastvalue", metric.ObserverKind, metric.Float64NumberKind)
-	measure := metric.NewDescriptor(
-		"measure", metric.MeasureKind, metric.Float64NumberKind)
-	histogramMeasure := metric.NewDescriptor(
-		"histogram_measure", metric.MeasureKind, metric.Float64NumberKind)
+	valuerecorder := metric.NewDescriptor(
+		"valuerecorder", metric.ValueRecorderKind, metric.Float64NumberKind)
+	histogramValueRecorder := metric.NewDescriptor(
+		"histogram_valuerecorder", metric.ValueRecorderKind, metric.Float64NumberKind)
 	labels := []kv.KeyValue{
 		kv.Key("A").String("B"),
@@ -61,26 +61,26 @@ func TestPrometheusExporter(t *testing.T) {
 	checkpointSet.AddLastValue(&lastValue, 13.2, labels...)
 	expected = append(expected, `lastvalue{A="B",C="D"} 13.2`)
-	checkpointSet.AddMeasure(&measure, 13, labels...)
-	checkpointSet.AddMeasure(&measure, 15, labels...)
-	checkpointSet.AddMeasure(&measure, 17, labels...)
-	expected = append(expected, `measure{A="B",C="D",quantile="0.5"} 15`)
-	expected = append(expected, `measure{A="B",C="D",quantile="0.9"} 17`)
-	expected = append(expected, `measure{A="B",C="D",quantile="0.99"} 17`)
-	expected = append(expected, `measure_sum{A="B",C="D"} 45`)
-	expected = append(expected, `measure_count{A="B",C="D"} 3`)
+	checkpointSet.AddValueRecorder(&valuerecorder, 13, labels...)
+	checkpointSet.AddValueRecorder(&valuerecorder, 15, labels...)
+	checkpointSet.AddValueRecorder(&valuerecorder, 17, labels...)
+	expected = append(expected, `valuerecorder{A="B",C="D",quantile="0.5"} 15`)
+	expected = append(expected, `valuerecorder{A="B",C="D",quantile="0.9"} 17`)
+	expected = append(expected, `valuerecorder{A="B",C="D",quantile="0.99"} 17`)
+	expected = append(expected, `valuerecorder_sum{A="B",C="D"} 45`)
+	expected = append(expected, `valuerecorder_count{A="B",C="D"} 3`)
 	boundaries := []metric.Number{metric.NewFloat64Number(-0.5), metric.NewFloat64Number(1)}
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.6, labels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.4, labels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 0.6, labels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 20, labels...)
-	expected = append(expected, `histogram_measure_bucket{A="B",C="D",le="+Inf"} 4`)
-	expected = append(expected, `histogram_measure_bucket{A="B",C="D",le="-0.5"} 1`)
-	expected = append(expected, `histogram_measure_bucket{A="B",C="D",le="1"} 3`)
-	expected = append(expected, `histogram_measure_count{A="B",C="D"} 4`)
-	expected = append(expected, `histogram_measure_sum{A="B",C="D"} 19.6`)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.6, labels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.4, labels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 0.6, labels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 20, labels...)
+	expected = append(expected, `histogram_valuerecorder_bucket{A="B",C="D",le="+Inf"} 4`)
+	expected = append(expected, `histogram_valuerecorder_bucket{A="B",C="D",le="-0.5"} 1`)
+	expected = append(expected, `histogram_valuerecorder_bucket{A="B",C="D",le="1"} 3`)
+	expected = append(expected, `histogram_valuerecorder_count{A="B",C="D"} 4`)
+	expected = append(expected, `histogram_valuerecorder_sum{A="B",C="D"} 19.6`)
 	missingLabels := []kv.KeyValue{
 		kv.Key("A").String("E"),
@@ -93,25 +93,25 @@ func TestPrometheusExporter(t *testing.T) {
 	checkpointSet.AddLastValue(&lastValue, 32, missingLabels...)
 	expected = append(expected, `lastvalue{A="E",C=""} 32`)
-	checkpointSet.AddMeasure(&measure, 19, missingLabels...)
-	expected = append(expected, `measure{A="E",C="",quantile="0.5"} 19`)
-	expected = append(expected, `measure{A="E",C="",quantile="0.9"} 19`)
-	expected = append(expected, `measure{A="E",C="",quantile="0.99"} 19`)
-	expected = append(expected, `measure_count{A="E",C=""} 1`)
-	expected = append(expected, `measure_sum{A="E",C=""} 19`)
+	checkpointSet.AddValueRecorder(&valuerecorder, 19, missingLabels...)
+	expected = append(expected, `valuerecorder{A="E",C="",quantile="0.5"} 19`)
+	expected = append(expected, `valuerecorder{A="E",C="",quantile="0.9"} 19`)
+	expected = append(expected, `valuerecorder{A="E",C="",quantile="0.99"} 19`)
+	expected = append(expected, `valuerecorder_count{A="E",C=""} 1`)
+	expected = append(expected, `valuerecorder_sum{A="E",C=""} 19`)
 	boundaries = []metric.Number{metric.NewFloat64Number(0), metric.NewFloat64Number(1)}
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.6, missingLabels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.4, missingLabels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.1, missingLabels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 15, missingLabels...)
-	checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 15, missingLabels...)
-	expected = append(expected, `histogram_measure_bucket{A="E",C="",le="+Inf"} 5`)
-	expected = append(expected, `histogram_measure_bucket{A="E",C="",le="0"} 3`)
-	expected = append(expected, `histogram_measure_bucket{A="E",C="",le="1"} 3`)
-	expected = append(expected, `histogram_measure_count{A="E",C=""} 5`)
-	expected = append(expected, `histogram_measure_sum{A="E",C=""} 28.9`)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.6, missingLabels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.4, missingLabels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.1, missingLabels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 15, missingLabels...)
+	checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 15, missingLabels...)
+	expected = append(expected, `histogram_valuerecorder_bucket{A="E",C="",le="+Inf"} 5`)
+	expected = append(expected, `histogram_valuerecorder_bucket{A="E",C="",le="0"} 3`)
+	expected = append(expected, `histogram_valuerecorder_bucket{A="E",C="",le="1"} 3`)
+	expected = append(expected, `histogram_valuerecorder_count{A="E",C=""} 5`)
+	expected = append(expected, `histogram_valuerecorder_sum{A="E",C=""} 28.9`)
 	compareExport(t, exporter, checkpointSet, expected)
 }

View File

@@ -53,8 +53,8 @@ type Config struct {
 	// useful to create deterministic test conditions.
 	DoNotPrintTime bool
-	// Quantiles are the desired aggregation quantiles for measure
-	// metric data, used when the configured aggregator supports
+	// Quantiles are the desired aggregation quantiles for distribution
+	// summaries, used when the configured aggregator supports
 	// quantiles.
 	//
 	// Note: this exporter is meant as a demonstration; a real
@@ -133,7 +133,7 @@ func InstallNewPipeline(config Config, opts ...push.Option) (*push.Controller, e
 // NewExportPipeline sets up a complete export pipeline with the recommended setup,
 // chaining a NewRawExporter into the recommended selectors and integrators.
 func NewExportPipeline(config Config, period time.Duration, opts ...push.Option) (*push.Controller, error) {
-	selector := simple.NewWithExactMeasure()
+	selector := simple.NewWithExactDistribution()
 	exporter, err := NewRawExporter(config)
 	if err != nil {
 		return nil, err
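
A minimal sketch of wiring this exporter with the renamed selector: the Config fields and the InstallNewPipeline signature appear in this file, while the import path and the controller's Stop method are assumptions about this release rather than something shown in the diff.

package example

import (
    "log"

    "go.opentelemetry.io/otel/exporters/metric/stdout" // assumed import path
)

// installPipeline installs the stdout export pipeline, which chains the
// exporter into the recommended selector (NewWithExactDistribution) and
// a push controller.
func installPipeline() {
    pusher, err := stdout.InstallNewPipeline(stdout.Config{
        PrettyPrint: true,
    })
    if err != nil {
        log.Fatalf("failed to install stdout metric pipeline: %v", err)
    }
    // Stop flushes and stops the push controller (assumed controller API).
    defer pusher.Stop()

    // ... obtain a Meter and record metrics here ...
}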

View File

@@ -177,7 +177,7 @@ func TestStdoutMinMaxSumCount(t *testing.T) {
 	checkpointSet := test.NewCheckpointSet()
-	desc := metric.NewDescriptor("test.name", metric.MeasureKind, metric.Float64NumberKind)
+	desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
 	magg := minmaxsumcount.New(&desc)
 	aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(123.456), &desc)
 	aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(876.543), &desc)
@@ -190,14 +190,14 @@ func TestStdoutMinMaxSumCount(t *testing.T) {
 	require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","min":123.456,"max":876.543,"sum":999.999,"count":2}]}`, fix.Output())
 }
-func TestStdoutMeasureFormat(t *testing.T) {
+func TestStdoutValueRecorderFormat(t *testing.T) {
 	fix := newFixture(t, nil, stdout.Config{
 		PrettyPrint: true,
 	})
 	checkpointSet := test.NewCheckpointSet()
-	desc := metric.NewDescriptor("test.name", metric.MeasureKind, metric.Float64NumberKind)
+	desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
 	magg := array.New()
 	for i := 0; i < 1000; i++ {
@@ -238,7 +238,7 @@ func TestStdoutMeasureFormat(t *testing.T) {
 	}
 }
 func TestStdoutNoData(t *testing.T) {
-	desc := metric.NewDescriptor("test.name", metric.MeasureKind, metric.Float64NumberKind)
+	desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
 	for name, tc := range map[string]export.Aggregator{
 		"ddsketch": ddsketch.New(ddsketch.NewDefaultConfig(), &desc),
 		"minmaxsumcount": minmaxsumcount.New(&desc),

View File

@@ -88,11 +88,11 @@ func (p *CheckpointSet) AddCounter(desc *metric.Descriptor, v float64, labels ..
 	p.updateAggregator(desc, sum.New(), v, labels...)
 }
-func (p *CheckpointSet) AddMeasure(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) {
+func (p *CheckpointSet) AddValueRecorder(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) {
 	p.updateAggregator(desc, array.New(), v, labels...)
 }
-func (p *CheckpointSet) AddHistogramMeasure(desc *metric.Descriptor, boundaries []metric.Number, v float64, labels ...kv.KeyValue) {
+func (p *CheckpointSet) AddHistogramValueRecorder(desc *metric.Descriptor, boundaries []metric.Number, v float64, labels ...kv.KeyValue) {
 	p.updateAggregator(desc, histogram.New(desc, boundaries), v, labels...)
 }

View File

@@ -111,7 +111,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) {
 	}{
 		{
 			"mmsc-test-a",
-			metric.MeasureKind,
+			metric.ValueRecorderKind,
 			"test-a-description",
 			unit.Dimensionless,
 			metric.Int64NumberKind,
@@ -160,7 +160,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) {
 }
 func TestMinMaxSumCountDatapoints(t *testing.T) {
-	desc := metric.NewDescriptor("", metric.MeasureKind, metric.Int64NumberKind)
+	desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Int64NumberKind)
 	labels := label.NewSet()
 	mmsc := minmaxsumcount.New(&desc)
 	assert.NoError(t, mmsc.Update(context.Background(), 1, &desc))
@@ -228,7 +228,7 @@ func TestSumMetricDescriptor(t *testing.T) {
 		},
 		{
 			"sum-test-b",
-			metric.MeasureKind, // This shouldn't change anything.
+			metric.ValueRecorderKind, // This shouldn't change anything.
 			"test-b-description",
 			unit.Milliseconds,
 			metric.Float64NumberKind,
@@ -257,7 +257,7 @@ func TestSumMetricDescriptor(t *testing.T) {
 }
 func TestSumInt64DataPoints(t *testing.T) {
-	desc := metric.NewDescriptor("", metric.MeasureKind, metric.Int64NumberKind)
+	desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Int64NumberKind)
 	labels := label.NewSet()
 	s := sumAgg.New()
 	assert.NoError(t, s.Update(context.Background(), metric.Number(1), &desc))
@@ -271,7 +271,7 @@ func TestSumInt64DataPoints(t *testing.T) {
 }
 func TestSumFloat64DataPoints(t *testing.T) {
-	desc := metric.NewDescriptor("", metric.MeasureKind, metric.Float64NumberKind)
+	desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Float64NumberKind)
 	labels := label.NewSet()
 	s := sumAgg.New()
 	assert.NoError(t, s.Update(context.Background(), metric.NewFloat64Number(1), &desc))
@@ -285,7 +285,7 @@ func TestSumFloat64DataPoints(t *testing.T) {
 }
 func TestSumErrUnknownValueType(t *testing.T) {
-	desc := metric.NewDescriptor("", metric.MeasureKind, metric.NumberKind(-1))
+	desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.NumberKind(-1))
 	labels := label.NewSet()
 	s := sumAgg.New()
 	_, err := sum(&desc, &labels, s)

View File

@@ -109,7 +109,7 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
 		span.End()
 	}
-	selector := simple.NewWithExactMeasure()
+	selector := simple.NewWithExactDistribution()
 	integrator := integrator.New(selector, true)
 	pusher := push.New(integrator, exp, 60*time.Second)
 	pusher.Start()
@@ -126,8 +126,8 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
 	instruments := map[string]data{
 		"test-int64-counter": {metric.CounterKind, metricapi.Int64NumberKind, 1},
 		"test-float64-counter": {metric.CounterKind, metricapi.Float64NumberKind, 1},
-		"test-int64-measure": {metric.MeasureKind, metricapi.Int64NumberKind, 2},
-		"test-float64-measure": {metric.MeasureKind, metricapi.Float64NumberKind, 2},
+		"test-int64-valuerecorder": {metric.ValueRecorderKind, metricapi.Int64NumberKind, 2},
+		"test-float64-valuerecorder": {metric.ValueRecorderKind, metricapi.Float64NumberKind, 2},
 		"test-int64-observer": {metric.ObserverKind, metricapi.Int64NumberKind, 3},
 		"test-float64-observer": {metric.ObserverKind, metricapi.Float64NumberKind, 3},
 	}
@@ -142,12 +142,12 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
 			default:
 				assert.Failf(t, "unsupported number testing kind", data.nKind.String())
 			}
-		case metric.MeasureKind:
+		case metric.ValueRecorderKind:
 			switch data.nKind {
 			case metricapi.Int64NumberKind:
-				metricapi.Must(meter).NewInt64Measure(name).Record(ctx, data.val, labels...)
+				metricapi.Must(meter).NewInt64ValueRecorder(name).Record(ctx, data.val, labels...)
 			case metricapi.Float64NumberKind:
-				metricapi.Must(meter).NewFloat64Measure(name).Record(ctx, float64(data.val), labels...)
+				metricapi.Must(meter).NewFloat64ValueRecorder(name).Record(ctx, float64(data.val), labels...)
 			default:
 				assert.Failf(t, "unsupported number testing kind", data.nKind.String())
 			}
@@ -246,7 +246,7 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
 			default:
 				assert.Failf(t, "invalid number kind", data.nKind.String())
 			}
-		case metric.MeasureKind, metric.ObserverKind:
+		case metric.ValueRecorderKind, metric.ObserverKind:
 			assert.Equal(t, metricpb.MetricDescriptor_SUMMARY.String(), desc.GetType().String())
 			m.GetSummaryDataPoints()
 			if dp := m.GetSummaryDataPoints(); assert.Len(t, dp, 1) {

View File

@@ -188,10 +188,10 @@ func TestNoGroupingExport(t *testing.T) {
 	)
 }
-func TestMeasureMetricGroupingExport(t *testing.T) {
+func TestValuerecorderMetricGroupingExport(t *testing.T) {
 	r := record{
-		"measure",
-		metric.MeasureKind,
+		"valuerecorder",
+		metric.ValueRecorderKind,
 		metric.Int64NumberKind,
 		nil,
 		nil,
@@ -205,7 +205,7 @@ func TestMeasureMetricGroupingExport(t *testing.T) {
 			Metrics: []*metricpb.Metric{
 				{
 					MetricDescriptor: &metricpb.MetricDescriptor{
-						Name: "measure",
+						Name: "valuerecorder",
 						Type: metricpb.MetricDescriptor_SUMMARY,
 						Labels: []*commonpb.StringKeyValue{
 							{

View File

@@ -116,7 +116,7 @@ func NewInconsistentMergeError(a1, a2 export.Aggregator) error {
 // RangeTest is a commmon routine for testing for valid input values.
 // This rejects NaN values. This rejects negative values when the
 // metric instrument does not support negative values, including
-// monotonic counter metrics and absolute measure metrics.
+// monotonic counter metrics and absolute ValueRecorder metrics.
 func RangeTest(number metric.Number, descriptor *metric.Descriptor) error {
 	numberKind := descriptor.NumberKind()

View File

@@ -86,7 +86,7 @@ func TestNaNTest(t *testing.T) {
 		t.Run(nkind.String(), func(t *testing.T) {
 			for _, mkind := range []metric.Kind{
 				metric.CounterKind,
-				metric.MeasureKind,
+				metric.ValueRecorderKind,
 				metric.ObserverKind,
 			} {
 				desc := metric.NewDescriptor(

View File

@@ -100,22 +100,16 @@ type AggregationSelector interface {
 }
 // Aggregator implements a specific aggregation behavior, e.g., a
-// behavior to track a sequence of updates to a counter, a measure, or
-// an observer instrument. For the most part, counter semantics are
-// fixed and the provided implementation should be used. Measure and
-// observer metrics offer a wide range of potential tradeoffs and
-// several implementations are provided.
-//
-// Aggregators are meant to compute the change (i.e., delta) in state
-// from one checkpoint to the next, with the exception of LastValue
-// aggregators. LastValue aggregators are required to maintain the last
-// value across checkpoints.
+// behavior to track a sequence of updates to an instrument. Sum-only
+// instruments commonly use a simple Sum aggregator, but for the
+// distribution instruments (ValueRecorder, ValueObserver) there are a
+// number of possible aggregators with different cost and accuracy
+// tradeoffs.
 //
 // Note that any Aggregator may be attached to any instrument--this is
 // the result of the OpenTelemetry API/SDK separation. It is possible
-// to attach a counter aggregator to a Measure instrument (to compute
-// a simple sum) or a LastValue aggregator to a measure instrument (to
-// compute the last value).
+// to attach a Sum aggregator to a ValueRecorder instrument or a
+// MinMaxSumCount aggregator to a Counter instrument.
 type Aggregator interface {
 	// Update receives a new measured value and incorporates it
 	// into the aggregation. Update() calls may arrive
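
To make the selector/aggregator relationship concrete, here is a minimal sketch of an AggregationSelector in the spirit of the benchmark fixture at the top of this commit: additive instruments get a Sum aggregator, distribution instruments get MinMaxSumCount. The type name is illustrative and the SDK import paths are assumptions for this release.

package example

import (
    "go.opentelemetry.io/otel/api/metric"
    export "go.opentelemetry.io/otel/sdk/export/metric"             // assumed path
    "go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount" // assumed path
    "go.opentelemetry.io/otel/sdk/metric/aggregator/sum"            // assumed path
)

// kindSelector chooses an aggregator per instrument kind.
type kindSelector struct{}

func (kindSelector) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
    switch descriptor.MetricKind() {
    case metric.ValueRecorderKind, metric.ObserverKind:
        // Distribution semantics: keep min, max, sum, and count.
        return minmaxsumcount.New(descriptor)
    default:
        // Additive semantics: a simple sum is enough.
        return sum.New()
    }
}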

View File

@@ -27,6 +27,8 @@ import (
 )
 type (
+	// Aggregator aggregates events that form a distribution, keeping
+	// an array with the exact set of values.
 	Aggregator struct {
 		// ckptSum needs to be aligned for 64-bit atomic operations.
 		ckptSum metric.Number

View File

@@ -50,7 +50,7 @@ type updateTest struct {
 }
 func (ut *updateTest) run(t *testing.T, profile test.Profile) {
-	descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 	agg := New()
@@ -118,7 +118,7 @@ type mergeTest struct {
 func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
 	ctx := context.Background()
-	descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 	agg1 := New()
 	agg2 := New()
@@ -215,7 +215,7 @@ func TestArrayErrors(t *testing.T) {
 		ctx := context.Background()
-		descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+		descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 		test.CheckedUpdate(t, agg, metric.Number(0), descriptor)
@@ -243,7 +243,7 @@ func TestArrayErrors(t *testing.T) {
 	}
 }
 func TestArrayFloat64(t *testing.T) {
-	descriptor := test.NewAggregatorTest(metric.MeasureKind, metric.Float64NumberKind)
+	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, metric.Float64NumberKind)
 	fpsf := func(sign int) []float64 {
 		// Check behavior of a bunch of odd floating

View File

@@ -29,7 +29,7 @@ import (
 // Config is an alias for the underlying DDSketch config object.
 type Config = sdk.Config
-// Aggregator aggregates measure events.
+// Aggregator aggregates events into a distribution.
 type Aggregator struct {
 	lock sync.Mutex
 	cfg *Config

View File

@@ -33,7 +33,7 @@ type updateTest struct {
 func (ut *updateTest) run(t *testing.T, profile test.Profile) {
 	ctx := context.Background()
-	descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 	agg := New(NewDefaultConfig(), descriptor)
 	all := test.NewNumbers(profile.NumberKind)
@@ -92,7 +92,7 @@ type mergeTest struct {
 func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
 	ctx := context.Background()
-	descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 	agg1 := New(NewDefaultConfig(), descriptor)
 	agg2 := New(NewDefaultConfig(), descriptor)

View File

@@ -51,7 +51,7 @@ var _ aggregator.Sum = &Aggregator{}
 var _ aggregator.Count = &Aggregator{}
 var _ aggregator.Histogram = &Aggregator{}
-// New returns a new measure aggregator for computing Histograms.
+// New returns a new aggregator for computing Histograms.
 //
 // A Histogram observe events and counts them in pre-defined buckets.
 // And also provides the total sum and count of all observations.

View File

@@ -84,7 +84,7 @@ func TestHistogramPositiveAndNegative(t *testing.T) {
 // Validates count, sum and buckets for a given profile and policy
 func histogram(t *testing.T, profile test.Profile, policy policy) {
 	ctx := context.Background()
-	descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+	descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 	agg := New(descriptor, boundaries[profile.NumberKind])
@@ -126,7 +126,7 @@ func TestHistogramMerge(t *testing.T) {
 	ctx := context.Background()
 	test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
-		descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+		descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 		agg1 := New(descriptor, boundaries[profile.NumberKind])
 		agg2 := New(descriptor, boundaries[profile.NumberKind])
@@ -178,7 +178,7 @@ func TestHistogramNotSet(t *testing.T) {
 	ctx := context.Background()
 	test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
-		descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
+		descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
 		agg := New(descriptor, boundaries[profile.NumberKind])
 		agg.Checkpoint(ctx, descriptor)

View File

@ -24,8 +24,8 @@ import (
) )
type ( type (
// Aggregator aggregates measure events, keeping only the min, max, // Aggregator aggregates events that form a distribution,
// sum, and count. // keeping only the min, max, sum, and count.
Aggregator struct { Aggregator struct {
lock sync.Mutex lock sync.Mutex
current state current state
@ -44,8 +44,9 @@ type (
var _ export.Aggregator = &Aggregator{} var _ export.Aggregator = &Aggregator{}
var _ aggregator.MinMaxSumCount = &Aggregator{} var _ aggregator.MinMaxSumCount = &Aggregator{}
// New returns a new measure aggregator for computing min, max, sum, and // New returns a new aggregator for computing the min, max, sum, and
// count. It does not compute quantile information other than Min and Max. // count. It does not compute quantile information other than Min and
// Max.
// //
// This type uses a mutex for Update() and Checkpoint() concurrency. // This type uses a mutex for Update() and Checkpoint() concurrency.
func New(desc *metric.Descriptor) *Aggregator { func New(desc *metric.Descriptor) *Aggregator {
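A minimal sketch of exercising this aggregator and reading it back; the Min/Max/Sum/Count accessors come from the aggregator.MinMaxSumCount interface asserted above, while the instrument name and import paths are assumptions based on this diff:

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
)

// minMaxSumCountSketch updates the aggregator and reads the four summary
// values after a checkpoint.
func minMaxSumCountSketch(ctx context.Context) {
	desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Float64NumberKind)
	agg := minmaxsumcount.New(&desc)

	_ = agg.Update(ctx, metric.NewFloat64Number(0.5), &desc)
	_ = agg.Update(ctx, metric.NewFloat64Number(2.5), &desc)
	agg.Checkpoint(ctx, &desc)

	// Errors are ignored here for brevity.
	min, _ := agg.Min()
	max, _ := agg.Max()
	sum, _ := agg.Sum()
	count, _ := agg.Count()
	_, _, _, _ = min, max, sum, count
}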

View File

@ -79,7 +79,7 @@ func TestMinMaxSumCountPositiveAndNegative(t *testing.T) {
// Validates min, max, sum and count for a given profile and policy // Validates min, max, sum and count for a given profile and policy
func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) { func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) {
ctx := context.Background() ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind) descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor) agg := New(descriptor)
@ -127,7 +127,7 @@ func TestMinMaxSumCountMerge(t *testing.T) {
ctx := context.Background() ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind) descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg1 := New(descriptor) agg1 := New(descriptor)
agg2 := New(descriptor) agg2 := New(descriptor)
@ -185,7 +185,7 @@ func TestMaxSumCountNotSet(t *testing.T) {
ctx := context.Background() ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind) descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor) agg := New(descriptor)
agg.Checkpoint(ctx, descriptor) agg.Checkpoint(ctx, descriptor)

View File

@ -71,13 +71,13 @@ func TestCounterSum(t *testing.T) {
}) })
} }
func TestMeasureSum(t *testing.T) { func TestValueRecorderSum(t *testing.T) {
ctx := context.Background() ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) { test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New() agg := New()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind) descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
sum := metric.Number(0) sum := metric.Number(0)

View File

@ -311,7 +311,7 @@ func BenchmarkInt64LastValueAdd(b *testing.B) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewInt64Measure("int64.lastvalue") mea := fix.meter.NewInt64ValueRecorder("int64.lastvalue")
b.ResetTimer() b.ResetTimer()
@ -324,7 +324,7 @@ func BenchmarkInt64LastValueHandleAdd(b *testing.B) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewInt64Measure("int64.lastvalue") mea := fix.meter.NewInt64ValueRecorder("int64.lastvalue")
handle := mea.Bind(labs...) handle := mea.Bind(labs...)
b.ResetTimer() b.ResetTimer()
@ -338,7 +338,7 @@ func BenchmarkFloat64LastValueAdd(b *testing.B) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure("float64.lastvalue") mea := fix.meter.NewFloat64ValueRecorder("float64.lastvalue")
b.ResetTimer() b.ResetTimer()
@ -351,7 +351,7 @@ func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure("float64.lastvalue") mea := fix.meter.NewFloat64ValueRecorder("float64.lastvalue")
handle := mea.Bind(labs...) handle := mea.Bind(labs...)
b.ResetTimer() b.ResetTimer()
@ -361,13 +361,13 @@ func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
} }
} }
// Measures // ValueRecorders
func benchmarkInt64MeasureAdd(b *testing.B, name string) { func benchmarkInt64ValueRecorderAdd(b *testing.B, name string) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewInt64Measure(name) mea := fix.meter.NewInt64ValueRecorder(name)
b.ResetTimer() b.ResetTimer()
@ -376,11 +376,11 @@ func benchmarkInt64MeasureAdd(b *testing.B, name string) {
} }
} }
func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) { func benchmarkInt64ValueRecorderHandleAdd(b *testing.B, name string) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewInt64Measure(name) mea := fix.meter.NewInt64ValueRecorder(name)
handle := mea.Bind(labs...) handle := mea.Bind(labs...)
b.ResetTimer() b.ResetTimer()
@ -390,11 +390,11 @@ func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) {
} }
} }
func benchmarkFloat64MeasureAdd(b *testing.B, name string) { func benchmarkFloat64ValueRecorderAdd(b *testing.B, name string) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure(name) mea := fix.meter.NewFloat64ValueRecorder(name)
b.ResetTimer() b.ResetTimer()
@ -403,11 +403,11 @@ func benchmarkFloat64MeasureAdd(b *testing.B, name string) {
} }
} }
func benchmarkFloat64MeasureHandleAdd(b *testing.B, name string) { func benchmarkFloat64ValueRecorderHandleAdd(b *testing.B, name string) {
ctx := context.Background() ctx := context.Background()
fix := newFixture(b) fix := newFixture(b)
labs := makeLabels(1) labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure(name) mea := fix.meter.NewFloat64ValueRecorder(name)
handle := mea.Bind(labs...) handle := mea.Bind(labs...)
b.ResetTimer() b.ResetTimer()
@ -467,55 +467,55 @@ func BenchmarkObserverObservationFloat64(b *testing.B) {
// MaxSumCount // MaxSumCount
func BenchmarkInt64MaxSumCountAdd(b *testing.B) { func BenchmarkInt64MaxSumCountAdd(b *testing.B) {
benchmarkInt64MeasureAdd(b, "int64.minmaxsumcount") benchmarkInt64ValueRecorderAdd(b, "int64.minmaxsumcount")
} }
func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) { func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) {
benchmarkInt64MeasureHandleAdd(b, "int64.minmaxsumcount") benchmarkInt64ValueRecorderHandleAdd(b, "int64.minmaxsumcount")
} }
func BenchmarkFloat64MaxSumCountAdd(b *testing.B) { func BenchmarkFloat64MaxSumCountAdd(b *testing.B) {
benchmarkFloat64MeasureAdd(b, "float64.minmaxsumcount") benchmarkFloat64ValueRecorderAdd(b, "float64.minmaxsumcount")
} }
func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) { func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
benchmarkFloat64MeasureHandleAdd(b, "float64.minmaxsumcount") benchmarkFloat64ValueRecorderHandleAdd(b, "float64.minmaxsumcount")
} }
// DDSketch // DDSketch
func BenchmarkInt64DDSketchAdd(b *testing.B) { func BenchmarkInt64DDSketchAdd(b *testing.B) {
benchmarkInt64MeasureAdd(b, "int64.ddsketch") benchmarkInt64ValueRecorderAdd(b, "int64.ddsketch")
} }
func BenchmarkInt64DDSketchHandleAdd(b *testing.B) { func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
benchmarkInt64MeasureHandleAdd(b, "int64.ddsketch") benchmarkInt64ValueRecorderHandleAdd(b, "int64.ddsketch")
} }
func BenchmarkFloat64DDSketchAdd(b *testing.B) { func BenchmarkFloat64DDSketchAdd(b *testing.B) {
benchmarkFloat64MeasureAdd(b, "float64.ddsketch") benchmarkFloat64ValueRecorderAdd(b, "float64.ddsketch")
} }
func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) { func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
benchmarkFloat64MeasureHandleAdd(b, "float64.ddsketch") benchmarkFloat64ValueRecorderHandleAdd(b, "float64.ddsketch")
} }
// Array // Array
func BenchmarkInt64ArrayAdd(b *testing.B) { func BenchmarkInt64ArrayAdd(b *testing.B) {
benchmarkInt64MeasureAdd(b, "int64.array") benchmarkInt64ValueRecorderAdd(b, "int64.array")
} }
func BenchmarkInt64ArrayHandleAdd(b *testing.B) { func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
benchmarkInt64MeasureHandleAdd(b, "int64.array") benchmarkInt64ValueRecorderHandleAdd(b, "int64.array")
} }
func BenchmarkFloat64ArrayAdd(b *testing.B) { func BenchmarkFloat64ArrayAdd(b *testing.B) {
benchmarkFloat64MeasureAdd(b, "float64.array") benchmarkFloat64ValueRecorderAdd(b, "float64.array")
} }
func BenchmarkFloat64ArrayHandleAdd(b *testing.B) { func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
benchmarkFloat64MeasureHandleAdd(b, "float64.array") benchmarkFloat64ValueRecorderHandleAdd(b, "float64.array")
} }
// BatchRecord // BatchRecord

View File

@ -107,7 +107,7 @@ func TestInputRangeTestCounter(t *testing.T) {
require.Nil(t, sdkErr) require.Nil(t, sdkErr)
} }
func TestInputRangeTestMeasure(t *testing.T) { func TestInputRangeTestValueRecorder(t *testing.T) {
ctx := context.Background() ctx := context.Background()
integrator := &correctnessIntegrator{ integrator := &correctnessIntegrator{
t: t, t: t,
@ -120,17 +120,17 @@ func TestInputRangeTestMeasure(t *testing.T) {
sdkErr = handleErr sdkErr = handleErr
}) })
measure := Must(meter).NewFloat64Measure("name.measure") valuerecorder := Must(meter).NewFloat64ValueRecorder("name.valuerecorder")
measure.Record(ctx, math.NaN()) valuerecorder.Record(ctx, math.NaN())
require.Equal(t, aggregator.ErrNaNInput, sdkErr) require.Equal(t, aggregator.ErrNaNInput, sdkErr)
sdkErr = nil sdkErr = nil
checkpointed := sdk.Collect(ctx) checkpointed := sdk.Collect(ctx)
require.Equal(t, 0, checkpointed) require.Equal(t, 0, checkpointed)
measure.Record(ctx, 1) valuerecorder.Record(ctx, 1)
measure.Record(ctx, 2) valuerecorder.Record(ctx, 2)
integrator.records = nil integrator.records = nil
checkpointed = sdk.Collect(ctx) checkpointed = sdk.Collect(ctx)
@ -150,9 +150,9 @@ func TestDisabledInstrument(t *testing.T) {
sdk := metricsdk.NewAccumulator(integrator) sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test") meter := metric.WrapMeterImpl(sdk, "test")
measure := Must(meter).NewFloat64Measure("name.disabled") valuerecorder := Must(meter).NewFloat64ValueRecorder("name.disabled")
measure.Record(ctx, -1) valuerecorder.Record(ctx, -1)
checkpointed := sdk.Collect(ctx) checkpointed := sdk.Collect(ctx)
require.Equal(t, 0, checkpointed) require.Equal(t, 0, checkpointed)
@ -389,8 +389,8 @@ func TestRecordBatch(t *testing.T) {
counter1 := Must(meter).NewInt64Counter("int64.counter") counter1 := Must(meter).NewInt64Counter("int64.counter")
counter2 := Must(meter).NewFloat64Counter("float64.counter") counter2 := Must(meter).NewFloat64Counter("float64.counter")
measure1 := Must(meter).NewInt64Measure("int64.measure") valuerecorder1 := Must(meter).NewInt64ValueRecorder("int64.valuerecorder")
measure2 := Must(meter).NewFloat64Measure("float64.measure") valuerecorder2 := Must(meter).NewFloat64ValueRecorder("float64.valuerecorder")
sdk.RecordBatch( sdk.RecordBatch(
ctx, ctx,
@ -400,8 +400,8 @@ func TestRecordBatch(t *testing.T) {
}, },
counter1.Measurement(1), counter1.Measurement(1),
counter2.Measurement(2), counter2.Measurement(2),
measure1.Measurement(3), valuerecorder1.Measurement(3),
measure2.Measurement(4), valuerecorder2.Measurement(4),
) )
sdk.Collect(ctx) sdk.Collect(ctx)
@ -413,8 +413,8 @@ func TestRecordBatch(t *testing.T) {
require.EqualValues(t, map[string]float64{ require.EqualValues(t, map[string]float64{
"int64.counter/A=B,C=D": 1, "int64.counter/A=B,C=D": 1,
"float64.counter/A=B,C=D": 2, "float64.counter/A=B,C=D": 2,
"int64.measure/A=B,C=D": 3, "int64.valuerecorder/A=B,C=D": 3,
"float64.measure/A=B,C=D": 4, "float64.valuerecorder/A=B,C=D": 4,
}, out.Map) }, out.Map)
} }

View File

@ -13,57 +13,34 @@
// limitations under the License. // limitations under the License.
/* /*
Package metric implements the OpenTelemetry metric.Meter API. The SDK Package metric implements the OpenTelemetry metric.MeterImpl
supports configurable metrics export behavior through a collection of interface. The Accumulator type supports configurable metrics export
export interfaces that support various export strategies, described below. behavior through a collection of export interfaces that support
various export strategies, described below.
The metric.Meter API consists of methods for constructing each of the basic The metric.MeterImpl API consists of methods for constructing
kinds of metric instrument. There are six types of instrument available to synchronous and asynchronous instruments. There are two constructors
the end user, comprised of three basic kinds of metric instrument (Counter, per instrument for the two kinds of number (int64, float64).
Measure, Observer) crossed with two kinds of number (int64, float64).
The API assists the SDK by consolidating the variety of metric instruments Synchronous instruments are managed by a sync.Map containing a *record
into a narrower interface, allowing the SDK to avoid repetition of with the current state for each synchronous instrument. A bound
boilerplate. The API and SDK are separated such that an event reaching instrument encapsulates a direct pointer to the record, allowing
the SDK has a uniform structure: an instrument, a label set, and a bound metric events to bypass a sync.Map lookup. A lock-free
numerical value. algorithm is used to protect against races when adding and removing
items from the sync.Map.
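As a sketch of the two synchronous entry points described above (it assumes an already-configured metric.Meter and the api/kv and api/metric import paths used elsewhere in this change; the instrument and label names are made up):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/kv"
	"go.opentelemetry.io/otel/api/metric"
)

// recordLatency records through a ValueRecorder both directly and through a
// bound instrument.
func recordLatency(ctx context.Context, meter metric.Meter) {
	valuerecorder := metric.Must(meter).NewFloat64ValueRecorder("request.latency")

	// Direct call: the SDK consults the sync.Map for the record matching
	// this instrument and label set on every Record.
	valuerecorder.Record(ctx, 12.3, kv.Key("route").String("/users"))

	// Bound call: the record lookup happens once at Bind time; subsequent
	// Record calls follow the pointer held by the bound instrument.
	bound := valuerecorder.Bind(kv.Key("route").String("/users"))
	defer bound.Unbind()
	bound.Record(ctx, 7.8)
}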
To this end, the API uses a kv.Number type to represent either an int64 Asynchronous instruments are managed by an internal
or a float64, depending on the instrument's definition. A single AsyncInstrumentState, which coordinates calling batch and single
implementation interface is used for counter and measure instruments, instrument callbacks.
metric.InstrumentImpl, and a single implementation interface is used for
their handles, metric.HandleImpl. For observers, the API defines
interfaces, for which the SDK provides an implementation.
There are four entry points for events in the Metrics API - three for
synchronous instruments (counters and measures) and one for asynchronous
instruments (observers). The entry points for synchronous instruments are:
via instrument handles, via direct instrument calls, and via BatchRecord.
The SDK is designed with handles as the primary entry point, the other two
entry points are implemented in terms of short-lived handles. For example,
the implementation of a direct call allocates a handle, operates on the
handle, and releases the handle. Similarly, the implementation of
RecordBatch uses a short-lived handle for each measurement in the batch.
The entry point for asynchronous instruments is via observer callbacks.
Observer callbacks behave like a set of instrument handles - one for each
observation for a distinct label set. The observer handles are alive as
long as they are used. If the callback stops reporting values for a
certain label set, the associated handle is dropped.
Internal Structure Internal Structure
The SDK is designed with minimal use of locking, to avoid adding
contention for user-level code. For each handle, whether it is held by
user-level code or a short-lived device, there exists an internal record
managed by the SDK. Each internal record corresponds to a specific
instrument and label set combination.
Each observer also has its own kind of record stored in the SDK. This Each observer also has its own kind of record stored in the SDK. This
record contains a set of recorders for every specific label set used in the record contains a set of recorders for every specific label set used in the
callback. callback.
A sync.Map maintains the mapping of current instruments and label sets to A sync.Map maintains the mapping of current instruments and label sets to
internal records. To create a new handle, the SDK consults the Map to internal records. To create a new bound instrument, the SDK consults the Map to
locate an existing record, otherwise it constructs a new record. The SDK locate an existing record, otherwise it constructs a new record. The SDK
maintains a count of the number of references to each record, ensuring maintains a count of the number of references to each record, ensuring
that records are not reclaimed from the Map while they are still active that records are not reclaimed from the Map while they are still active
@ -74,12 +51,7 @@ sweeps through all records in the SDK, checkpointing their state. When a
record is discovered that has no references and has not been updated since record is discovered that has no references and has not been updated since
the prior collection pass, it is removed from the Map. the prior collection pass, it is removed from the Map.
The SDK maintains a current epoch number, corresponding to the number of Both synchronous and asynchronous instruments have an associated
completed collections. Each recorder of an observer record contains the
last epoch during which it was updated. This variable allows the collection
code path to detect stale recorders and remove them.
Each record of a handle and recorder of an observer has an associated
aggregator, which maintains the current state resulting from all metric aggregator, which maintains the current state resulting from all metric
events since its last checkpoint. Aggregators may be lock-free or they may events since its last checkpoint. Aggregators may be lock-free or they may
use locking, but they should expect to be called concurrently. Aggregators use locking, but they should expect to be called concurrently. Aggregators
@ -97,21 +69,18 @@ enters the SDK resulting in a new record, and collection context,
where a system-level thread performs a collection pass through the where a system-level thread performs a collection pass through the
SDK. SDK.
Descriptor is a struct that describes the metric instrument to the export Descriptor is a struct that describes the metric instrument to the
pipeline, containing the name, recommended aggregation keys, units, export pipeline, containing the name, units, description, metric kind, and
description, metric kind (counter or measure), number kind (int64 or number kind (int64 or float64). A Descriptor accompanies metric data
float64), and whether the instrument has alternate semantics or not (i.e., as it passes through the export pipeline.
monotonic=false counter, absolute=false measure). A Descriptor accompanies
metric data as it passes through the export pipeline.
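A minimal sketch of constructing and inspecting a Descriptor, using the same constructor the tests in this change use; the instrument name is arbitrary:

package example

import "go.opentelemetry.io/otel/api/metric"

// describeInstrument builds the Descriptor that accompanies metric data
// through the export pipeline.
func describeInstrument() metric.Descriptor {
	desc := metric.NewDescriptor(
		"request.latency",        // name
		metric.ValueRecorderKind, // metric kind
		metric.Float64NumberKind, // number kind
	)
	// The export pipeline (e.g. an AggregationSelector) reads these fields.
	_ = desc.Name()
	_ = desc.MetricKind()
	_ = desc.NumberKind()
	return desc
}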
The AggregationSelector interface supports choosing the method of The AggregationSelector interface supports choosing the method of
aggregation to apply to a particular instrument. Given the Descriptor, aggregation to apply to a particular instrument. Given the Descriptor,
this AggregatorFor method returns an implementation of Aggregator. If this this AggregatorFor method returns an implementation of Aggregator. If this
interface returns nil, the metric will be disabled. The aggregator should interface returns nil, the metric will be disabled. The aggregator should
be matched to the capabilities of the exporter. Selecting the aggregator be matched to the capabilities of the exporter. Selecting the aggregator
for counter instruments is relatively straightforward, but for measure and for sum-only instruments is relatively straightforward, but many options
observer instruments there are numerous choices with different cost and are available for aggregating distributions from ValueRecorder instruments.
quality tradeoffs.
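For illustration, a hypothetical selector in the shape of the simple selectors changed below: a sum aggregator for additive instruments and a distribution aggregator otherwise (import paths are assumptions based on this diff):

package example

import (
	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/minmaxsumcount"
	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
)

// customSelector chooses an aggregator per descriptor; returning nil would
// disable the instrument, as described above.
type customSelector struct{}

var _ export.AggregationSelector = customSelector{}

func (customSelector) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
	switch descriptor.MetricKind() {
	case metric.ValueRecorderKind, metric.ObserverKind:
		return minmaxsumcount.New(descriptor)
	default:
		return sum.New()
	}
}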
Aggregator is an interface which implements a concrete strategy for Aggregator is an interface which implements a concrete strategy for
aggregating metric updates. Several Aggregator implementations are aggregating metric updates. Several Aggregator implementations are

View File

@ -25,7 +25,7 @@ import (
) )
func TestStressInt64Histogram(t *testing.T) { func TestStressInt64Histogram(t *testing.T) {
desc := metric.NewDescriptor("some_metric", metric.MeasureKind, metric.Int64NumberKind) desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Int64NumberKind)
h := histogram.New(&desc, []metric.Number{metric.NewInt64Number(25), metric.NewInt64Number(50), metric.NewInt64Number(75)}) h := histogram.New(&desc, []metric.Number{metric.NewInt64Number(25), metric.NewInt64Number(50), metric.NewInt64Number(75)})
ctx, cancelFunc := context.WithCancel(context.Background()) ctx, cancelFunc := context.WithCancel(context.Background())

View File

@ -25,7 +25,7 @@ import (
) )
func TestStressInt64MinMaxSumCount(t *testing.T) { func TestStressInt64MinMaxSumCount(t *testing.T) {
desc := metric.NewDescriptor("some_metric", metric.MeasureKind, metric.Int64NumberKind) desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Int64NumberKind)
mmsc := minmaxsumcount.New(&desc) mmsc := minmaxsumcount.New(&desc)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())

View File

@ -42,40 +42,40 @@ var (
_ export.AggregationSelector = selectorHistogram{} _ export.AggregationSelector = selectorHistogram{}
) )
// NewWithInexpensiveMeasure returns a simple aggregation selector // NewWithInexpensiveDistribution returns a simple aggregation selector
// that uses counter, minmaxsumcount and minmaxsumcount aggregators // that uses counter, minmaxsumcount and minmaxsumcount aggregators
// for the three kinds of metric. This selector is faster and uses // for the three kinds of metric. This selector is faster and uses
// less memory than the others because minmaxsumcount does not // less memory than the others because minmaxsumcount does not
// aggregate quantile information. // aggregate quantile information.
func NewWithInexpensiveMeasure() export.AggregationSelector { func NewWithInexpensiveDistribution() export.AggregationSelector {
return selectorInexpensive{} return selectorInexpensive{}
} }
// NewWithSketchMeasure returns a simple aggregation selector that // NewWithSketchDistribution returns a simple aggregation selector that
// uses counter, ddsketch, and ddsketch aggregators for the three // uses counter, ddsketch, and ddsketch aggregators for the three
// kinds of metric. This selector uses more cpu and memory than the // kinds of metric. This selector uses more cpu and memory than the
// NewWithInexpensiveMeasure because it uses one DDSketch per distinct // NewWithInexpensiveDistribution because it uses one DDSketch per distinct
// measure/observer and labelset. // instrument and label set.
func NewWithSketchMeasure(config *ddsketch.Config) export.AggregationSelector { func NewWithSketchDistribution(config *ddsketch.Config) export.AggregationSelector {
return selectorSketch{ return selectorSketch{
config: config, config: config,
} }
} }
// NewWithExactMeasure returns a simple aggregation selector that uses // NewWithExactDistribution returns a simple aggregation selector that uses
// counter, array, and array aggregators for the three kinds of metric. // counter, array, and array aggregators for the three kinds of metric.
// This selector uses more memory than the NewWithSketchMeasure // This selector uses more memory than the NewWithSketchDistribution
// because it aggregates an array of all values, therefore is able to // because it aggregates an array of all values, therefore is able to
// compute exact quantiles. // compute exact quantiles.
func NewWithExactMeasure() export.AggregationSelector { func NewWithExactDistribution() export.AggregationSelector {
return selectorExact{} return selectorExact{}
} }
// NewWithHistogramMeasure returns a simple aggregation selector that uses counter, // NewWithHistogramDistribution returns a simple aggregation selector that uses counter,
// histogram, and histogram aggregators for the three kinds of metric. This // histogram, and histogram aggregators for the three kinds of metric. This
// selector uses more memory than the NewWithInexpensiveMeasure because it // selector uses more memory than the NewWithInexpensiveDistribution because it
// uses a counter per bucket. // uses a counter per bucket.
func NewWithHistogramMeasure(boundaries []metric.Number) export.AggregationSelector { func NewWithHistogramDistribution(boundaries []metric.Number) export.AggregationSelector {
return selectorHistogram{boundaries: boundaries} return selectorHistogram{boundaries: boundaries}
} }
@ -83,7 +83,7 @@ func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor) export.A
switch descriptor.MetricKind() { switch descriptor.MetricKind() {
case metric.ObserverKind: case metric.ObserverKind:
fallthrough fallthrough
case metric.MeasureKind: case metric.ValueRecorderKind:
return minmaxsumcount.New(descriptor) return minmaxsumcount.New(descriptor)
default: default:
return sum.New() return sum.New()
@ -94,7 +94,7 @@ func (s selectorSketch) AggregatorFor(descriptor *metric.Descriptor) export.Aggr
switch descriptor.MetricKind() { switch descriptor.MetricKind() {
case metric.ObserverKind: case metric.ObserverKind:
fallthrough fallthrough
case metric.MeasureKind: case metric.ValueRecorderKind:
return ddsketch.New(s.config, descriptor) return ddsketch.New(s.config, descriptor)
default: default:
return sum.New() return sum.New()
@ -105,7 +105,7 @@ func (selectorExact) AggregatorFor(descriptor *metric.Descriptor) export.Aggrega
switch descriptor.MetricKind() { switch descriptor.MetricKind() {
case metric.ObserverKind: case metric.ObserverKind:
fallthrough fallthrough
case metric.MeasureKind: case metric.ValueRecorderKind:
return array.New() return array.New()
default: default:
return sum.New() return sum.New()
@ -116,7 +116,7 @@ func (s selectorHistogram) AggregatorFor(descriptor *metric.Descriptor) export.A
switch descriptor.MetricKind() { switch descriptor.MetricKind() {
case metric.ObserverKind: case metric.ObserverKind:
fallthrough fallthrough
case metric.MeasureKind: case metric.ValueRecorderKind:
return histogram.New(descriptor, s.boundaries) return histogram.New(descriptor, s.boundaries)
default: default:
return sum.New() return sum.New()

View File

@ -30,34 +30,34 @@ import (
var ( var (
testCounterDesc = metric.NewDescriptor("counter", metric.CounterKind, metric.Int64NumberKind) testCounterDesc = metric.NewDescriptor("counter", metric.CounterKind, metric.Int64NumberKind)
testMeasureDesc = metric.NewDescriptor("measure", metric.MeasureKind, metric.Int64NumberKind) testValueRecorderDesc = metric.NewDescriptor("valuerecorder", metric.ValueRecorderKind, metric.Int64NumberKind)
testObserverDesc = metric.NewDescriptor("observer", metric.ObserverKind, metric.Int64NumberKind) testObserverDesc = metric.NewDescriptor("observer", metric.ObserverKind, metric.Int64NumberKind)
) )
func TestInexpensiveMeasure(t *testing.T) { func TestInexpensiveDistribution(t *testing.T) {
inex := simple.NewWithInexpensiveMeasure() inex := simple.NewWithInexpensiveDistribution()
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) require.NotPanics(t, func() { _ = inex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testMeasureDesc).(*minmaxsumcount.Aggregator) }) require.NotPanics(t, func() { _ = inex.AggregatorFor(&testValueRecorderDesc).(*minmaxsumcount.Aggregator) })
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testObserverDesc).(*minmaxsumcount.Aggregator) }) require.NotPanics(t, func() { _ = inex.AggregatorFor(&testObserverDesc).(*minmaxsumcount.Aggregator) })
} }
func TestSketchMeasure(t *testing.T) { func TestSketchDistribution(t *testing.T) {
sk := simple.NewWithSketchMeasure(ddsketch.NewDefaultConfig()) sk := simple.NewWithSketchDistribution(ddsketch.NewDefaultConfig())
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) require.NotPanics(t, func() { _ = sk.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testMeasureDesc).(*ddsketch.Aggregator) }) require.NotPanics(t, func() { _ = sk.AggregatorFor(&testValueRecorderDesc).(*ddsketch.Aggregator) })
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testObserverDesc).(*ddsketch.Aggregator) }) require.NotPanics(t, func() { _ = sk.AggregatorFor(&testObserverDesc).(*ddsketch.Aggregator) })
} }
func TestExactMeasure(t *testing.T) { func TestExactDistribution(t *testing.T) {
ex := simple.NewWithExactMeasure() ex := simple.NewWithExactDistribution()
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testMeasureDesc).(*array.Aggregator) }) require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueRecorderDesc).(*array.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testObserverDesc).(*array.Aggregator) }) require.NotPanics(t, func() { _ = ex.AggregatorFor(&testObserverDesc).(*array.Aggregator) })
} }
func TestHistogramMeasure(t *testing.T) { func TestHistogramDistribution(t *testing.T) {
ex := simple.NewWithHistogramMeasure([]metric.Number{}) ex := simple.NewWithHistogramDistribution([]metric.Number{})
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) }) require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testMeasureDesc).(*histogram.Aggregator) }) require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueRecorderDesc).(*histogram.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testObserverDesc).(*histogram.Aggregator) }) require.NotPanics(t, func() { _ = ex.AggregatorFor(&testObserverDesc).(*histogram.Aggregator) })
} }

View File

@ -285,7 +285,7 @@ func (f *testFixture) Process(_ context.Context, record export.Record) error {
f.T.Fatal("Sum error: ", err) f.T.Fatal("Sum error: ", err)
} }
f.impl.storeCollect(actual, sum, time.Time{}) f.impl.storeCollect(actual, sum, time.Time{})
case metric.MeasureKind: case metric.ValueRecorderKind:
lv, ts, err := agg.(aggregator.LastValue).LastValue() lv, ts, err := agg.(aggregator.LastValue).LastValue()
if err != nil && err != aggregator.ErrNoData { if err != nil && err != aggregator.ErrNoData {
f.T.Fatal("Last value error: ", err) f.T.Fatal("Last value error: ", err)
@ -431,15 +431,15 @@ func TestStressFloat64Counter(t *testing.T) {
func intLastValueTestImpl() testImpl { func intLastValueTestImpl() testImpl {
return testImpl{ return testImpl{
newInstrument: func(meter api.Meter, name string) SyncImpler { newInstrument: func(meter api.Meter, name string) SyncImpler {
return Must(meter).NewInt64Measure(name + ".lastvalue") return Must(meter).NewInt64ValueRecorder(name + ".lastvalue")
}, },
getUpdateValue: func() api.Number { getUpdateValue: func() api.Number {
r1 := rand.Int63() r1 := rand.Int63()
return api.NewInt64Number(rand.Int63() - r1) return api.NewInt64Number(rand.Int63() - r1)
}, },
operate: func(inst interface{}, ctx context.Context, value api.Number, labels []kv.KeyValue) { operate: func(inst interface{}, ctx context.Context, value api.Number, labels []kv.KeyValue) {
measure := inst.(api.Int64Measure) valuerecorder := inst.(api.Int64ValueRecorder)
measure.Record(ctx, value.AsInt64(), labels...) valuerecorder.Record(ctx, value.AsInt64(), labels...)
}, },
newStore: func() interface{} { newStore: func() interface{} {
return &lastValueState{ return &lastValueState{
@ -473,14 +473,14 @@ func TestStressInt64LastValue(t *testing.T) {
func floatLastValueTestImpl() testImpl { func floatLastValueTestImpl() testImpl {
return testImpl{ return testImpl{
newInstrument: func(meter api.Meter, name string) SyncImpler { newInstrument: func(meter api.Meter, name string) SyncImpler {
return Must(meter).NewFloat64Measure(name + ".lastvalue") return Must(meter).NewFloat64ValueRecorder(name + ".lastvalue")
}, },
getUpdateValue: func() api.Number { getUpdateValue: func() api.Number {
return api.NewFloat64Number((-0.5 + rand.Float64()) * 100000) return api.NewFloat64Number((-0.5 + rand.Float64()) * 100000)
}, },
operate: func(inst interface{}, ctx context.Context, value api.Number, labels []kv.KeyValue) { operate: func(inst interface{}, ctx context.Context, value api.Number, labels []kv.KeyValue) {
measure := inst.(api.Float64Measure) valuerecorder := inst.(api.Float64ValueRecorder)
measure.Record(ctx, value.AsFloat64(), labels...) valuerecorder.Record(ctx, value.AsFloat64(), labels...)
}, },
newStore: func() interface{} { newStore: func() interface{} {
return &lastValueState{ return &lastValueState{