1
0
mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2024-12-10 09:50:58 +02:00
This commit is contained in:
Stefan Prisca 2020-05-20 10:28:40 +02:00
commit 1f16b8a573
80 changed files with 2682 additions and 1381 deletions

View File

@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
* @jmacd @paivagustavo @krnowak @lizthegrey @MrAlias @Aneurysm9 @evantorrie
* @jmacd @paivagustavo @lizthegrey @MrAlias @Aneurysm9 @evantorrie
CODEOWNERS @MrAlias @jmacd

View File

@ -139,7 +139,6 @@ https://github.com/open-telemetry/opentelemetry-specification/issues/165
Approvers:
- [Krzesimir Nowak](https://github.com/krnowak), Kinvolk
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), Stilingue
- [Anthony Mirabella](https://github.com/Aneurysm9), Centene

View File

@ -23,7 +23,9 @@ import (
"go.opentelemetry.io/otel/api/propagation"
)
const correlationContextHeader = "Correlation-Context"
// Temporary header name until W3C finalizes format.
// https://github.com/open-telemetry/opentelemetry-specification/blob/18b2752ebe6c7f0cdd8c7b2bcbdceb0ae3f5ad95/specification/correlationcontext/api.md#header-name
const correlationContextHeader = "otcorrelations"
// CorrelationContext propagates Key:Values in W3C CorrelationContext
// format.

View File

@ -89,7 +89,7 @@ func TestExtractValidDistributedContextFromHTTPReq(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set("Correlation-Context", tt.header)
req.Header.Set("otcorrelations", tt.header)
ctx := context.Background()
ctx = propagation.ExtractHTTP(ctx, props, req.Header)
@ -133,7 +133,7 @@ func TestExtractInvalidDistributedContextFromHTTPReq(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
req, _ := http.NewRequest("GET", "http://example.com", nil)
req.Header.Set("Correlation-Context", tt.header)
req.Header.Set("otcorrelations", tt.header)
ctx := context.Background()
ctx = propagation.ExtractHTTP(ctx, props, req.Header)
@ -202,17 +202,17 @@ func TestInjectCorrelationContextToHTTPReq(t *testing.T) {
ctx := correlation.ContextWithMap(context.Background(), correlation.NewMap(correlation.MapUpdate{MultiKV: tt.kvs}))
propagation.InjectHTTP(ctx, props, req.Header)
gotHeader := req.Header.Get("Correlation-Context")
gotHeader := req.Header.Get("otcorrelations")
wantedLen := len(strings.Join(tt.wantInHeader, ","))
if wantedLen != len(gotHeader) {
t.Errorf(
"%s: Inject Correlation-Context incorrect length %d != %d.", tt.name, tt.wantedLen, len(gotHeader),
"%s: Inject otcorrelations incorrect length %d != %d.", tt.name, tt.wantedLen, len(gotHeader),
)
}
for _, inHeader := range tt.wantInHeader {
if !strings.Contains(gotHeader, inHeader) {
t.Errorf(
"%s: Inject Correlation-Context missing part of header: %s in %s", tt.name, inHeader, gotHeader,
"%s: Inject otcorrelations missing part of header: %s in %s", tt.name, inHeader, gotHeader,
)
}
}
@ -222,7 +222,7 @@ func TestInjectCorrelationContextToHTTPReq(t *testing.T) {
func TestTraceContextPropagator_GetAllKeys(t *testing.T) {
var propagator correlation.CorrelationContext
want := []string{"Correlation-Context"}
want := []string{"otcorrelations"}
got := propagator.GetAllKeys()
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("GetAllKeys: -got +want %s", diff)

View File

@ -59,7 +59,7 @@ func (*benchFixture) AggregatorFor(descriptor *metric.Descriptor) export.Aggrega
switch descriptor.MetricKind() {
case metric.CounterKind:
return sum.New()
case metric.MeasureKind:
case metric.ValueRecorderKind:
if strings.HasSuffix(descriptor.Name(), "minmaxsumcount") {
return minmaxsumcount.New(descriptor)
} else if strings.HasSuffix(descriptor.Name(), "ddsketch") {

View File

@ -82,21 +82,21 @@ func TestDirect(t *testing.T) {
counter.Add(ctx, 1, labels1...)
counter.Add(ctx, 1, labels1...)
measure := Must(meter1).NewFloat64Measure("test.measure")
measure.Record(ctx, 1, labels1...)
measure.Record(ctx, 2, labels1...)
valuerecorder := Must(meter1).NewFloat64ValueRecorder("test.valuerecorder")
valuerecorder.Record(ctx, 1, labels1...)
valuerecorder.Record(ctx, 2, labels1...)
_ = Must(meter1).RegisterFloat64Observer("test.observer.float", func(result metric.Float64ObserverResult) {
_ = Must(meter1).RegisterFloat64ValueObserver("test.valueobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(1., labels1...)
result.Observe(2., labels2...)
})
_ = Must(meter1).RegisterInt64Observer("test.observer.int", func(result metric.Int64ObserverResult) {
_ = Must(meter1).RegisterInt64ValueObserver("test.valueobserver.int", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(1, labels1...)
result.Observe(2, labels2...)
})
second := Must(meter2).NewFloat64Measure("test.second")
second := Must(meter2).NewFloat64ValueRecorder("test.second")
second.Record(ctx, 1, labels3...)
second.Record(ctx, 2, labels3...)
@ -104,7 +104,7 @@ func TestDirect(t *testing.T) {
global.SetMeterProvider(provider)
counter.Add(ctx, 1, labels1...)
measure.Record(ctx, 3, labels1...)
valuerecorder.Record(ctx, 3, labels1...)
second.Record(ctx, 3, labels3...)
mock.RunAsyncInstruments()
@ -120,7 +120,7 @@ func TestDirect(t *testing.T) {
Number: asInt(1),
},
{
Name: "test.measure",
Name: "test.valuerecorder",
LibraryName: "test1",
Labels: asMap(labels1...),
Number: asFloat(3),
@ -132,25 +132,25 @@ func TestDirect(t *testing.T) {
Number: asFloat(3),
},
{
Name: "test.observer.float",
Name: "test.valueobserver.float",
LibraryName: "test1",
Labels: asMap(labels1...),
Number: asFloat(1),
},
{
Name: "test.observer.float",
Name: "test.valueobserver.float",
LibraryName: "test1",
Labels: asMap(labels2...),
Number: asFloat(2),
},
{
Name: "test.observer.int",
Name: "test.valueobserver.int",
LibraryName: "test1",
Labels: asMap(labels1...),
Number: asInt(1),
},
{
Name: "test.observer.int",
Name: "test.valueobserver.int",
LibraryName: "test1",
Labels: asMap(labels2...),
Number: asInt(2),
@ -174,8 +174,8 @@ func TestBound(t *testing.T) {
boundC.Add(ctx, 1)
boundC.Add(ctx, 1)
measure := Must(glob).NewInt64Measure("test.measure")
boundM := measure.Bind(labels1...)
valuerecorder := Must(glob).NewInt64ValueRecorder("test.valuerecorder")
boundM := valuerecorder.Bind(labels1...)
boundM.Record(ctx, 1)
boundM.Record(ctx, 2)
@ -194,7 +194,7 @@ func TestBound(t *testing.T) {
Number: asFloat(1),
},
{
Name: "test.measure",
Name: "test.valuerecorder",
LibraryName: "test",
Labels: asMap(labels1...),
Number: asInt(3),
@ -216,8 +216,8 @@ func TestUnbind(t *testing.T) {
counter := Must(glob).NewFloat64Counter("test.counter")
boundC := counter.Bind(labels1...)
measure := Must(glob).NewInt64Measure("test.measure")
boundM := measure.Bind(labels1...)
valuerecorder := Must(glob).NewInt64ValueRecorder("test.valuerecorder")
boundM := valuerecorder.Bind(labels1...)
boundC.Unbind()
boundM.Unbind()
@ -331,12 +331,12 @@ func TestImplementationIndirection(t *testing.T) {
require.False(t, ok)
// Async: no SDK yet
observer := Must(meter1).RegisterFloat64Observer(
"interface.observer",
func(result metric.Float64ObserverResult) {},
valueobserver := Must(meter1).RegisterFloat64ValueObserver(
"interface.valueobserver",
func(_ context.Context, result metric.Float64ObserverResult) {},
)
ival = observer.AsyncImpl().Implementation()
ival = valueobserver.AsyncImpl().Implementation()
require.NotNil(t, ival)
_, ok = ival.(*metrictest.Async)
@ -356,7 +356,7 @@ func TestImplementationIndirection(t *testing.T) {
require.True(t, ok)
// Async
ival = observer.AsyncImpl().Implementation()
ival = valueobserver.AsyncImpl().Implementation()
require.NotNil(t, ival)
_, ok = ival.(*metrictest.Async)
@ -407,7 +407,7 @@ func TestRecordBatchRealSDK(t *testing.T) {
if err != nil {
t.Fatal(err)
}
global.SetMeterProvider(pusher)
global.SetMeterProvider(pusher.Provider())
meter.RecordBatch(context.Background(), nil, counter.Measurement(1))
pusher.Stop()

View File

@ -15,6 +15,7 @@
package internal
import (
"context"
"errors"
"testing"
@ -36,17 +37,17 @@ var (
"counter.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).NewFloat64Counter(name))
},
"measure.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).NewInt64Measure(name))
"valuerecorder.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).NewInt64ValueRecorder(name))
},
"measure.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).NewFloat64Measure(name))
"valuerecorder.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).NewFloat64ValueRecorder(name))
},
"observer.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).RegisterInt64Observer(name, func(metric.Int64ObserverResult) {}))
"valueobserver.int64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).RegisterInt64ValueObserver(name, func(context.Context, metric.Int64ObserverResult) {}))
},
"observer.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).RegisterFloat64Observer(name, func(metric.Float64ObserverResult) {}))
"valueobserver.float64": func(name, libraryName string) (metric.InstrumentImpl, error) {
return unwrap(MeterProvider().Meter(libraryName).RegisterFloat64ValueObserver(name, func(context.Context, metric.Float64ObserverResult) {}))
},
}
)

View File

@ -128,7 +128,7 @@ func Infer(k string, value interface{}) KeyValue {
case reflect.Float64:
return Float64(k, rv.Float())
case reflect.String:
return String(k, rv.Interface().(string))
return String(k, rv.String())
}
return String(k, fmt.Sprint(rv.Interface()))
return String(k, fmt.Sprint(value))
}

View File

@ -93,84 +93,140 @@ func TestOptions(t *testing.T) {
}
func TestCounter(t *testing.T) {
{
// N.B. the API does not check for negative
// values, that's the SDK's responsibility.
t.Run("float64 counter", func(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
c := Must(meter).NewFloat64Counter("test.counter.float")
ctx := context.Background()
labels := []kv.KeyValue{kv.String("A", "B")}
c.Add(ctx, 42, labels...)
c.Add(ctx, 1994.1, labels...)
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, 42)
boundInstrument.Add(ctx, -742)
meter.RecordBatch(ctx, labels, c.Measurement(42))
t.Log("Testing float counter")
checkBatches(t, ctx, labels, mockSDK, metric.Float64NumberKind, c.SyncImpl())
}
{
checkSyncBatches(t, ctx, labels, mockSDK, metric.Float64NumberKind, metric.CounterKind, c.SyncImpl(),
1994.1, -742, 42,
)
})
t.Run("int64 counter", func(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
c := Must(meter).NewInt64Counter("test.counter.int")
ctx := context.Background()
labels := []kv.KeyValue{kv.String("A", "B"), kv.String("C", "D")}
c.Add(ctx, 42, labels...)
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, 42)
boundInstrument.Add(ctx, 4200)
meter.RecordBatch(ctx, labels, c.Measurement(420000))
checkSyncBatches(t, ctx, labels, mockSDK, metric.Int64NumberKind, metric.CounterKind, c.SyncImpl(),
42, 4200, 420000,
)
})
t.Run("int64 updowncounter", func(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
c := Must(meter).NewInt64UpDownCounter("test.updowncounter.int")
ctx := context.Background()
labels := []kv.KeyValue{kv.String("A", "B"), kv.String("C", "D")}
c.Add(ctx, 100, labels...)
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, -100)
meter.RecordBatch(ctx, labels, c.Measurement(42))
t.Log("Testing int counter")
checkBatches(t, ctx, labels, mockSDK, metric.Int64NumberKind, c.SyncImpl())
}
checkSyncBatches(t, ctx, labels, mockSDK, metric.Int64NumberKind, metric.UpDownCounterKind, c.SyncImpl(),
100, -100, 42,
)
})
t.Run("float64 updowncounter", func(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
c := Must(meter).NewFloat64UpDownCounter("test.updowncounter.float")
ctx := context.Background()
labels := []kv.KeyValue{kv.String("A", "B"), kv.String("C", "D")}
c.Add(ctx, 100.1, labels...)
boundInstrument := c.Bind(labels...)
boundInstrument.Add(ctx, -76)
meter.RecordBatch(ctx, labels, c.Measurement(-100.1))
checkSyncBatches(t, ctx, labels, mockSDK, metric.Float64NumberKind, metric.UpDownCounterKind, c.SyncImpl(),
100.1, -76, -100.1,
)
})
}
func TestMeasure(t *testing.T) {
{
func TestValueRecorder(t *testing.T) {
t.Run("float64 valuerecorder", func(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
m := Must(meter).NewFloat64Measure("test.measure.float")
m := Must(meter).NewFloat64ValueRecorder("test.valuerecorder.float")
ctx := context.Background()
labels := []kv.KeyValue{}
m.Record(ctx, 42, labels...)
boundInstrument := m.Bind(labels...)
boundInstrument.Record(ctx, 42)
meter.RecordBatch(ctx, labels, m.Measurement(42))
t.Log("Testing float measure")
checkBatches(t, ctx, labels, mockSDK, metric.Float64NumberKind, m.SyncImpl())
}
{
boundInstrument.Record(ctx, 0)
meter.RecordBatch(ctx, labels, m.Measurement(-100.5))
checkSyncBatches(t, ctx, labels, mockSDK, metric.Float64NumberKind, metric.ValueRecorderKind, m.SyncImpl(),
42, 0, -100.5,
)
})
t.Run("int64 valuerecorder", func(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
m := Must(meter).NewInt64Measure("test.measure.int")
m := Must(meter).NewInt64ValueRecorder("test.valuerecorder.int")
ctx := context.Background()
labels := []kv.KeyValue{kv.Int("I", 1)}
m.Record(ctx, 42, labels...)
m.Record(ctx, 173, labels...)
boundInstrument := m.Bind(labels...)
boundInstrument.Record(ctx, 42)
meter.RecordBatch(ctx, labels, m.Measurement(42))
t.Log("Testing int measure")
checkBatches(t, ctx, labels, mockSDK, metric.Int64NumberKind, m.SyncImpl())
}
boundInstrument.Record(ctx, 80)
meter.RecordBatch(ctx, labels, m.Measurement(0))
checkSyncBatches(t, ctx, labels, mockSDK, metric.Int64NumberKind, metric.ValueRecorderKind, m.SyncImpl(),
173, 80, 0,
)
})
}
func TestObserver(t *testing.T) {
{
func TestObserverInstruments(t *testing.T) {
t.Run("float valueobserver", func(t *testing.T) {
labels := []kv.KeyValue{kv.String("O", "P")}
mockSDK, meter := mockTest.NewMeter()
o := Must(meter).RegisterFloat64Observer("test.observer.float", func(result metric.Float64ObserverResult) {
result.Observe(42, labels...)
o := Must(meter).RegisterFloat64ValueObserver("test.valueobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(42.1, labels...)
})
t.Log("Testing float observer")
mockSDK.RunAsyncInstruments()
checkObserverBatch(t, labels, mockSDK, metric.Float64NumberKind, o.AsyncImpl())
}
{
checkObserverBatch(t, labels, mockSDK, metric.Float64NumberKind, metric.ValueObserverKind, o.AsyncImpl(),
42.1,
)
})
t.Run("int valueobserver", func(t *testing.T) {
labels := []kv.KeyValue{}
mockSDK, meter := mockTest.NewMeter()
o := Must(meter).RegisterInt64Observer("test.observer.int", func(result metric.Int64ObserverResult) {
result.Observe(42, labels...)
o := Must(meter).RegisterInt64ValueObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-142, labels...)
})
t.Log("Testing int observer")
mockSDK.RunAsyncInstruments()
checkObserverBatch(t, labels, mockSDK, metric.Int64NumberKind, o.AsyncImpl())
}
checkObserverBatch(t, labels, mockSDK, metric.Int64NumberKind, metric.ValueObserverKind, o.AsyncImpl(),
-142,
)
})
t.Run("float sumobserver", func(t *testing.T) {
labels := []kv.KeyValue{kv.String("O", "P")}
mockSDK, meter := mockTest.NewMeter()
o := Must(meter).RegisterFloat64SumObserver("test.sumobserver.float", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(42.1, labels...)
})
mockSDK.RunAsyncInstruments()
checkObserverBatch(t, labels, mockSDK, metric.Float64NumberKind, metric.SumObserverKind, o.AsyncImpl(),
42.1,
)
})
t.Run("int sumobserver", func(t *testing.T) {
labels := []kv.KeyValue{}
mockSDK, meter := mockTest.NewMeter()
o := Must(meter).RegisterInt64SumObserver("test.observer.int", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-142, labels...)
})
mockSDK.RunAsyncInstruments()
checkObserverBatch(t, labels, mockSDK, metric.Int64NumberKind, metric.SumObserverKind, o.AsyncImpl(),
-142,
)
})
}
func checkBatches(t *testing.T, ctx context.Context, labels []kv.KeyValue, mock *mockTest.MeterImpl, kind metric.NumberKind, instrument metric.InstrumentImpl) {
func checkSyncBatches(t *testing.T, ctx context.Context, labels []kv.KeyValue, mock *mockTest.MeterImpl, nkind metric.NumberKind, mkind metric.Kind, instrument metric.InstrumentImpl, expected ...float64) {
t.Helper()
if len(mock.MeasurementBatches) != 3 {
t.Errorf("Expected 3 recorded measurement batches, got %d", len(mock.MeasurementBatches))
@ -195,6 +251,8 @@ func checkBatches(t *testing.T, ctx context.Context, labels []kv.KeyValue, mock
}
for j := 0; j < minMLen; j++ {
measurement := got.Measurements[j]
require.Equal(t, mkind, measurement.Instrument.Descriptor().MetricKind())
if measurement.Instrument.Implementation() != ourInstrument {
d := func(iface interface{}) string {
i := iface.(*mockTest.Instrument)
@ -202,19 +260,19 @@ func checkBatches(t *testing.T, ctx context.Context, labels []kv.KeyValue, mock
}
t.Errorf("Wrong recorded instrument in measurement %d in batch %d, expected %s, got %s", j, i, d(ourInstrument), d(measurement.Instrument.Implementation()))
}
ft := fortyTwo(t, kind)
if measurement.Number.CompareNumber(kind, ft) != 0 {
t.Errorf("Wrong recorded value in measurement %d in batch %d, expected %s, got %s", j, i, ft.Emit(kind), measurement.Number.Emit(kind))
expect := number(t, nkind, expected[i])
if measurement.Number.CompareNumber(nkind, expect) != 0 {
t.Errorf("Wrong recorded value in measurement %d in batch %d, expected %s, got %s", j, i, expect.Emit(nkind), measurement.Number.Emit(nkind))
}
}
}
}
func TestBatchObserver(t *testing.T) {
func TestBatchObserverInstruments(t *testing.T) {
mockSDK, meter := mockTest.NewMeter()
var obs1 metric.Int64Observer
var obs2 metric.Float64Observer
var obs1 metric.Int64ValueObserver
var obs2 metric.Float64ValueObserver
labels := []kv.KeyValue{
kv.String("A", "B"),
@ -222,15 +280,15 @@ func TestBatchObserver(t *testing.T) {
}
cb := Must(meter).NewBatchObserver(
func(result metric.BatchObserverResult) {
func(_ context.Context, result metric.BatchObserverResult) {
result.Observe(labels,
obs1.Observation(42),
obs2.Observation(42.0),
)
},
)
obs1 = cb.RegisterInt64Observer("test.observer.int")
obs2 = cb.RegisterFloat64Observer("test.observer.float")
obs1 = cb.RegisterInt64ValueObserver("test.observer.int")
obs2 = cb.RegisterFloat64ValueObserver("test.observer.float")
mockSDK.RunAsyncInstruments()
@ -248,14 +306,14 @@ func TestBatchObserver(t *testing.T) {
m1 := got.Measurements[0]
require.Equal(t, impl1, m1.Instrument.Implementation().(*mockTest.Async))
require.Equal(t, 0, m1.Number.CompareNumber(metric.Int64NumberKind, fortyTwo(t, metric.Int64NumberKind)))
require.Equal(t, 0, m1.Number.CompareNumber(metric.Int64NumberKind, number(t, metric.Int64NumberKind, 42)))
m2 := got.Measurements[1]
require.Equal(t, impl2, m2.Instrument.Implementation().(*mockTest.Async))
require.Equal(t, 0, m2.Number.CompareNumber(metric.Float64NumberKind, fortyTwo(t, metric.Float64NumberKind)))
require.Equal(t, 0, m2.Number.CompareNumber(metric.Float64NumberKind, number(t, metric.Float64NumberKind, 42)))
}
func checkObserverBatch(t *testing.T, labels []kv.KeyValue, mock *mockTest.MeterImpl, kind metric.NumberKind, observer metric.AsyncImpl) {
func checkObserverBatch(t *testing.T, labels []kv.KeyValue, mock *mockTest.MeterImpl, nkind metric.NumberKind, mkind metric.Kind, observer metric.AsyncImpl, expected float64) {
t.Helper()
assert.Len(t, mock.MeasurementBatches, 1)
if len(mock.MeasurementBatches) < 1 {
@ -272,21 +330,21 @@ func checkObserverBatch(t *testing.T, labels []kv.KeyValue, mock *mockTest.Meter
return
}
measurement := got.Measurements[0]
require.Equal(t, mkind, measurement.Instrument.Descriptor().MetricKind())
assert.Equal(t, o, measurement.Instrument.Implementation().(*mockTest.Async))
ft := fortyTwo(t, kind)
assert.Equal(t, 0, measurement.Number.CompareNumber(kind, ft))
ft := number(t, nkind, expected)
assert.Equal(t, 0, measurement.Number.CompareNumber(nkind, ft))
}
func fortyTwo(t *testing.T, kind metric.NumberKind) metric.Number {
func number(t *testing.T, kind metric.NumberKind, value float64) metric.Number {
t.Helper()
switch kind {
case metric.Int64NumberKind:
return metric.NewInt64Number(42)
return metric.NewInt64Number(int64(value))
case metric.Float64NumberKind:
return metric.NewFloat64Number(42)
return metric.NewFloat64Number(value)
}
t.Errorf("Invalid value kind %q", kind)
return metric.NewInt64Number(0)
panic("invalid number kind")
}
type testWrappedMeter struct {
@ -309,12 +367,12 @@ func TestWrappedInstrumentError(t *testing.T) {
impl := &testWrappedMeter{}
meter := metric.WrapMeterImpl(impl, "test")
measure, err := meter.NewInt64Measure("test.measure")
valuerecorder, err := meter.NewInt64ValueRecorder("test.valuerecorder")
require.Equal(t, err, metric.ErrSDKReturnedNilImpl)
require.NotNil(t, measure.SyncImpl())
require.NotNil(t, valuerecorder.SyncImpl())
observer, err := meter.RegisterInt64Observer("test.observer", func(result metric.Int64ObserverResult) {})
observer, err := meter.RegisterInt64ValueObserver("test.observer", func(_ context.Context, result metric.Int64ObserverResult) {})
require.NotNil(t, err)
require.NotNil(t, observer.AsyncImpl())
@ -324,7 +382,7 @@ func TestNilCallbackObserverNoop(t *testing.T) {
// Tests that a nil callback yields a no-op observer without error.
_, meter := mockTest.NewMeter()
observer := Must(meter).RegisterInt64Observer("test.observer", nil)
observer := Must(meter).RegisterInt64ValueObserver("test.observer", nil)
_, ok := observer.AsyncImpl().(metric.NoopAsync)
require.True(t, ok)

View File

@ -14,7 +14,11 @@
package metric
import "go.opentelemetry.io/otel/api/kv"
import (
"context"
"go.opentelemetry.io/otel/api/kv"
)
// The file is organized as follows:
//
@ -29,7 +33,7 @@ import "go.opentelemetry.io/otel/api/kv"
// Observation is used for reporting an asynchronous batch of metric
// values. Instances of this type should be created by asynchronous
// instruments (e.g., Int64Observer.Observation()).
// instruments (e.g., Int64ValueObserver.Observation()).
type Observation struct {
// number needs to be aligned for 64-bit atomic operations.
number Number
@ -38,16 +42,16 @@ type Observation struct {
// Int64ObserverCallback is a type of callback that integral
// observers run.
type Int64ObserverCallback func(Int64ObserverResult)
type Int64ObserverCallback func(context.Context, Int64ObserverResult)
// Float64ObserverCallback is a type of callback that floating point
// observers run.
type Float64ObserverCallback func(Float64ObserverResult)
type Float64ObserverCallback func(context.Context, Float64ObserverResult)
// BatchObserverCallback is a callback argument for use with any
// Observer instrument that will be reported as a batch of
// observations.
type BatchObserverCallback func(BatchObserverResult)
type BatchObserverCallback func(context.Context, BatchObserverResult)
// Int64ObserverResult is passed to an observer callback to capture
// observations for one asynchronous integer metric instrument.
@ -110,7 +114,7 @@ type AsyncSingleRunner interface {
// receives one captured observation. (The function accepts
// multiple observations so the same implementation can be
// used for batch runners.)
Run(single AsyncImpl, capture func([]kv.KeyValue, ...Observation))
Run(ctx context.Context, single AsyncImpl, capture func([]kv.KeyValue, ...Observation))
AsyncRunner
}
@ -120,7 +124,7 @@ type AsyncSingleRunner interface {
type AsyncBatchRunner interface {
// Run accepts a function for capturing observations of
// multiple instruments.
Run(capture func([]kv.KeyValue, ...Observation))
Run(ctx context.Context, capture func([]kv.KeyValue, ...Observation))
AsyncRunner
}
@ -154,24 +158,48 @@ func (*Float64ObserverCallback) AnyRunner() {}
func (*BatchObserverCallback) AnyRunner() {}
// Run implements AsyncSingleRunner.
func (i *Int64ObserverCallback) Run(impl AsyncImpl, function func([]kv.KeyValue, ...Observation)) {
(*i)(Int64ObserverResult{
func (i *Int64ObserverCallback) Run(ctx context.Context, impl AsyncImpl, function func([]kv.KeyValue, ...Observation)) {
(*i)(ctx, Int64ObserverResult{
instrument: impl,
function: function,
})
}
// Run implements AsyncSingleRunner.
func (f *Float64ObserverCallback) Run(impl AsyncImpl, function func([]kv.KeyValue, ...Observation)) {
(*f)(Float64ObserverResult{
func (f *Float64ObserverCallback) Run(ctx context.Context, impl AsyncImpl, function func([]kv.KeyValue, ...Observation)) {
(*f)(ctx, Float64ObserverResult{
instrument: impl,
function: function,
})
}
// Run implements AsyncBatchRunner.
func (b *BatchObserverCallback) Run(function func([]kv.KeyValue, ...Observation)) {
(*b)(BatchObserverResult{
func (b *BatchObserverCallback) Run(ctx context.Context, function func([]kv.KeyValue, ...Observation)) {
(*b)(ctx, BatchObserverResult{
function: function,
})
}
// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver.
func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64ValueObserver{asyncInstrument: common}, err
}
// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver.
func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64ValueObserver{asyncInstrument: common}, err
}
// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver.
func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64SumObserver{asyncInstrument: common}, err
}
// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver.
func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64SumObserver{asyncInstrument: common}, err
}

View File

@ -13,57 +13,37 @@
// limitations under the License.
// metric package provides an API for reporting diagnostic
// measurements using four basic kinds of instruments.
// measurements using instruments categorized as follows:
//
// The three basic kinds are:
// Synchronous instruments are called by the user with a Context.
// Asynchronous instruments are called by the SDK during collection.
//
// - counters
// - measures
// - observers
// Additive instruments are semantically intended for capturing a sum.
// Non-additive instruments are intended for capturing a distribution.
//
// All instruments report either float64 or int64 values.
// Additive instruments may be monotonic, in which case they are
// non-descreasing and naturally define a rate.
//
// The primary object that handles metrics is Meter. Meter can be
// obtained from Provider. The implementations of the Meter and
// Provider are provided by SDK. Normally, the Meter is used directly
// only for the instrument creation and batch recording.
// The synchronous instrument names are:
//
// Counters are instruments that are reporting a quantity or a sum. An
// example could be bank account balance or bytes downloaded. Counters
// can be created with either NewFloat64Counter or
// NewInt64Counter. Counters expect non-negative values by default to
// be reported. This can be changed with the WithMonotonic option
// (passing false as a parameter) passed to the Meter.New*Counter
// function - this allows reporting negative values. To report the new
// value, use an Add function.
// Counter: additive, monotonic
// UpDownCounter: additive
// ValueRecorder: non-additive
//
// Measures are instruments that are reporting values that are
// recorded separately to figure out some statistical properties from
// those values (like average). An example could be temperature over
// time or lines of code in the project over time. Measures can be
// created with either NewFloat64Measure or NewInt64Measure. Measures
// by default take only non-negative values. This can be changed with
// the WithAbsolute option (passing false as a parameter) passed to
// the New*Measure function - this allows reporting negative values
// too. To report a new value, use the Record function.
// and the asynchronous instruments are:
//
// Observers are instruments that are reporting a current state of a
// set of values. An example could be voltage or
// temperature. Observers can be created with either
// RegisterFloat64Observer or RegisterInt64Observer. Observers by
// default have no limitations about reported values - they can be
// less or greater than the last reported value. This can be changed
// with the WithMonotonic option passed to the Register*Observer
// function - this permits the reported values only to go
// up. Reporting of the new values happens asynchronously, with the
// use of a callback passed to the Register*Observer function. The
// callback can report multiple values. There is no unregister function.
// SumObserver: additive, monotonic
// UpDownSumOnserver: additive
// ValueObserver: non-additive
//
// Counters and measures support creating bound instruments for a
// potentially more efficient reporting. The bound instruments have
// the same function names as the instruments (so a Counter bound
// instrument has Add, and a Measure bound instrument has Record).
// Bound Instruments can be created with the Bind function of the
// respective instrument. When done with the bound instrument, call
// Unbind on it.
// All instruments are provided with support for either float64 or
// int64 input values.
//
// The Meter interface supports allocating new instruments as well as
// interfaces for recording batches of synchronous measurements or
// asynchronous observations. To obtain a Meter, use a Provider.
//
// The Provider interface supports obtaining a named Meter interface.
// To obtain a Provider implementation, initialize and configure any
// compatible SDK.
package metric // import "go.opentelemetry.io/otel/api/metric"

View File

@ -20,10 +20,16 @@ package metric
type Kind int8
const (
// MeasureKind indicates a Measure instrument.
MeasureKind Kind = iota
// ObserverKind indicates an Observer instrument.
ObserverKind
// ValueRecorderKind indicates a ValueRecorder instrument.
ValueRecorderKind Kind = iota
// ValueObserverKind indicates an ValueObserver instrument.
ValueObserverKind
// CounterKind indicates a Counter instrument.
CounterKind
// UpDownCounterKind indicates a UpDownCounter instrument.
UpDownCounterKind
// SumObserverKind indicates a SumObserver instrument.
SumObserverKind
)

View File

@ -8,14 +8,16 @@ func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[MeasureKind-0]
_ = x[ObserverKind-1]
_ = x[ValueRecorderKind-0]
_ = x[ValueObserverKind-1]
_ = x[CounterKind-2]
_ = x[UpDownCounterKind-3]
_ = x[SumObserverKind-4]
}
const _Kind_name = "MeasureKindObserverKindCounterKind"
const _Kind_name = "ValueRecorderKindValueObserverKindCounterKindUpDownCounterKindSumObserverKind"
var _Kind_index = [...]uint8{0, 11, 23, 34}
var _Kind_index = [...]uint8{0, 17, 34, 45, 62, 77}
func (i Kind) String() string {
if i < 0 || i >= Kind(len(_Kind_index)-1) {

View File

@ -82,72 +82,141 @@ func (m Meter) NewFloat64Counter(name string, options ...Option) (Float64Counter
m.newSync(name, CounterKind, Float64NumberKind, options))
}
// NewInt64Measure creates a new integer Measure instrument with the
// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64Measure(name string, opts ...Option) (Int64Measure, error) {
return wrapInt64MeasureInstrument(
m.newSync(name, MeasureKind, Int64NumberKind, opts))
func (m Meter) NewInt64UpDownCounter(name string, options ...Option) (Int64UpDownCounter, error) {
return wrapInt64UpDownCounterInstrument(
m.newSync(name, UpDownCounterKind, Int64NumberKind, options))
}
// NewFloat64Measure creates a new floating point Measure with the
// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64Measure(name string, opts ...Option) (Float64Measure, error) {
return wrapFloat64MeasureInstrument(
m.newSync(name, MeasureKind, Float64NumberKind, opts))
func (m Meter) NewFloat64UpDownCounter(name string, options ...Option) (Float64UpDownCounter, error) {
return wrapFloat64UpDownCounterInstrument(
m.newSync(name, UpDownCounterKind, Float64NumberKind, options))
}
// RegisterInt64Observer creates a new integer Observer instrument
// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewInt64ValueRecorder(name string, opts ...Option) (Int64ValueRecorder, error) {
return wrapInt64ValueRecorderInstrument(
m.newSync(name, ValueRecorderKind, Int64NumberKind, opts))
}
// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
// given name, customized with options. May return an error if the
// name is invalid (e.g., empty) or improperly registered (e.g.,
// duplicate registration).
func (m Meter) NewFloat64ValueRecorder(name string, opts ...Option) (Float64ValueRecorder, error) {
return wrapFloat64ValueRecorderInstrument(
m.newSync(name, ValueRecorderKind, Float64NumberKind, opts))
}
// RegisterInt64ValueObserver creates a new integer ValueObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) RegisterInt64Observer(name string, callback Int64ObserverCallback, opts ...Option) (Int64Observer, error) {
func (m Meter) RegisterInt64ValueObserver(name string, callback Int64ObserverCallback, opts ...Option) (Int64ValueObserver, error) {
if callback == nil {
return wrapInt64ObserverInstrument(NoopAsync{}, nil)
return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64ObserverInstrument(
m.newAsync(name, ObserverKind, Int64NumberKind, opts,
return wrapInt64ValueObserverInstrument(
m.newAsync(name, ValueObserverKind, Int64NumberKind, opts,
newInt64AsyncRunner(callback)))
}
// RegisterFloat64Observer creates a new floating point Observer with
// RegisterFloat64ValueObserver creates a new floating point ValueObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) RegisterFloat64Observer(name string, callback Float64ObserverCallback, opts ...Option) (Float64Observer, error) {
func (m Meter) RegisterFloat64ValueObserver(name string, callback Float64ObserverCallback, opts ...Option) (Float64ValueObserver, error) {
if callback == nil {
return wrapFloat64ObserverInstrument(NoopAsync{}, nil)
return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64ObserverInstrument(
m.newAsync(name, ObserverKind, Float64NumberKind, opts,
return wrapFloat64ValueObserverInstrument(
m.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
newFloat64AsyncRunner(callback)))
}
// RegisterInt64Observer creates a new integer Observer instrument
// RegisterInt64SumObserver creates a new integer SumObserver instrument
// with the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) RegisterInt64SumObserver(name string, callback Int64ObserverCallback, opts ...Option) (Int64SumObserver, error) {
if callback == nil {
return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64SumObserverInstrument(
m.newAsync(name, SumObserverKind, Int64NumberKind, opts,
newInt64AsyncRunner(callback)))
}
// RegisterFloat64SumObserver creates a new floating point SumObserver with
// the given name, running a given callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (m Meter) RegisterFloat64SumObserver(name string, callback Float64ObserverCallback, opts ...Option) (Float64SumObserver, error) {
if callback == nil {
return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64SumObserverInstrument(
m.newAsync(name, SumObserverKind, Float64NumberKind, opts,
newFloat64AsyncRunner(callback)))
}
// RegisterInt64ValueObserver creates a new integer ValueObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) RegisterInt64Observer(name string, opts ...Option) (Int64Observer, error) {
func (b BatchObserver) RegisterInt64ValueObserver(name string, opts ...Option) (Int64ValueObserver, error) {
if b.runner == nil {
return wrapInt64ObserverInstrument(NoopAsync{}, nil)
return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64ObserverInstrument(
b.meter.newAsync(name, ObserverKind, Int64NumberKind, opts, b.runner))
return wrapInt64ValueObserverInstrument(
b.meter.newAsync(name, ValueObserverKind, Int64NumberKind, opts, b.runner))
}
// RegisterFloat64Observer creates a new floating point Observer with
// RegisterFloat64ValueObserver creates a new floating point ValueObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) RegisterFloat64Observer(name string, opts ...Option) (Float64Observer, error) {
func (b BatchObserver) RegisterFloat64ValueObserver(name string, opts ...Option) (Float64ValueObserver, error) {
if b.runner == nil {
return wrapFloat64ObserverInstrument(NoopAsync{}, nil)
return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64ObserverInstrument(
b.meter.newAsync(name, ObserverKind, Float64NumberKind, opts,
return wrapFloat64ValueObserverInstrument(
b.meter.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
b.runner))
}
// RegisterInt64SumObserver creates a new integer SumObserver instrument
// with the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) RegisterInt64SumObserver(name string, opts ...Option) (Int64SumObserver, error) {
if b.runner == nil {
return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapInt64SumObserverInstrument(
b.meter.newAsync(name, SumObserverKind, Int64NumberKind, opts, b.runner))
}
// RegisterFloat64SumObserver creates a new floating point SumObserver with
// the given name, running in a batch callback, and customized with
// options. May return an error if the name is invalid (e.g., empty)
// or improperly registered (e.g., duplicate registration).
func (b BatchObserver) RegisterFloat64SumObserver(name string, opts ...Option) (Float64SumObserver, error) {
if b.runner == nil {
return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
}
return wrapFloat64SumObserverInstrument(
b.meter.newAsync(name, SumObserverKind, Float64NumberKind, opts,
b.runner))
}

View File

@ -53,40 +53,80 @@ func (mm MeterMust) NewFloat64Counter(name string, cos ...Option) Float64Counter
}
}
// NewInt64Measure calls `Meter.NewInt64Measure` and returns the
// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64Measure(name string, mos ...Option) Int64Measure {
if inst, err := mm.meter.NewInt64Measure(name, mos...); err != nil {
func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...Option) Int64UpDownCounter {
if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// NewFloat64Measure calls `Meter.NewFloat64Measure` and returns the
// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64Measure(name string, mos ...Option) Float64Measure {
if inst, err := mm.meter.NewFloat64Measure(name, mos...); err != nil {
func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...Option) Float64UpDownCounter {
if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterInt64Observer calls `Meter.RegisterInt64Observer` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) RegisterInt64Observer(name string, callback Int64ObserverCallback, oos ...Option) Int64Observer {
if inst, err := mm.meter.RegisterInt64Observer(name, callback, oos...); err != nil {
// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...Option) Int64ValueRecorder {
if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterFloat64Observer calls `Meter.RegisterFloat64Observer` and
// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
// instrument, panicking if it encounters an error.
func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...Option) Float64ValueRecorder {
if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterInt64ValueObserver calls `Meter.RegisterInt64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) RegisterFloat64Observer(name string, callback Float64ObserverCallback, oos ...Option) Float64Observer {
if inst, err := mm.meter.RegisterFloat64Observer(name, callback, oos...); err != nil {
func (mm MeterMust) RegisterInt64ValueObserver(name string, callback Int64ObserverCallback, oos ...Option) Int64ValueObserver {
if inst, err := mm.meter.RegisterInt64ValueObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterFloat64ValueObserver calls `Meter.RegisterFloat64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) RegisterFloat64ValueObserver(name string, callback Float64ObserverCallback, oos ...Option) Float64ValueObserver {
if inst, err := mm.meter.RegisterFloat64ValueObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterInt64SumObserver calls `Meter.RegisterInt64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) RegisterInt64SumObserver(name string, callback Int64ObserverCallback, oos ...Option) Int64SumObserver {
if inst, err := mm.meter.RegisterInt64SumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterFloat64SumObserver calls `Meter.RegisterFloat64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (mm MeterMust) RegisterFloat64SumObserver(name string, callback Float64ObserverCallback, oos ...Option) Float64SumObserver {
if inst, err := mm.meter.RegisterFloat64SumObserver(name, callback, oos...); err != nil {
panic(err)
} else {
return inst
@ -101,20 +141,40 @@ func (mm MeterMust) NewBatchObserver(callback BatchObserverCallback) BatchObserv
}
}
// RegisterInt64Observer calls `BatchObserver.RegisterInt64Observer` and
// RegisterInt64ValueObserver calls `BatchObserver.RegisterInt64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) RegisterInt64Observer(name string, oos ...Option) Int64Observer {
if inst, err := bm.batch.RegisterInt64Observer(name, oos...); err != nil {
func (bm BatchObserverMust) RegisterInt64ValueObserver(name string, oos ...Option) Int64ValueObserver {
if inst, err := bm.batch.RegisterInt64ValueObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterFloat64Observer calls `BatchObserver.RegisterFloat64Observer` and
// RegisterFloat64ValueObserver calls `BatchObserver.RegisterFloat64ValueObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) RegisterFloat64Observer(name string, oos ...Option) Float64Observer {
if inst, err := bm.batch.RegisterFloat64Observer(name, oos...); err != nil {
func (bm BatchObserverMust) RegisterFloat64ValueObserver(name string, oos ...Option) Float64ValueObserver {
if inst, err := bm.batch.RegisterFloat64ValueObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterInt64SumObserver calls `BatchObserver.RegisterInt64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) RegisterInt64SumObserver(name string, oos ...Option) Int64SumObserver {
if inst, err := bm.batch.RegisterInt64SumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst
}
}
// RegisterFloat64SumObserver calls `BatchObserver.RegisterFloat64SumObserver` and
// returns the instrument, panicking if it encounters an error.
func (bm BatchObserverMust) RegisterFloat64SumObserver(name string, oos ...Option) Float64SumObserver {
if inst, err := bm.batch.RegisterFloat64SumObserver(name, oos...); err != nil {
panic(err)
} else {
return inst

View File

@ -21,15 +21,27 @@ type BatchObserver struct {
runner AsyncBatchRunner
}
// Int64Observer is a metric that captures a set of int64 values at a
// Int64ValueObserver is a metric that captures a set of int64 values at a
// point in time.
type Int64Observer struct {
type Int64ValueObserver struct {
asyncInstrument
}
// Float64Observer is a metric that captures a set of float64 values
// Float64ValueObserver is a metric that captures a set of float64 values
// at a point in time.
type Float64Observer struct {
type Float64ValueObserver struct {
asyncInstrument
}
// Int64SumObserver is a metric that captures a precomputed sum of
// int64 values at a point in time.
type Int64SumObserver struct {
asyncInstrument
}
// Float64SumObserver is a metric that captures a precomputed sum of
// float64 values at a point in time.
type Float64SumObserver struct {
asyncInstrument
}
@ -37,7 +49,7 @@ type Float64Observer struct {
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (i Int64Observer) Observation(v int64) Observation {
func (i Int64ValueObserver) Observation(v int64) Observation {
return Observation{
number: NewInt64Number(v),
instrument: i.instrument,
@ -48,7 +60,29 @@ func (i Int64Observer) Observation(v int64) Observation {
// argument, for an asynchronous integer instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64Observer) Observation(v float64) Observation {
func (f Float64ValueObserver) Observation(v float64) Observation {
return Observation{
number: NewFloat64Number(v),
instrument: f.instrument,
}
}
// Observation returns an Observation pairing the int64 value v with
// this asynchronous instrument, suitable as a BatchObserverCallback
// argument.
// This returns an implementation-level object for use by the SDK;
// users should not refer to this.
func (i Int64SumObserver) Observation(v int64) Observation {
	return Observation{
		number:     NewInt64Number(v),
		instrument: i.instrument,
	}
}
// Observation returns an Observation, a BatchObserverCallback
// argument, for an asynchronous floating point instrument.
// This returns an implementation-level object for use by the SDK,
// users should not refer to this.
func (f Float64SumObserver) Observation(v float64) Observation {
return Observation{
number: NewFloat64Number(v),
instrument: f.instrument,

View File

@ -23,6 +23,13 @@ import (
"go.opentelemetry.io/otel/api/metric"
)
// Provider is a standard metric.Provider for wrapping `MeterImpl`
type Provider struct {
impl metric.MeterImpl
}
var _ metric.Provider = (*Provider)(nil)
// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter
// to wrap an implementation with uniqueness checking.
@ -39,6 +46,19 @@ type key struct {
libraryName string
}
// NewProvider returns a new provider that implements instrument
// name-uniqueness checking.
func NewProvider(impl metric.MeterImpl) *Provider {
return &Provider{
impl: NewUniqueInstrumentMeterImpl(impl),
}
}
// Meter implements metric.Provider.
func (p *Provider) Meter(name string) metric.Meter {
return metric.WrapMeterImpl(p.impl, name)
}
// ErrMetricKindMismatch is the standard error for mismatched metric
// instrument definitions.
var ErrMetricKindMismatch = fmt.Errorf(

View File

@ -15,6 +15,7 @@
package registry_test
import (
"context"
"errors"
"testing"
@ -37,17 +38,17 @@ var (
"counter.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.NewFloat64Counter(name))
},
"measure.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.NewInt64Measure(name))
"valuerecorder.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.NewInt64ValueRecorder(name))
},
"measure.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.NewFloat64Measure(name))
"valuerecorder.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.NewFloat64ValueRecorder(name))
},
"observer.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.RegisterInt64Observer(name, func(metric.Int64ObserverResult) {}))
"valueobserver.int64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.RegisterInt64ValueObserver(name, func(context.Context, metric.Int64ObserverResult) {}))
},
"observer.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.RegisterFloat64Observer(name, func(metric.Float64ObserverResult) {}))
"valueobserver.float64": func(m metric.Meter, name string) (metric.InstrumentImpl, error) {
return unwrap(m.RegisterFloat64ValueObserver(name, func(context.Context, metric.Float64ObserverResult) {}))
},
}
)
@ -118,3 +119,14 @@ func TestRegistryDiffInstruments(t *testing.T) {
}
}
}
func TestProvider(t *testing.T) {
impl, _ := mockTest.NewMeter()
p := registry.NewProvider(impl)
m1 := p.Meter("m1")
m1p := p.Meter("m1")
m2 := p.Meter("m2")
require.Equal(t, m1, m1p)
require.NotEqual(t, m1, m2)
}

View File

@ -53,7 +53,7 @@ type InstrumentImpl interface {
}
// SyncImpl is the implementation-level interface to a generic
// synchronous instrument (e.g., Measure and Counter instruments).
// synchronous instrument (e.g., ValueRecorder and Counter instruments).
type SyncImpl interface {
InstrumentImpl

View File

@ -156,56 +156,38 @@ func newMeasurement(instrument SyncImpl, number Number) Measurement {
}
}
// wrapInt64CounterInstrument returns an `Int64Counter` from a
// `SyncImpl`. An error will be generated if the
// `SyncImpl` is nil (in which case a No-op is substituted),
// otherwise the error passes through.
// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) {
common, err := checkNewSync(syncInst, err)
return Int64Counter{syncInstrument: common}, err
}
// wrapFloat64CounterInstrument returns an `Float64Counter` from a
// `SyncImpl`. An error will be generated if the
// `SyncImpl` is nil (in which case a No-op is substituted),
// otherwise the error passes through.
// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) {
common, err := checkNewSync(syncInst, err)
return Float64Counter{syncInstrument: common}, err
}
// wrapInt64MeasureInstrument returns an `Int64Measure` from a
// `SyncImpl`. An error will be generated if the
// `SyncImpl` is nil (in which case a No-op is substituted),
// otherwise the error passes through.
func wrapInt64MeasureInstrument(syncInst SyncImpl, err error) (Int64Measure, error) {
// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) {
common, err := checkNewSync(syncInst, err)
return Int64Measure{syncInstrument: common}, err
return Int64UpDownCounter{syncInstrument: common}, err
}
// wrapFloat64MeasureInstrument returns an `Float64Measure` from a
// `SyncImpl`. An error will be generated if the
// `SyncImpl` is nil (in which case a No-op is substituted),
// otherwise the error passes through.
func wrapFloat64MeasureInstrument(syncInst SyncImpl, err error) (Float64Measure, error) {
// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) {
common, err := checkNewSync(syncInst, err)
return Float64Measure{syncInstrument: common}, err
return Float64UpDownCounter{syncInstrument: common}, err
}
// wrapInt64ObserverInstrument returns an `Int64Observer` from a
// `AsyncImpl`. An error will be generated if the
// `AsyncImpl` is nil (in which case a No-op is substituted),
// otherwise the error passes through.
func wrapInt64ObserverInstrument(asyncInst AsyncImpl, err error) (Int64Observer, error) {
common, err := checkNewAsync(asyncInst, err)
return Int64Observer{asyncInstrument: common}, err
// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder.
func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
common, err := checkNewSync(syncInst, err)
return Int64ValueRecorder{syncInstrument: common}, err
}
// wrapFloat64ObserverInstrument returns an `Float64Observer` from a
// `AsyncImpl`. An error will be generated if the
// `AsyncImpl` is nil (in which case a No-op is substituted),
// otherwise the error passes through.
func wrapFloat64ObserverInstrument(asyncInst AsyncImpl, err error) (Float64Observer, error) {
common, err := checkNewAsync(asyncInst, err)
return Float64Observer{asyncInstrument: common}, err
// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder.
func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
common, err := checkNewSync(syncInst, err)
return Float64ValueRecorder{syncInstrument: common}, err
}

View File

@ -0,0 +1,96 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric
import (
"context"
"go.opentelemetry.io/otel/api/kv"
)
// Float64UpDownCounter is a synchronous metric instrument that sums
// float64 values. Add performs no sign check, so negative values are
// accepted and the reported sum may decrease over time.
type Float64UpDownCounter struct {
	syncInstrument
}
// Int64UpDownCounter is a synchronous metric instrument that sums
// int64 values. Add performs no sign check, so negative values are
// accepted and the reported sum may decrease over time.
type Int64UpDownCounter struct {
	syncInstrument
}
// BoundFloat64UpDownCounter is a bound instrument for
// Float64UpDownCounter: it holds the label set supplied to Bind so
// subsequent Add calls need not pass labels.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64UpDownCounter struct {
	syncBoundInstrument
}
// BoundInt64UpDownCounter is a bound instrument for
// Int64UpDownCounter: it holds the label set supplied to Bind so
// subsequent Add calls need not pass labels.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64UpDownCounter struct {
	syncBoundInstrument
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values added via subsequent calls to Add on the
// returned instrument.
func (c Float64UpDownCounter) Bind(labels ...kv.KeyValue) (h BoundFloat64UpDownCounter) {
	h = BoundFloat64UpDownCounter{syncBoundInstrument: c.bind(labels)}
	return h
}
// Bind creates a bound instrument for this counter. The labels are
// associated with values added via subsequent calls to Add on the
// returned instrument.
func (c Int64UpDownCounter) Bind(labels ...kv.KeyValue) (h BoundInt64UpDownCounter) {
	h = BoundInt64UpDownCounter{syncBoundInstrument: c.bind(labels)}
	return h
}
// Measurement packages the given float64 value together with this
// instrument into a Measurement for use with batch recording.
func (c Float64UpDownCounter) Measurement(value float64) Measurement {
	return c.float64Measurement(value)
}
// Measurement packages the given int64 value together with this
// instrument into a Measurement for use with batch recording.
func (c Int64UpDownCounter) Measurement(value int64) Measurement {
	return c.int64Measurement(value)
}
// Add adds the value (which may be negative) to the counter's sum.
// The labels should contain the keys and values to be associated
// with this value.
func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...kv.KeyValue) {
	num := NewFloat64Number(value)
	c.directRecord(ctx, num, labels)
}
// Add adds the value (which may be negative) to the counter's sum.
// The labels should contain the keys and values to be associated
// with this value.
func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...kv.KeyValue) {
	num := NewInt64Number(value)
	c.directRecord(ctx, num, labels)
}
// Add adds the value (which may be negative) to the counter's sum
// using the labels previously bound to this counter via Bind().
func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) {
	num := NewFloat64Number(value)
	b.directRecord(ctx, num)
}
// Add adds the value (which may be negative) to the counter's sum
// using the labels previously bound to this counter via Bind().
func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) {
	num := NewInt64Number(value)
	b.directRecord(ctx, num)
}

View File

@ -20,78 +20,78 @@ import (
"go.opentelemetry.io/otel/api/kv"
)
// Float64Measure is a metric that records float64 values.
type Float64Measure struct {
// Float64ValueRecorder is a metric that records float64 values.
type Float64ValueRecorder struct {
syncInstrument
}
// Int64Measure is a metric that records int64 values.
type Int64Measure struct {
// Int64ValueRecorder is a metric that records int64 values.
type Int64ValueRecorder struct {
syncInstrument
}
// BoundFloat64Measure is a bound instrument for Float64Measure.
// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundFloat64Measure struct {
type BoundFloat64ValueRecorder struct {
syncBoundInstrument
}
// BoundInt64Measure is a bound instrument for Int64Measure.
// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
//
// It inherits the Unbind function from syncBoundInstrument.
type BoundInt64Measure struct {
type BoundInt64ValueRecorder struct {
syncBoundInstrument
}
// Bind creates a bound instrument for this measure. The labels are
// Bind creates a bound instrument for this ValueRecorder. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Float64Measure) Bind(labels ...kv.KeyValue) (h BoundFloat64Measure) {
func (c Float64ValueRecorder) Bind(labels ...kv.KeyValue) (h BoundFloat64ValueRecorder) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Bind creates a bound instrument for this measure. The labels are
// Bind creates a bound instrument for this ValueRecorder. The labels are
// associated with values recorded via subsequent calls to Record.
func (c Int64Measure) Bind(labels ...kv.KeyValue) (h BoundInt64Measure) {
func (c Int64ValueRecorder) Bind(labels ...kv.KeyValue) (h BoundInt64ValueRecorder) {
h.syncBoundInstrument = c.bind(labels)
return
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Float64Measure) Measurement(value float64) Measurement {
func (c Float64ValueRecorder) Measurement(value float64) Measurement {
return c.float64Measurement(value)
}
// Measurement creates a Measurement object to use with batch
// recording.
func (c Int64Measure) Measurement(value int64) Measurement {
func (c Int64ValueRecorder) Measurement(value int64) Measurement {
return c.int64Measurement(value)
}
// Record adds a new value to the list of measure's records. The
// Record adds a new value to the list of ValueRecorder's records. The
// labels should contain the keys and values to be associated with
// this value.
func (c Float64Measure) Record(ctx context.Context, value float64, labels ...kv.KeyValue) {
func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...kv.KeyValue) {
c.directRecord(ctx, NewFloat64Number(value), labels)
}
// Record adds a new value to the list of measure's records. The
// Record adds a new value to the ValueRecorder's distribution. The
// labels should contain the keys and values to be associated with
// this value.
func (c Int64Measure) Record(ctx context.Context, value int64, labels ...kv.KeyValue) {
func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...kv.KeyValue) {
c.directRecord(ctx, NewInt64Number(value), labels)
}
// Record adds a new value to the list of measure's records using the labels
// previously bound to the measure via Bind()
func (b BoundFloat64Measure) Record(ctx context.Context, value float64) {
// Record adds a new value to the ValueRecorder's distribution using the labels
// previously bound to the ValueRecorder via Bind().
func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
b.directRecord(ctx, NewFloat64Number(value))
}
// Record adds a new value to the list of measure's records using the labels
// previously bound to the measure via Bind()
func (b BoundInt64Measure) Record(ctx context.Context, value int64) {
// Record adds a new value to the ValueRecorder's distribution using the labels
// previously bound to the ValueRecorder via Bind().
func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
b.directRecord(ctx, NewInt64Number(value))
}

22
api/standard/doc.go Normal file
View File

@ -0,0 +1,22 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package standard contains keys and values that have been standardized for
// use in OpenTelemetry. These standardizations are specified in the
// OpenTelemetry specification:
//
// - https://github.com/open-telemetry/opentelemetry-specification/tree/v0.4.0/specification/resource/semantic_conventions
// - https://github.com/open-telemetry/opentelemetry-specification/tree/v0.4.0/specification/trace/semantic_conventions
// - https://github.com/open-telemetry/opentelemetry-specification/tree/v0.4.0/specification/metrics/semantic_conventions
package standard

152
api/standard/resource.go Normal file
View File

@ -0,0 +1,152 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package standard // import "go.opentelemetry.io/otel/api/standard"
import "go.opentelemetry.io/otel/api/kv"
// Standard service resource attribute keys.
const (
// Name of the service.
ServiceNameKey = kv.Key("service.name")
// A namespace for `service.name`. This needs to have meaning that helps
// to distinguish a group of services. For example, the team name that
// owns a group of services. `service.name` is expected to be unique
// within the same namespace.
ServiceNamespaceKey = kv.Key("service.namespace")
// A unique identifier of the service instance. In conjunction with the
// `service.name` and `service.namespace` this must be unique.
ServiceInstanceIDKey = kv.Key("service.instance.id")
// The version of the service API.
ServiceVersionKey = kv.Key("service.version")
)
// Standard telemetry SDK resource attribute keys.
const (
// The name of the telemetry SDK.
//
// The default OpenTelemetry SDK provided by the OpenTelemetry project
// MUST set telemetry.sdk.name to the value `opentelemetry`.
//
// If another SDK is used, this attribute MUST be set to the import path
// of that SDK's package.
//
// The value `opentelemetry` is reserved and MUST NOT be used by
// non-OpenTelemetry SDKs.
TelemetrySDKNameKey = kv.Key("telemetry.sdk.name")
// The language of the telemetry SDK.
TelemetrySDKLanguageKey = kv.Key("telemetry.sdk.language")
// The version string of the telemetry SDK.
TelemetrySDKVersionKey = kv.Key("telemetry.sdk.version")
)
// Standard telemetry SDK resource attributes.
var (
TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
)
// Standard container resource attribute keys.
const (
// A uniquely identifying name for the Container.
ContainerNameKey = kv.Key("container.name")
// Name of the image the container was built on.
ContainerImageNameKey = kv.Key("container.image.name")
// Container image tag.
ContainerImageTagKey = kv.Key("container.image.tag")
)
// Standard Function-as-a-Service resource attribute keys.
const (
// A uniquely identifying name for the FaaS.
FaaSName = kv.Key("faas.name")
// The unique name of the function being executed.
FaaSID = kv.Key("faas.id")
// The version of the function being executed.
FaaSVersion = kv.Key("faas.version")
// The execution environment identifier.
FaaSInstance = kv.Key("faas.instance")
)
// Standard Kubernetes resource attribute keys.
const (
// A uniquely identifying name for the Kubernetes cluster. Kubernetes
// does not have cluster names as an internal concept so this may be
// set to any meaningful value within the environment. For example,
// GKE clusters have a name which can be used for this label.
K8SClusterNameKey = kv.Key("k8s.cluster.name")
// The name of the namespace that the pod is running in.
K8SNamespaceNameKey = kv.Key("k8s.namespace.name")
// The name of the pod.
K8SPodNameKey = kv.Key("k8s.pod.name")
// The name of the deployment.
K8SDeploymentNameKey = kv.Key("k8s.deployment.name")
)
// Standard host resource attribute keys.
const (
// A uniquely identifying name for the host.
HostNameKey = kv.Key("host.name")
// A hostname as returned by the 'hostname' command on host machine.
HostHostNameKey = kv.Key("host.hostname")
// Unique host ID. For cloud environments this will be the instance ID.
HostIDKey = kv.Key("host.id")
// Type of host. For cloud environments this will be the machine type.
HostTypeKey = kv.Key("host.type")
// Name of the OS or VM image the host is running.
HostImageNameKey = kv.Key("host.image.name")
// Identifier of the image the host is running.
HostImageIDKey = kv.Key("host.image.id")
// Version of the image the host is running.
HostImageVersionKey = kv.Key("host.image.version")
)
// Standard cloud environment resource attribute keys.
const (
// Name of the cloud provider.
CloudProviderKey = kv.Key("cloud.provider")
// The account ID from the cloud provider used for authorization.
CloudAccountIDKey = kv.Key("cloud.account.id")
// Geographical region where this resource is.
CloudRegionKey = kv.Key("cloud.region")
// Zone of the region where this resource is.
CloudZoneKey = kv.Key("cloud.zone")
)
var (
CloudProviderAWS = CloudProviderKey.String("aws")
CloudProviderAzure = CloudProviderKey.String("azure")
CloudProviderGCP = CloudProviderKey.String("gcp")
)

262
api/standard/trace.go Normal file
View File

@ -0,0 +1,262 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package standard
import "go.opentelemetry.io/otel/api/kv"
// Standard attribute keys used for network related operations.
const (
// Transport protocol used.
NetTransportKey = kv.Key("net.transport")
// Remote address of the peer.
NetPeerIPKey = kv.Key("net.peer.ip")
// Remote port number.
NetPeerPortKey = kv.Key("net.peer.port")
// Remote hostname or similar.
NetPeerNameKey = kv.Key("net.peer.name")
// Local host IP. Useful in case of a multi-IP host.
NetHostIPKey = kv.Key("net.host.ip")
// Local host port.
NetHostPortKey = kv.Key("net.host.port")
// Local hostname or similar.
NetHostNameKey = kv.Key("net.host.name")
)
var (
NetTransportTCP = NetTransportKey.String("IP.TCP")
NetTransportUDP = NetTransportKey.String("IP.UDP")
NetTransportIP = NetTransportKey.String("IP")
NetTransportUnix = NetTransportKey.String("Unix")
NetTransportPipe = NetTransportKey.String("pipe")
NetTransportInProc = NetTransportKey.String("inproc")
NetTransportOther = NetTransportKey.String("other")
)
// Standard attribute keys used to identify an authorized enduser.
const (
// Username or the client identifier extracted from the access token or
// authorization header in the inbound request from outside the system.
EnduserIDKey = kv.Key("enduser.id")
// Actual or assumed role the client is making the request with.
EnduserRoleKey = kv.Key("enduser.role")
// Scopes or granted authorities the client currently possesses.
EnduserScopeKey = kv.Key("enduser.scope")
)
// Standard attribute keys for HTTP.
const (
// HTTP request method.
HTTPMethodKey = kv.Key("http.method")
// Full HTTP request URL in the form:
// scheme://host[:port]/path?query[#fragment].
HTTPUrlKey = kv.Key("http.url")
// The full request target as passed in a HTTP request line or
// equivalent, e.g. "/path/12314/?q=ddds#123".
HTTPTargetKey = kv.Key("http.target")
// The value of the HTTP host header.
HTTPHostKey = kv.Key("http.host")
// The URI scheme identifying the used protocol.
HTTPSchemeKey = kv.Key("http.scheme")
// HTTP response status code.
HTTPStatusCodeKey = kv.Key("http.status_code")
// HTTP reason phrase.
HTTPStatusTextKey = kv.Key("http.status_text")
// Kind of HTTP protocol used.
HTTPFlavorKey = kv.Key("http.flavor")
// Value of the HTTP User-Agent header sent by the client.
HTTPUserAgentKey = kv.Key("http.user_agent")
// The primary server name of the matched virtual host.
HTTPServerNameKey = kv.Key("http.server_name")
// The matched route served (path template). For example,
// "/users/:userID?".
HTTPRouteKey = kv.Key("http.route")
// The IP address of the original client behind all proxies, if known
// (e.g. from X-Forwarded-For).
HTTPClientIPKey = kv.Key("http.client_ip")
)
var (
HTTPSchemeHTTP = HTTPSchemeKey.String("http")
HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
HTTPFlavor1_0 = HTTPFlavorKey.String("1.0")
HTTPFlavor1_1 = HTTPFlavorKey.String("1.1")
HTTPFlavor2 = HTTPFlavorKey.String("2")
HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
)
// Standard attribute keys for database clients.
const (
// Database type. For any SQL database, "sql". For others, the
// lower-case database category, e.g. "cassandra", "hbase", or "redis".
DBTypeKey = kv.Key("db.type")
// Database instance name.
DBInstanceKey = kv.Key("db.instance")
// A database statement for the given database type.
DBStatementKey = kv.Key("db.statement")
// Username for accessing database.
DBUserKey = kv.Key("db.user")
// Database URL.
DBUrlKey = kv.Key("db.url")
)
// Standard attribute keys for RPC.
const (
// The RPC service name.
RPCServiceKey = kv.Key("rpc.service")
// Name of message transmitted or received.
RPCNameKey = kv.Key("name")
// Type of message transmitted or received.
RPCMessageTypeKey = kv.Key("message.type")
// Identifier of message transmitted or received.
RPCMessageIDKey = kv.Key("message.id")
// The compressed size of the message transmitted or received in bytes.
RPCMessageCompressedSizeKey = kv.Key("message.compressed_size")
// The uncompressed size of the message transmitted or received in
// bytes.
RPCMessageUncompressedSizeKey = kv.Key("message.uncompressed_size")
)
var (
RPCNameMessage = RPCNameKey.String("message")
RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
)
// Standard attribute keys for messaging systems.
const (
// A unique identifier describing the messaging system. For example,
// kafka, rabbitmq or activemq.
MessagingSystemKey = kv.Key("messaging.system")
// The message destination name, e.g. MyQueue or MyTopic.
MessagingDestinationKey = kv.Key("messaging.destination")
// The kind of message destination.
MessagingDestinationKindKey = kv.Key("messaging.destination_kind")
// Describes if the destination is temporary or not.
MessagingTempDestinationKey = kv.Key("messaging.temp_destination")
// The name of the transport protocol.
MessagingProtocolKey = kv.Key("messaging.protocol")
// The version of the transport protocol.
MessagingProtocolVersionKey = kv.Key("messaging.protocol_version")
// Messaging service URL.
MessagingURLKey = kv.Key("messaging.url")
// Identifier used by the messaging system for a message.
MessagingMessageIDKey = kv.Key("messaging.message_id")
// Identifier used by the messaging system for a conversation.
MessagingConversationIDKey = kv.Key("messaging.conversation_id")
// The (uncompressed) size of the message payload in bytes.
MessagingMessagePayloadSizeBytesKey = kv.Key("messaging.message_payload_size_bytes")
// The compressed size of the message payload in bytes.
MessagingMessagePayloadCompressedSizeBytesKey = kv.Key("messaging.message_payload_compressed_size_bytes")
// Identifies which part and kind of message consumption is being
// preformed.
MessagingOperationKey = kv.Key("messaging.operation")
// RabbitMQ specific attribute describing the destination routing key.
MessagingRabbitMQRoutingKeyKey = kv.Key("messaging.rabbitmq.routing_key")
)
var (
MessagingDestinationKindKeyQueue = MessagingDestinationKindKey.String("queue")
MessagingDestinationKindKeyTopic = MessagingDestinationKindKey.String("topic")
MessagingTempDestination = MessagingTempDestinationKey.Bool(true)
MessagingOperationReceive = MessagingOperationKey.String("receive")
MessagingOperationProcess = MessagingOperationKey.String("process")
)
// Standard attribute keys for FaaS systems.
const (
// Type of the trigger on which the function is executed.
FaaSTriggerKey = kv.Key("faas.trigger")
// String containing the execution identifier of the function.
FaaSExecutionKey = kv.Key("faas.execution")
// The name of the source on which the operation was performed.
// For example, in Cloud Storage or S3 corresponds to the bucket name,
// and in Cosmos DB to the database name.
FaaSDocumentCollectionKey = kv.Key("faas.document.collection")
// The type of the operation that was performed on the data.
FaaSDocumentOperationKey = kv.Key("faas.document.operation")
// A string containing the time when the data was accessed.
FaaSDocumentTimeKey = kv.Key("faas.document.time")
// The document name/table subjected to the operation.
FaaSDocumentNameKey = kv.Key("faas.document.name")
// The function invocation time.
FaaSTimeKey = kv.Key("faas.time")
// The schedule period as Cron Expression.
FaaSCronKey = kv.Key("faas.cron")
)
var (
FaasTriggerDatasource = FaaSTriggerKey.String("datasource")
FaasTriggerHTTP = FaaSTriggerKey.String("http")
FaasTriggerPubSub = FaaSTriggerKey.String("pubsub")
FaasTriggerTimer = FaaSTriggerKey.String("timer")
FaasTriggerOther = FaaSTriggerKey.String("other")
FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
)

View File

@ -73,14 +73,14 @@ func main() {
commonLabels := []kv.KeyValue{lemonsKey.Int(10), kv.String("A", "1"), kv.String("B", "2"), kv.String("C", "3")}
oneMetricCB := func(result metric.Float64ObserverResult) {
oneMetricCB := func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(1, commonLabels...)
}
_ = metric.Must(meter).RegisterFloat64Observer("ex.com.one", oneMetricCB,
metric.WithDescription("An observer set to 1.0"),
_ = metric.Must(meter).RegisterFloat64ValueObserver("ex.com.one", oneMetricCB,
metric.WithDescription("A ValueObserver set to 1.0"),
)
measureTwo := metric.Must(meter).NewFloat64Measure("ex.com.two")
valuerecorderTwo := metric.Must(meter).NewFloat64ValueRecorder("ex.com.two")
ctx := context.Background()
@ -89,8 +89,8 @@ func main() {
barKey.String("bar1"),
)
measure := measureTwo.Bind(commonLabels...)
defer measure.Unbind()
valuerecorder := valuerecorderTwo.Bind(commonLabels...)
defer valuerecorder.Unbind()
err := tracer.WithSpan(ctx, "operation", func(ctx context.Context) error {
@ -103,7 +103,7 @@ func main() {
correlation.NewContext(ctx, anotherKey.String("xyz")),
commonLabels,
measureTwo.Measurement(2.0),
valuerecorderTwo.Measurement(2.0),
)
return tracer.WithSpan(
@ -114,7 +114,7 @@ func main() {
trace.SpanFromContext(ctx).AddEvent(ctx, "Sub span event")
measure.Record(ctx, 1.3)
valuerecorder.Record(ctx, 1.3)
return nil
},

View File

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:alpine AS base
FROM golang:1.14-alpine AS base
COPY . /go/src/github.com/open-telemetry/opentelemetry-go/
WORKDIR /go/src/github.com/open-telemetry/opentelemetry-go/example/http/

View File

@ -33,11 +33,11 @@ var (
)
func initMeter() *push.Controller {
pusher, hf, err := prometheus.InstallNewPipeline(prometheus.Config{})
pusher, exporter, err := prometheus.InstallNewPipeline(prometheus.Config{})
if err != nil {
log.Panicf("failed to initialize prometheus exporter %v", err)
}
http.HandleFunc("/", hf)
http.HandleFunc("/", exporter.ServeHTTP)
go func() {
_ = http.ListenAndServe(":2222", nil)
}()
@ -52,19 +52,19 @@ func main() {
observerLock := new(sync.RWMutex)
observerValueToReport := new(float64)
observerLabelsToReport := new([]kv.KeyValue)
cb := func(result metric.Float64ObserverResult) {
cb := func(_ context.Context, result metric.Float64ObserverResult) {
(*observerLock).RLock()
value := *observerValueToReport
labels := *observerLabelsToReport
(*observerLock).RUnlock()
result.Observe(value, labels...)
}
_ = metric.Must(meter).RegisterFloat64Observer("ex.com.one", cb,
metric.WithDescription("A measure set to 1.0"),
_ = metric.Must(meter).RegisterFloat64ValueObserver("ex.com.one", cb,
metric.WithDescription("A ValueObserver set to 1.0"),
)
measureTwo := metric.Must(meter).NewFloat64Measure("ex.com.two")
measureThree := metric.Must(meter).NewFloat64Counter("ex.com.three")
valuerecorder := metric.Must(meter).NewFloat64ValueRecorder("ex.com.two")
counter := metric.Must(meter).NewFloat64Counter("ex.com.three")
commonLabels := []kv.KeyValue{lemonsKey.Int(10), kv.String("A", "1"), kv.String("B", "2"), kv.String("C", "3")}
notSoCommonLabels := []kv.KeyValue{lemonsKey.Int(13)}
@ -78,8 +78,8 @@ func main() {
meter.RecordBatch(
ctx,
commonLabels,
measureTwo.Measurement(2.0),
measureThree.Measurement(12.0),
valuerecorder.Measurement(2.0),
counter.Measurement(12.0),
)
time.Sleep(5 * time.Second)
@ -91,8 +91,8 @@ func main() {
meter.RecordBatch(
ctx,
notSoCommonLabels,
measureTwo.Measurement(2.0),
measureThree.Measurement(22.0),
valuerecorder.Measurement(2.0),
counter.Measurement(22.0),
)
time.Sleep(5 * time.Second)
@ -104,8 +104,8 @@ func main() {
meter.RecordBatch(
ctx,
commonLabels,
measureTwo.Measurement(12.0),
measureThree.Measurement(13.0),
valuerecorder.Measurement(12.0),
counter.Measurement(13.0),
)
time.Sleep(100 * time.Second)

View File

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:alpine
FROM golang:1.14-alpine
COPY . /go/src/github.com/open-telemetry/opentelemetry-go/
WORKDIR /go/src/github.com/open-telemetry/opentelemetry-go/example/zipkin/
RUN go install ./main.go

View File

@ -0,0 +1,97 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus_test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/exporters/metric/prometheus"
sdk "go.opentelemetry.io/otel/sdk/metric"
integrator "go.opentelemetry.io/otel/sdk/metric/integrator/simple"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
// This test demonstrates that it is relatively difficult to setup a
// Prometheus export pipeline:
//
// 1. The default boundaries are difficult to pass, should be []float instead of []metric.Number
// 2. The push controller doesn't make sense b/c Prometheus is pull-bsaed
//
// TODO: Address these issues; add Resources to the test.
func ExampleNewExportPipeline() {
// Create a meter
selector := simple.NewWithHistogramDistribution(nil)
exporter, err := prometheus.NewRawExporter(prometheus.Config{})
if err != nil {
panic(err)
}
integrator := integrator.New(selector, true)
meterImpl := sdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(meterImpl, "example")
ctx := context.Background()
// Use two instruments
counter := metric.Must(meter).NewInt64Counter(
"a.counter",
metric.WithDescription("Counts things"),
)
recorder := metric.Must(meter).NewInt64ValueRecorder(
"a.valuerecorder",
metric.WithDescription("Records values"),
)
counter.Add(ctx, 100, kv.String("key", "value"))
recorder.Record(ctx, 100, kv.String("key", "value"))
// Simulate a push
meterImpl.Collect(ctx)
err = exporter.Export(ctx, integrator.CheckpointSet())
if err != nil {
panic(err)
}
// GET the HTTP endpoint
var input bytes.Buffer
resp := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/", &input)
if err != nil {
panic(err)
}
exporter.ServeHTTP(resp, req)
data, err := ioutil.ReadAll(resp.Result().Body)
if err != nil {
panic(err)
}
fmt.Print(string(data))
// Output:
// # HELP a_counter Counts things
// # TYPE a_counter counter
// a_counter{key="value"} 100
// # HELP a_valuerecorder Records values
// # TYPE a_valuerecorder histogram
// a_valuerecorder_bucket{key="value",le="+Inf"} 1
// a_valuerecorder_sum{key="value"} 100
// a_valuerecorder_count{key="value"} 1
}

View File

@ -18,7 +18,7 @@ import (
"context"
"fmt"
"net/http"
"time"
"sync"
"go.opentelemetry.io/otel/api/metric"
@ -30,9 +30,7 @@ import (
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
integrator "go.opentelemetry.io/otel/sdk/metric/integrator/simple"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
"go.opentelemetry.io/otel/sdk/resource"
)
// Exporter is an implementation of metric.Exporter that sends metrics to
@ -43,6 +41,7 @@ type Exporter struct {
registerer prometheus.Registerer
gatherer prometheus.Gatherer
lock sync.RWMutex
snapshot export.CheckpointSet
onError func(error)
@ -135,42 +134,49 @@ func NewRawExporter(config Config) (*Exporter, error) {
// http.HandleFunc("/metrics", hf)
// defer pipeline.Stop()
// ... Done
func InstallNewPipeline(config Config) (*push.Controller, http.HandlerFunc, error) {
controller, hf, err := NewExportPipeline(config, time.Minute)
func InstallNewPipeline(config Config, options ...push.Option) (*push.Controller, *Exporter, error) {
controller, exp, err := NewExportPipeline(config, options...)
if err != nil {
return controller, hf, err
return controller, exp, err
}
global.SetMeterProvider(controller)
return controller, hf, err
global.SetMeterProvider(controller.Provider())
return controller, exp, err
}
// NewExportPipeline sets up a complete export pipeline with the recommended setup,
// chaining a NewRawExporter into the recommended selectors and integrators.
func NewExportPipeline(config Config, period time.Duration) (*push.Controller, http.HandlerFunc, error) {
selector := simple.NewWithHistogramMeasure(config.DefaultHistogramBoundaries)
//
// The returned Controller contains an implementation of
// `metric.Provider`. The controller is returned unstarted and should
// be started by the caller to begin collection.
func NewExportPipeline(config Config, options ...push.Option) (*push.Controller, *Exporter, error) {
exporter, err := NewRawExporter(config)
if err != nil {
return nil, nil, err
}
// Prometheus needs to use a stateful integrator since counters (and histogram since they are a collection of Counters)
// are cumulative (i.e., monotonically increasing values) and should not be resetted after each export.
// Prometheus uses a stateful push controller since instruments are
// cumulative and should not be reset after each collection interval.
//
// Prometheus uses this approach to be resilient to scrape failures.
// If a Prometheus server tries to scrape metrics from a host and fails for some reason,
// it could try again on the next scrape and no data would be lost, only resolution.
//
// Gauges (or LastValues) and Summaries are an exception to this and have different behaviors.
integrator := integrator.New(selector, true)
pusher := push.New(integrator, exporter, period)
pusher.Start()
pusher := push.New(
simple.NewWithHistogramDistribution(config.DefaultHistogramBoundaries),
exporter,
append(options, push.WithStateful(true))...,
)
return pusher, exporter.ServeHTTP, nil
return pusher, exporter, nil
}
// Export exports the provide metric record to prometheus.
func (e *Exporter) Export(_ context.Context, _ *resource.Resource, checkpointSet export.CheckpointSet) error {
func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
// TODO: Use the resource value in this exporter.
e.lock.Lock()
defer e.lock.Unlock()
e.snapshot = checkpointSet
return nil
}
@ -189,10 +195,16 @@ func newCollector(exporter *Exporter) *collector {
}
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
c.exp.lock.RLock()
defer c.exp.lock.RUnlock()
if c.exp.snapshot == nil {
return
}
c.exp.snapshot.RLock()
defer c.exp.snapshot.RUnlock()
_ = c.exp.snapshot.ForEach(func(record export.Record) error {
ch <- c.toDesc(&record)
return nil
@ -204,13 +216,20 @@ func (c *collector) Describe(ch chan<- *prometheus.Desc) {
// Collect is invoked whenever prometheus.Gatherer is also invoked.
// For example, when the HTTP endpoint is invoked by Prometheus.
func (c *collector) Collect(ch chan<- prometheus.Metric) {
c.exp.lock.RLock()
defer c.exp.lock.RUnlock()
if c.exp.snapshot == nil {
return
}
c.exp.snapshot.RLock()
defer c.exp.snapshot.RUnlock()
err := c.exp.snapshot.ForEach(func(record export.Record) error {
agg := record.Aggregator()
numberKind := record.Descriptor().NumberKind()
// TODO: Use the resource value in this record.
labels := labelValues(record.Labels())
desc := c.toDesc(&record)
@ -220,7 +239,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
}
} else if dist, ok := agg.(aggregator.Distribution); ok {
// TODO: summaries values are never being resetted.
// As measures are recorded, new records starts to have less impact on these summaries.
// As measurements are recorded, new records starts to have less impact on these summaries.
// We should implement an solution that is similar to the Prometheus Clients
// using a rolling window for summaries could be a solution.
//

View File

@ -15,40 +15,44 @@
package prometheus_test
import (
"bytes"
"context"
"log"
"io/ioutil"
"net/http"
"net/http/httptest"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/exporters/metric/prometheus"
"go.opentelemetry.io/otel/exporters/metric/test"
exportTest "go.opentelemetry.io/otel/exporters/metric/test"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
controllerTest "go.opentelemetry.io/otel/sdk/metric/controller/test"
)
func TestPrometheusExporter(t *testing.T) {
exporter, err := prometheus.NewRawExporter(prometheus.Config{
DefaultSummaryQuantiles: []float64{0.5, 0.9, 0.99},
})
if err != nil {
log.Panicf("failed to initialize prometheus exporter %v", err)
}
require.NoError(t, err)
var expected []string
checkpointSet := test.NewCheckpointSet()
checkpointSet := exportTest.NewCheckpointSet(nil)
counter := metric.NewDescriptor(
"counter", metric.CounterKind, metric.Float64NumberKind)
lastValue := metric.NewDescriptor(
"lastvalue", metric.ObserverKind, metric.Float64NumberKind)
measure := metric.NewDescriptor(
"measure", metric.MeasureKind, metric.Float64NumberKind)
histogramMeasure := metric.NewDescriptor(
"histogram_measure", metric.MeasureKind, metric.Float64NumberKind)
"lastvalue", metric.ValueObserverKind, metric.Float64NumberKind)
valuerecorder := metric.NewDescriptor(
"valuerecorder", metric.ValueRecorderKind, metric.Float64NumberKind)
histogramValueRecorder := metric.NewDescriptor(
"histogram_valuerecorder", metric.ValueRecorderKind, metric.Float64NumberKind)
labels := []kv.KeyValue{
kv.Key("A").String("B"),
@ -61,26 +65,26 @@ func TestPrometheusExporter(t *testing.T) {
checkpointSet.AddLastValue(&lastValue, 13.2, labels...)
expected = append(expected, `lastvalue{A="B",C="D"} 13.2`)
checkpointSet.AddMeasure(&measure, 13, labels...)
checkpointSet.AddMeasure(&measure, 15, labels...)
checkpointSet.AddMeasure(&measure, 17, labels...)
expected = append(expected, `measure{A="B",C="D",quantile="0.5"} 15`)
expected = append(expected, `measure{A="B",C="D",quantile="0.9"} 17`)
expected = append(expected, `measure{A="B",C="D",quantile="0.99"} 17`)
expected = append(expected, `measure_sum{A="B",C="D"} 45`)
expected = append(expected, `measure_count{A="B",C="D"} 3`)
checkpointSet.AddValueRecorder(&valuerecorder, 13, labels...)
checkpointSet.AddValueRecorder(&valuerecorder, 15, labels...)
checkpointSet.AddValueRecorder(&valuerecorder, 17, labels...)
expected = append(expected, `valuerecorder{A="B",C="D",quantile="0.5"} 15`)
expected = append(expected, `valuerecorder{A="B",C="D",quantile="0.9"} 17`)
expected = append(expected, `valuerecorder{A="B",C="D",quantile="0.99"} 17`)
expected = append(expected, `valuerecorder_sum{A="B",C="D"} 45`)
expected = append(expected, `valuerecorder_count{A="B",C="D"} 3`)
boundaries := []metric.Number{metric.NewFloat64Number(-0.5), metric.NewFloat64Number(1)}
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.6, labels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.4, labels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 0.6, labels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 20, labels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.6, labels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.4, labels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 0.6, labels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 20, labels...)
expected = append(expected, `histogram_measure_bucket{A="B",C="D",le="+Inf"} 4`)
expected = append(expected, `histogram_measure_bucket{A="B",C="D",le="-0.5"} 1`)
expected = append(expected, `histogram_measure_bucket{A="B",C="D",le="1"} 3`)
expected = append(expected, `histogram_measure_count{A="B",C="D"} 4`)
expected = append(expected, `histogram_measure_sum{A="B",C="D"} 19.6`)
expected = append(expected, `histogram_valuerecorder_bucket{A="B",C="D",le="+Inf"} 4`)
expected = append(expected, `histogram_valuerecorder_bucket{A="B",C="D",le="-0.5"} 1`)
expected = append(expected, `histogram_valuerecorder_bucket{A="B",C="D",le="1"} 3`)
expected = append(expected, `histogram_valuerecorder_count{A="B",C="D"} 4`)
expected = append(expected, `histogram_valuerecorder_sum{A="B",C="D"} 19.6`)
missingLabels := []kv.KeyValue{
kv.Key("A").String("E"),
@ -93,31 +97,31 @@ func TestPrometheusExporter(t *testing.T) {
checkpointSet.AddLastValue(&lastValue, 32, missingLabels...)
expected = append(expected, `lastvalue{A="E",C=""} 32`)
checkpointSet.AddMeasure(&measure, 19, missingLabels...)
expected = append(expected, `measure{A="E",C="",quantile="0.5"} 19`)
expected = append(expected, `measure{A="E",C="",quantile="0.9"} 19`)
expected = append(expected, `measure{A="E",C="",quantile="0.99"} 19`)
expected = append(expected, `measure_count{A="E",C=""} 1`)
expected = append(expected, `measure_sum{A="E",C=""} 19`)
checkpointSet.AddValueRecorder(&valuerecorder, 19, missingLabels...)
expected = append(expected, `valuerecorder{A="E",C="",quantile="0.5"} 19`)
expected = append(expected, `valuerecorder{A="E",C="",quantile="0.9"} 19`)
expected = append(expected, `valuerecorder{A="E",C="",quantile="0.99"} 19`)
expected = append(expected, `valuerecorder_count{A="E",C=""} 1`)
expected = append(expected, `valuerecorder_sum{A="E",C=""} 19`)
boundaries = []metric.Number{metric.NewFloat64Number(0), metric.NewFloat64Number(1)}
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.6, missingLabels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.4, missingLabels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, -0.1, missingLabels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 15, missingLabels...)
checkpointSet.AddHistogramMeasure(&histogramMeasure, boundaries, 15, missingLabels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.6, missingLabels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.4, missingLabels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, -0.1, missingLabels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 15, missingLabels...)
checkpointSet.AddHistogramValueRecorder(&histogramValueRecorder, boundaries, 15, missingLabels...)
expected = append(expected, `histogram_measure_bucket{A="E",C="",le="+Inf"} 5`)
expected = append(expected, `histogram_measure_bucket{A="E",C="",le="0"} 3`)
expected = append(expected, `histogram_measure_bucket{A="E",C="",le="1"} 3`)
expected = append(expected, `histogram_measure_count{A="E",C=""} 5`)
expected = append(expected, `histogram_measure_sum{A="E",C=""} 28.9`)
expected = append(expected, `histogram_valuerecorder_bucket{A="E",C="",le="+Inf"} 5`)
expected = append(expected, `histogram_valuerecorder_bucket{A="E",C="",le="0"} 3`)
expected = append(expected, `histogram_valuerecorder_bucket{A="E",C="",le="1"} 3`)
expected = append(expected, `histogram_valuerecorder_count{A="E",C=""} 5`)
expected = append(expected, `histogram_valuerecorder_sum{A="E",C=""} 28.9`)
compareExport(t, exporter, checkpointSet, expected)
}
func compareExport(t *testing.T, exporter *prometheus.Exporter, checkpointSet *test.CheckpointSet, expected []string) {
err := exporter.Export(context.Background(), nil, checkpointSet)
func compareExport(t *testing.T, exporter *prometheus.Exporter, checkpointSet *exportTest.CheckpointSet, expected []string) {
err := exporter.Export(context.Background(), checkpointSet)
require.Nil(t, err)
rec := httptest.NewRecorder()
@ -138,3 +142,58 @@ func compareExport(t *testing.T, exporter *prometheus.Exporter, checkpointSet *t
require.Equal(t, strings.Join(expected, "\n"), strings.Join(metricsOnly, "\n"))
}
func TestPrometheusStatefulness(t *testing.T) {
// Create a meter
controller, exporter, err := prometheus.NewExportPipeline(prometheus.Config{}, push.WithPeriod(time.Minute))
require.NoError(t, err)
meter := controller.Provider().Meter("test")
mock := controllerTest.NewMockClock()
controller.SetClock(mock)
controller.Start()
// GET the HTTP endpoint
scrape := func() string {
var input bytes.Buffer
resp := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/", &input)
require.NoError(t, err)
exporter.ServeHTTP(resp, req)
data, err := ioutil.ReadAll(resp.Result().Body)
require.NoError(t, err)
return string(data)
}
ctx := context.Background()
counter := metric.Must(meter).NewInt64Counter(
"a.counter",
metric.WithDescription("Counts things"),
)
counter.Add(ctx, 100, kv.String("key", "value"))
// Trigger a push
mock.Add(time.Minute)
runtime.Gosched()
require.Equal(t, `# HELP a_counter Counts things
# TYPE a_counter counter
a_counter{key="value"} 100
`, scrape())
counter.Add(ctx, 100, kv.String("key", "value"))
// Again, now expect cumulative count
mock.Add(time.Minute)
runtime.Gosched()
require.Equal(t, `# HELP a_counter Counts things
# TYPE a_counter counter
a_counter{key="value"} 200
`, scrape())
}

View File

@ -17,7 +17,6 @@ package stdout_test
import (
"context"
"log"
"time"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/metric"
@ -29,7 +28,7 @@ func ExampleNewExportPipeline() {
pusher, err := stdout.NewExportPipeline(stdout.Config{
PrettyPrint: true,
DoNotPrintTime: true,
}, time.Minute)
})
if err != nil {
log.Fatal("Could not initialize stdout exporter:", err)
}
@ -38,7 +37,7 @@ func ExampleNewExportPipeline() {
ctx := context.Background()
key := kv.Key("key")
meter := pusher.Meter("example")
meter := pusher.Provider().Meter("example")
// Create and update a single counter:
counter := metric.Must(meter).NewInt64Counter("a.counter")

View File

@ -25,12 +25,10 @@ import (
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/sdk/resource"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
integrator "go.opentelemetry.io/otel/sdk/metric/integrator/simple"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)
@ -53,8 +51,8 @@ type Config struct {
// useful to create deterministic test conditions.
DoNotPrintTime bool
// Quantiles are the desired aggregation quantiles for measure
// metric data, used when the configured aggregator supports
// Quantiles are the desired aggregation quantiles for distribution
// summaries, used when the configured aggregator supports
// quantiles.
//
// Note: this exporter is meant as a demonstration; a real
@ -121,42 +119,48 @@ func NewRawExporter(config Config) (*Exporter, error) {
// }
// defer pipeline.Stop()
// ... Done
func InstallNewPipeline(config Config, opts ...push.Option) (*push.Controller, error) {
controller, err := NewExportPipeline(config, time.Minute, opts...)
func InstallNewPipeline(config Config, options ...push.Option) (*push.Controller, error) {
controller, err := NewExportPipeline(config, options...)
if err != nil {
return controller, err
}
global.SetMeterProvider(controller)
global.SetMeterProvider(controller.Provider())
return controller, err
}
// NewExportPipeline sets up a complete export pipeline with the recommended setup,
// chaining a NewRawExporter into the recommended selectors and integrators.
func NewExportPipeline(config Config, period time.Duration, opts ...push.Option) (*push.Controller, error) {
selector := simple.NewWithExactMeasure()
// NewExportPipeline sets up a complete export pipeline with the
// recommended setup, chaining a NewRawExporter into the recommended
// selectors and integrators.
//
// The pipeline is configured with a stateful integrator unless the
// push.WithStateful(false) option is used.
func NewExportPipeline(config Config, options ...push.Option) (*push.Controller, error) {
exporter, err := NewRawExporter(config)
if err != nil {
return nil, err
}
integrator := integrator.New(selector, true)
pusher := push.New(integrator, exporter, period, opts...)
pusher := push.New(
simple.NewWithExactDistribution(),
exporter,
append([]push.Option{push.WithStateful(true)}, options...)...,
)
pusher.Start()
return pusher, nil
}
func (e *Exporter) Export(_ context.Context, resource *resource.Resource, checkpointSet export.CheckpointSet) error {
func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
var aggError error
var batch expoBatch
if !e.config.DoNotPrintTime {
ts := time.Now()
batch.Timestamp = &ts
}
encodedResource := resource.Encoded(e.config.LabelEncoder)
aggError = checkpointSet.ForEach(func(record export.Record) error {
desc := record.Descriptor()
agg := record.Aggregator()
kind := desc.NumberKind()
encodedResource := record.Resource().Encoded(e.config.LabelEncoder)
var expose expoLine

View File

@ -44,10 +44,11 @@ type testFixture struct {
ctx context.Context
exporter *stdout.Exporter
output *bytes.Buffer
resource *resource.Resource
}
func newFixture(t *testing.T, resource *resource.Resource, config stdout.Config) testFixture {
var testResource = resource.New(kv.String("R", "V"))
func newFixture(t *testing.T, config stdout.Config) testFixture {
buf := &bytes.Buffer{}
config.Writer = buf
config.DoNotPrintTime = true
@ -60,7 +61,6 @@ func newFixture(t *testing.T, resource *resource.Resource, config stdout.Config)
ctx: context.Background(),
exporter: exp,
output: buf,
resource: resource,
}
}
@ -69,7 +69,7 @@ func (fix testFixture) Output() string {
}
func (fix testFixture) Export(checkpointSet export.CheckpointSet) {
err := fix.exporter.Export(fix.ctx, fix.resource, checkpointSet)
err := fix.exporter.Export(fix.ctx, checkpointSet)
if err != nil {
fix.t.Error("export failed: ", err)
}
@ -95,17 +95,17 @@ func TestStdoutTimestamp(t *testing.T) {
before := time.Now()
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
ctx := context.Background()
desc := metric.NewDescriptor("test.name", metric.ObserverKind, metric.Int64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Int64NumberKind)
lvagg := lastvalue.New()
aggtest.CheckedUpdate(t, lvagg, metric.NewInt64Number(321), &desc)
lvagg.Checkpoint(ctx, &desc)
checkpointSet.Add(&desc, lvagg)
if err := exporter.Export(ctx, nil, checkpointSet); err != nil {
if err := exporter.Export(ctx, checkpointSet); err != nil {
t.Fatal("Unexpected export error: ", err)
}
@ -139,9 +139,9 @@ func TestStdoutTimestamp(t *testing.T) {
}
func TestStdoutCounterFormat(t *testing.T) {
fix := newFixture(t, nil, stdout.Config{})
fix := newFixture(t, stdout.Config{})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
desc := metric.NewDescriptor("test.name", metric.CounterKind, metric.Int64NumberKind)
cagg := sum.New()
@ -152,15 +152,15 @@ func TestStdoutCounterFormat(t *testing.T) {
fix.Export(checkpointSet)
require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","sum":123}]}`, fix.Output())
require.Equal(t, `{"updates":[{"name":"test.name{R=V,A=B,C=D}","sum":123}]}`, fix.Output())
}
func TestStdoutLastValueFormat(t *testing.T) {
fix := newFixture(t, nil, stdout.Config{})
fix := newFixture(t, stdout.Config{})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
desc := metric.NewDescriptor("test.name", metric.ObserverKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind)
lvagg := lastvalue.New()
aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc)
lvagg.Checkpoint(fix.ctx, &desc)
@ -169,15 +169,15 @@ func TestStdoutLastValueFormat(t *testing.T) {
fix.Export(checkpointSet)
require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","last":123.456}]}`, fix.Output())
require.Equal(t, `{"updates":[{"name":"test.name{R=V,A=B,C=D}","last":123.456}]}`, fix.Output())
}
func TestStdoutMinMaxSumCount(t *testing.T) {
fix := newFixture(t, nil, stdout.Config{})
fix := newFixture(t, stdout.Config{})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
desc := metric.NewDescriptor("test.name", metric.MeasureKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
magg := minmaxsumcount.New(&desc)
aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(123.456), &desc)
aggtest.CheckedUpdate(fix.t, magg, metric.NewFloat64Number(876.543), &desc)
@ -187,17 +187,17 @@ func TestStdoutMinMaxSumCount(t *testing.T) {
fix.Export(checkpointSet)
require.Equal(t, `{"updates":[{"name":"test.name{A=B,C=D}","min":123.456,"max":876.543,"sum":999.999,"count":2}]}`, fix.Output())
require.Equal(t, `{"updates":[{"name":"test.name{R=V,A=B,C=D}","min":123.456,"max":876.543,"sum":999.999,"count":2}]}`, fix.Output())
}
func TestStdoutMeasureFormat(t *testing.T) {
fix := newFixture(t, nil, stdout.Config{
func TestStdoutValueRecorderFormat(t *testing.T) {
fix := newFixture(t, stdout.Config{
PrettyPrint: true,
})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
desc := metric.NewDescriptor("test.name", metric.MeasureKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
magg := array.New()
for i := 0; i < 1000; i++ {
@ -213,7 +213,7 @@ func TestStdoutMeasureFormat(t *testing.T) {
require.Equal(t, `{
"updates": [
{
"name": "test.name{A=B,C=D}",
"name": "test.name{R=V,A=B,C=D}",
"min": 0.5,
"max": 999.5,
"sum": 500000,
@ -238,7 +238,7 @@ func TestStdoutMeasureFormat(t *testing.T) {
}
func TestStdoutNoData(t *testing.T) {
desc := metric.NewDescriptor("test.name", metric.MeasureKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueRecorderKind, metric.Float64NumberKind)
for name, tc := range map[string]export.Aggregator{
"ddsketch": ddsketch.New(ddsketch.NewDefaultConfig(), &desc),
"minmaxsumcount": minmaxsumcount.New(&desc),
@ -247,9 +247,9 @@ func TestStdoutNoData(t *testing.T) {
t.Run(name, func(t *testing.T) {
t.Parallel()
fix := newFixture(t, nil, stdout.Config{})
fix := newFixture(t, stdout.Config{})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
magg := tc
magg.Checkpoint(fix.ctx, &desc)
@ -264,11 +264,11 @@ func TestStdoutNoData(t *testing.T) {
}
func TestStdoutLastValueNotSet(t *testing.T) {
fix := newFixture(t, nil, stdout.Config{})
fix := newFixture(t, stdout.Config{})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
desc := metric.NewDescriptor("test.name", metric.ObserverKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind)
lvagg := lastvalue.New()
lvagg.Checkpoint(fix.ctx, &desc)
@ -314,11 +314,11 @@ func TestStdoutResource(t *testing.T) {
}
for _, tc := range testCases {
fix := newFixture(t, tc.res, stdout.Config{})
fix := newFixture(t, stdout.Config{})
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(tc.res)
desc := metric.NewDescriptor("test.name", metric.ObserverKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("test.name", metric.ValueObserverKind, metric.Float64NumberKind)
lvagg := lastvalue.New()
aggtest.CheckedUpdate(fix.t, lvagg, metric.NewFloat64Number(123.456), &desc)
lvagg.Checkpoint(fix.ctx, &desc)

View File

@ -17,6 +17,7 @@ package test
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/label"
@ -27,6 +28,7 @@ import (
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/resource"
)
type mapkey struct {
@ -35,15 +37,18 @@ type mapkey struct {
}
type CheckpointSet struct {
records map[mapkey]export.Record
updates []export.Record
sync.RWMutex
records map[mapkey]export.Record
updates []export.Record
resource *resource.Resource
}
// NewCheckpointSet returns a test CheckpointSet that new records could be added.
// Records are grouped by their encoded labels.
func NewCheckpointSet() *CheckpointSet {
func NewCheckpointSet(resource *resource.Resource) *CheckpointSet {
return &CheckpointSet{
records: make(map[mapkey]export.Record),
records: make(map[mapkey]export.Record),
resource: resource,
}
}
@ -67,7 +72,7 @@ func (p *CheckpointSet) Add(desc *metric.Descriptor, newAgg export.Aggregator, l
return record.Aggregator(), false
}
rec := export.NewRecord(desc, &elabels, newAgg)
rec := export.NewRecord(desc, &elabels, p.resource, newAgg)
p.updates = append(p.updates, rec)
p.records[key] = rec
return newAgg, true
@ -88,11 +93,11 @@ func (p *CheckpointSet) AddCounter(desc *metric.Descriptor, v float64, labels ..
p.updateAggregator(desc, sum.New(), v, labels...)
}
func (p *CheckpointSet) AddMeasure(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) {
func (p *CheckpointSet) AddValueRecorder(desc *metric.Descriptor, v float64, labels ...kv.KeyValue) {
p.updateAggregator(desc, array.New(), v, labels...)
}
func (p *CheckpointSet) AddHistogramMeasure(desc *metric.Descriptor, boundaries []metric.Number, v float64, labels ...kv.KeyValue) {
func (p *CheckpointSet) AddHistogramValueRecorder(desc *metric.Descriptor, boundaries []metric.Number, v float64, labels ...kv.KeyValue) {
p.updateAggregator(desc, histogram.New(desc, boundaries), v, labels...)
}

View File

@ -61,7 +61,7 @@ type result struct {
// CheckpointSet transforms all records contained in a checkpoint into
// batched OTLP ResourceMetrics.
func CheckpointSet(ctx context.Context, resource *resource.Resource, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
func CheckpointSet(ctx context.Context, cps export.CheckpointSet, numWorkers uint) ([]*metricpb.ResourceMetrics, error) {
records, errc := source(ctx, cps)
// Start a fixed number of goroutines to transform records.
@ -71,7 +71,7 @@ func CheckpointSet(ctx context.Context, resource *resource.Resource, cps export.
for i := uint(0); i < numWorkers; i++ {
go func() {
defer wg.Done()
transformer(ctx, resource, records, transformed)
transformer(ctx, records, transformed)
}()
}
go func() {
@ -116,7 +116,7 @@ func source(ctx context.Context, cps export.CheckpointSet) (<-chan export.Record
// transformer transforms records read from the passed in chan into
// OTLP Metrics which are sent on the out chan.
func transformer(ctx context.Context, resource *resource.Resource, in <-chan export.Record, out chan<- result) {
func transformer(ctx context.Context, in <-chan export.Record, out chan<- result) {
for r := range in {
m, err := Record(r)
// Propagate errors, but do not send empty results.
@ -124,7 +124,7 @@ func transformer(ctx context.Context, resource *resource.Resource, in <-chan exp
continue
}
res := result{
Resource: resource,
Resource: r.Resource(),
Library: r.Descriptor().LibraryName(),
Metric: m,
Err: err,

View File

@ -111,7 +111,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) {
}{
{
"mmsc-test-a",
metric.MeasureKind,
metric.ValueRecorderKind,
"test-a-description",
unit.Dimensionless,
metric.Int64NumberKind,
@ -160,7 +160,7 @@ func TestMinMaxSumCountMetricDescriptor(t *testing.T) {
}
func TestMinMaxSumCountDatapoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.MeasureKind, metric.Int64NumberKind)
desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Int64NumberKind)
labels := label.NewSet()
mmsc := minmaxsumcount.New(&desc)
assert.NoError(t, mmsc.Update(context.Background(), 1, &desc))
@ -228,7 +228,7 @@ func TestSumMetricDescriptor(t *testing.T) {
},
{
"sum-test-b",
metric.MeasureKind, // This shouldn't change anything.
metric.ValueRecorderKind, // This shouldn't change anything.
"test-b-description",
unit.Milliseconds,
metric.Float64NumberKind,
@ -257,7 +257,7 @@ func TestSumMetricDescriptor(t *testing.T) {
}
func TestSumInt64DataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.MeasureKind, metric.Int64NumberKind)
desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Int64NumberKind)
labels := label.NewSet()
s := sumAgg.New()
assert.NoError(t, s.Update(context.Background(), metric.Number(1), &desc))
@ -271,7 +271,7 @@ func TestSumInt64DataPoints(t *testing.T) {
}
func TestSumFloat64DataPoints(t *testing.T) {
desc := metric.NewDescriptor("", metric.MeasureKind, metric.Float64NumberKind)
desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.Float64NumberKind)
labels := label.NewSet()
s := sumAgg.New()
assert.NoError(t, s.Update(context.Background(), metric.NewFloat64Number(1), &desc))
@ -285,7 +285,7 @@ func TestSumFloat64DataPoints(t *testing.T) {
}
func TestSumErrUnknownValueType(t *testing.T) {
desc := metric.NewDescriptor("", metric.MeasureKind, metric.NumberKind(-1))
desc := metric.NewDescriptor("", metric.ValueRecorderKind, metric.NumberKind(-1))
labels := label.NewSet()
s := sumAgg.New()
_, err := sum(&desc, &labels, s)

View File

@ -31,7 +31,6 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/internal/transform"
metricsdk "go.opentelemetry.io/otel/sdk/export/metric"
tracesdk "go.opentelemetry.io/otel/sdk/export/trace"
"go.opentelemetry.io/otel/sdk/resource"
)
type Exporter struct {
@ -212,7 +211,7 @@ func (e *Exporter) Stop() error {
// Export implements the "go.opentelemetry.io/otel/sdk/export/metric".Exporter
// interface. It transforms and batches metric Records into OTLP Metrics and
// transmits them to the configured collector.
func (e *Exporter) Export(parent context.Context, resource *resource.Resource, cps metricsdk.CheckpointSet) error {
func (e *Exporter) Export(parent context.Context, cps metricsdk.CheckpointSet) error {
// Unify the parent context Done signal with the exporter stopCh.
ctx, cancel := context.WithCancel(parent)
defer cancel()
@ -224,7 +223,7 @@ func (e *Exporter) Export(parent context.Context, resource *resource.Resource, c
}
}(ctx, cancel)
rms, err := transform.CheckpointSet(ctx, resource, cps, e.c.numWorkers)
rms, err := transform.CheckpointSet(ctx, cps, e.c.numWorkers)
if err != nil {
return err
}

View File

@ -109,13 +109,13 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
span.End()
}
selector := simple.NewWithExactMeasure()
selector := simple.NewWithExactDistribution()
integrator := integrator.New(selector, true)
pusher := push.New(integrator, exp, 60*time.Second)
pusher := push.New(integrator, exp)
pusher.Start()
ctx := context.Background()
meter := pusher.Meter("test-meter")
meter := pusher.Provider().Meter("test-meter")
labels := []kv.KeyValue{kv.Bool("test", true)}
type data struct {
@ -124,12 +124,12 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
val int64
}
instruments := map[string]data{
"test-int64-counter": {metric.CounterKind, metricapi.Int64NumberKind, 1},
"test-float64-counter": {metric.CounterKind, metricapi.Float64NumberKind, 1},
"test-int64-measure": {metric.MeasureKind, metricapi.Int64NumberKind, 2},
"test-float64-measure": {metric.MeasureKind, metricapi.Float64NumberKind, 2},
"test-int64-observer": {metric.ObserverKind, metricapi.Int64NumberKind, 3},
"test-float64-observer": {metric.ObserverKind, metricapi.Float64NumberKind, 3},
"test-int64-counter": {metric.CounterKind, metricapi.Int64NumberKind, 1},
"test-float64-counter": {metric.CounterKind, metricapi.Float64NumberKind, 1},
"test-int64-valuerecorder": {metric.ValueRecorderKind, metricapi.Int64NumberKind, 2},
"test-float64-valuerecorder": {metric.ValueRecorderKind, metricapi.Float64NumberKind, 2},
"test-int64-valueobserver": {metric.ValueObserverKind, metricapi.Int64NumberKind, 3},
"test-float64-valueobserver": {metric.ValueObserverKind, metricapi.Float64NumberKind, 3},
}
for name, data := range instruments {
switch data.iKind {
@ -142,27 +142,27 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case metric.MeasureKind:
case metric.ValueRecorderKind:
switch data.nKind {
case metricapi.Int64NumberKind:
metricapi.Must(meter).NewInt64Measure(name).Record(ctx, data.val, labels...)
metricapi.Must(meter).NewInt64ValueRecorder(name).Record(ctx, data.val, labels...)
case metricapi.Float64NumberKind:
metricapi.Must(meter).NewFloat64Measure(name).Record(ctx, float64(data.val), labels...)
metricapi.Must(meter).NewFloat64ValueRecorder(name).Record(ctx, float64(data.val), labels...)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
case metric.ObserverKind:
case metric.ValueObserverKind:
switch data.nKind {
case metricapi.Int64NumberKind:
callback := func(v int64) metricapi.Int64ObserverCallback {
return metricapi.Int64ObserverCallback(func(result metricapi.Int64ObserverResult) { result.Observe(v, labels...) })
return metricapi.Int64ObserverCallback(func(_ context.Context, result metricapi.Int64ObserverResult) { result.Observe(v, labels...) })
}(data.val)
metricapi.Must(meter).RegisterInt64Observer(name, callback)
metricapi.Must(meter).RegisterInt64ValueObserver(name, callback)
case metricapi.Float64NumberKind:
callback := func(v float64) metricapi.Float64ObserverCallback {
return metricapi.Float64ObserverCallback(func(result metricapi.Float64ObserverResult) { result.Observe(v, labels...) })
return metricapi.Float64ObserverCallback(func(_ context.Context, result metricapi.Float64ObserverResult) { result.Observe(v, labels...) })
}(float64(data.val))
metricapi.Must(meter).RegisterFloat64Observer(name, callback)
metricapi.Must(meter).RegisterFloat64ValueObserver(name, callback)
default:
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
}
@ -246,7 +246,7 @@ func newExporterEndToEndTest(t *testing.T, additionalOpts []otlp.ExporterOption)
default:
assert.Failf(t, "invalid number kind", data.nKind.String())
}
case metric.MeasureKind, metric.ObserverKind:
case metric.ValueRecorderKind, metric.ValueObserverKind:
assert.Equal(t, metricpb.MetricDescriptor_SUMMARY.String(), desc.GetType().String())
m.GetSummaryDataPoints()
if dp := m.GetSummaryDataPoints(); assert.Len(t, dp, 1) {

View File

@ -16,6 +16,7 @@ package otlp
import (
"context"
"sync"
"testing"
colmetricpb "github.com/open-telemetry/opentelemetry-proto/gen/go/collector/metrics/v1"
@ -60,10 +61,11 @@ func (m *metricsServiceClientStub) Reset() {
}
type checkpointSet struct {
sync.RWMutex
records []metricsdk.Record
}
func (m checkpointSet) ForEach(fn func(metricsdk.Record) error) error {
func (m *checkpointSet) ForEach(fn func(metricsdk.Record) error) error {
for _, r := range m.records {
if err := fn(r); err != nil && err != aggregator.ErrNoData {
return err
@ -188,10 +190,10 @@ func TestNoGroupingExport(t *testing.T) {
)
}
func TestMeasureMetricGroupingExport(t *testing.T) {
func TestValuerecorderMetricGroupingExport(t *testing.T) {
r := record{
"measure",
metric.MeasureKind,
"valuerecorder",
metric.ValueRecorderKind,
metric.Int64NumberKind,
nil,
nil,
@ -205,7 +207,7 @@ func TestMeasureMetricGroupingExport(t *testing.T) {
Metrics: []*metricpb.Metric{
{
MetricDescriptor: &metricpb.MetricDescriptor{
Name: "measure",
Name: "valuerecorder",
Type: metricpb.MetricDescriptor_SUMMARY,
Labels: []*commonpb.StringKeyValue{
{
@ -659,11 +661,10 @@ func runMetricExportTest(t *testing.T, exp *Exporter, rs []record, expected []me
equiv := r.resource.Equivalent()
resources[equiv] = r.resource
recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, agg))
recs[equiv] = append(recs[equiv], metricsdk.NewRecord(&desc, &labs, r.resource, agg))
}
for equiv, records := range recs {
resource := resources[equiv]
assert.NoError(t, exp.Export(context.Background(), resource, checkpointSet{records: records}))
for _, records := range recs {
assert.NoError(t, exp.Export(context.Background(), &checkpointSet{records: records}))
}
// assert.ElementsMatch does not equate nested slices of different order,
@ -713,8 +714,6 @@ func TestEmptyMetricExport(t *testing.T) {
exp.metricExporter = msc
exp.started = true
resource := resource.New(kv.String("R", "S"))
for _, test := range []struct {
records []metricsdk.Record
want []metricpb.ResourceMetrics
@ -729,7 +728,7 @@ func TestEmptyMetricExport(t *testing.T) {
},
} {
msc.Reset()
require.NoError(t, exp.Export(context.Background(), resource, checkpointSet{records: test.records}))
require.NoError(t, exp.Export(context.Background(), &checkpointSet{records: test.records}))
assert.Equal(t, test.want, msc.ResourceMetrics())
}
}

View File

@ -15,6 +15,7 @@
package metric
import (
"context"
"errors"
"fmt"
"os"
@ -133,7 +134,7 @@ func (a *AsyncInstrumentState) Register(inst metric.AsyncImpl, runner metric.Asy
}
// Run executes the complete set of observer callbacks.
func (a *AsyncInstrumentState) Run(collector AsyncCollector) {
func (a *AsyncInstrumentState) Run(ctx context.Context, collector AsyncCollector) {
a.lock.Lock()
runners := a.runners
a.lock.Unlock()
@ -144,12 +145,12 @@ func (a *AsyncInstrumentState) Run(collector AsyncCollector) {
// interface has un-exported methods.
if singleRunner, ok := rp.runner.(metric.AsyncSingleRunner); ok {
singleRunner.Run(rp.inst, collector.CollectAsync)
singleRunner.Run(ctx, rp.inst, collector.CollectAsync)
continue
}
if multiRunner, ok := rp.runner.(metric.AsyncBatchRunner); ok {
multiRunner.Run(collector.CollectAsync)
multiRunner.Run(ctx, collector.CollectAsync)
continue
}

View File

@ -38,13 +38,6 @@ type (
LibraryName string
}
MeterProvider struct {
lock sync.Mutex
impl *MeterImpl
unique metric.MeterImpl
registered map[string]apimetric.Meter
}
MeterImpl struct {
lock sync.Mutex
@ -123,24 +116,7 @@ func NewProvider() (*MeterImpl, apimetric.Provider) {
impl := &MeterImpl{
asyncInstruments: NewAsyncInstrumentState(nil),
}
p := &MeterProvider{
impl: impl,
unique: registry.NewUniqueInstrumentMeterImpl(impl),
registered: map[string]apimetric.Meter{},
}
return impl, p
}
func (p *MeterProvider) Meter(name string) apimetric.Meter {
p.lock.Lock()
defer p.lock.Unlock()
if lookup, ok := p.registered[name]; ok {
return lookup
}
m := apimetric.WrapMeterImpl(p.unique, name)
p.registered[name] = m
return m
return impl, registry.NewProvider(impl)
}
func NewMeter() (*MeterImpl, apimetric.Meter) {
@ -211,5 +187,5 @@ func (m *MeterImpl) collect(ctx context.Context, labels []kv.KeyValue, measureme
}
func (m *MeterImpl) RunAsyncInstruments() {
m.asyncInstruments.Run(m)
m.asyncInstruments.Run(context.Background(), m)
}

View File

@ -43,9 +43,29 @@ var (
messageUncompressedSizeKey = kv.Key("message.uncompressed_size")
)
type messageType string
// Event adds an event of the messageType to the span associated with the
// passed context with id and size (if message is a proto message).
func (m messageType) Event(ctx context.Context, id int, message interface{}) {
span := trace.SpanFromContext(ctx)
if p, ok := message.(proto.Message); ok {
span.AddEvent(ctx, "message",
messageTypeKey.String(string(m)),
messageIDKey.Int(id),
messageUncompressedSizeKey.Int(proto.Size(p)),
)
} else {
span.AddEvent(ctx, "message",
messageTypeKey.String(string(m)),
messageIDKey.Int(id),
)
}
}
const (
messageTypeSent = "SENT"
messageTypeReceived = "RECEIVED"
messageSent messageType = "SENT"
messageReceived messageType = "RECEIVED"
)
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
@ -80,11 +100,11 @@ func UnaryClientInterceptor(tracer trace.Tracer) grpc.UnaryClientInterceptor {
Inject(ctx, &metadataCopy)
ctx = metadata.NewOutgoingContext(ctx, metadataCopy)
addEventForMessageSent(ctx, 1, req)
messageSent.Event(ctx, 1, req)
err := invoker(ctx, method, req, reply, cc, opts...)
addEventForMessageReceived(ctx, 1, reply)
messageReceived.Event(ctx, 1, reply)
if err != nil {
s, _ := status.FromError(err)
@ -134,7 +154,7 @@ func (w *clientStream) RecvMsg(m interface{}) error {
w.events <- streamEvent{errorEvent, err}
} else {
w.receivedMessageID++
addEventForMessageReceived(w.Context(), w.receivedMessageID, m)
messageReceived.Event(w.Context(), w.receivedMessageID, m)
}
return err
@ -144,7 +164,7 @@ func (w *clientStream) SendMsg(m interface{}) error {
err := w.ClientStream.SendMsg(m)
w.sentMessageID++
addEventForMessageSent(w.Context(), w.sentMessageID, m)
messageSent.Event(w.Context(), w.sentMessageID, m)
if err != nil {
w.events <- streamEvent{errorEvent, err}
@ -297,15 +317,15 @@ func UnaryServerInterceptor(tracer trace.Tracer) grpc.UnaryServerInterceptor {
)
defer span.End()
addEventForMessageReceived(ctx, 1, req)
messageReceived.Event(ctx, 1, req)
resp, err := handler(ctx, req)
addEventForMessageSent(ctx, 1, resp)
if err != nil {
s, _ := status.FromError(err)
span.SetStatus(s.Code(), s.Message())
messageSent.Event(ctx, 1, s.Proto())
} else {
messageSent.Event(ctx, 1, resp)
}
return resp, err
@ -331,7 +351,7 @@ func (w *serverStream) RecvMsg(m interface{}) error {
if err == nil {
w.receivedMessageID++
addEventForMessageReceived(w.Context(), w.receivedMessageID, m)
messageReceived.Event(w.Context(), w.receivedMessageID, m)
}
return err
@ -341,7 +361,7 @@ func (w *serverStream) SendMsg(m interface{}) error {
err := w.ServerStream.SendMsg(m)
w.sentMessageID++
addEventForMessageSent(w.Context(), w.sentMessageID, m)
messageSent.Event(w.Context(), w.sentMessageID, m)
return err
}
@ -435,25 +455,3 @@ func serviceFromFullMethod(method string) string {
return match[1]
}
func addEventForMessageReceived(ctx context.Context, id int, m interface{}) {
size := proto.Size(m.(proto.Message))
span := trace.SpanFromContext(ctx)
span.AddEvent(ctx, "message",
messageTypeKey.String(messageTypeReceived),
messageIDKey.Int(id),
messageUncompressedSizeKey.Int(size),
)
}
func addEventForMessageSent(ctx context.Context, id int, m interface{}) {
size := proto.Size(m.(proto.Message))
span := trace.SpanFromContext(ctx)
span.AddEvent(ctx, "message",
messageTypeKey.String(messageTypeSent),
messageIDKey.Int(id),
messageUncompressedSizeKey.Int(size),
)
}

View File

@ -15,127 +15,399 @@ package grpctrace
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/kv/value"
export "go.opentelemetry.io/otel/sdk/export/trace"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
type testExporter struct {
spanMap map[string][]*export.SpanData
mu sync.Mutex
spanMap map[string]*export.SpanData
}
func (t *testExporter) ExportSpan(ctx context.Context, s *export.SpanData) {
t.spanMap[s.Name] = append(t.spanMap[s.Name], s)
t.mu.Lock()
defer t.mu.Unlock()
t.spanMap[s.Name] = s
}
type mockCCInvoker struct {
type mockUICInvoker struct {
ctx context.Context
}
func (mcci *mockCCInvoker) invoke(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
mcci.ctx = ctx
func (mcuici *mockUICInvoker) invoker(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, opts ...grpc.CallOption) error {
mcuici.ctx = ctx
return nil
}
type mockProtoMessage struct {
type mockProtoMessage struct{}
func (mm *mockProtoMessage) Reset() {
}
func (mm *mockProtoMessage) Reset() {}
func (mm *mockProtoMessage) String() string { return "mock" }
func (mm *mockProtoMessage) ProtoMessage() {}
type nameAttributeTestCase struct {
testName string
expectedName string
fullNameFmt string
func (mm *mockProtoMessage) String() string {
return "mock"
}
func (tc nameAttributeTestCase) fullName() string {
return fmt.Sprintf(tc.fullNameFmt, tc.expectedName)
func (mm *mockProtoMessage) ProtoMessage() {
}
func TestUCISetsExpectedServiceNameAttribute(t *testing.T) {
testCases := []nameAttributeTestCase{
{
"FullyQualifiedMethodName",
"serviceName",
"/github.com.foo.%s/bar",
},
{
"SimpleMethodName",
"serviceName",
"/%s/bar",
},
{
"MethodNameWithoutFullPath",
"serviceName",
"%s/bar",
},
{
"InvalidMethodName",
"",
"invalidName",
},
{
"NonAlphanumericMethodName",
"serviceName_123",
"/github.com.foo.%s/method",
},
}
for _, tc := range testCases {
t.Run(tc.testName, tc.testUCISetsExpectedNameAttribute)
}
}
func (tc nameAttributeTestCase) testUCISetsExpectedNameAttribute(t *testing.T) {
exp := &testExporter{make(map[string][]*export.SpanData)}
func TestUnaryClientInterceptor(t *testing.T) {
exp := &testExporter{spanMap: make(map[string]*export.SpanData)}
tp, _ := sdktrace.NewProvider(sdktrace.WithSyncer(exp),
sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}))
tr := tp.Tracer("grpctrace/client")
ctx, span := tr.Start(context.Background(), tc.testName)
defer span.End()
sdktrace.WithConfig(sdktrace.Config{
DefaultSampler: sdktrace.AlwaysSample(),
},
))
clientConn, err := grpc.Dial("fake:connection", grpc.WithInsecure())
if err != nil {
t.Fatalf("failed to create client connection: %v", err)
}
unaryInt := UnaryClientInterceptor(tr)
tracer := tp.Tracer("grpctrace/client")
unaryInterceptor := UnaryClientInterceptor(tracer)
req := &mockProtoMessage{}
reply := &mockProtoMessage{}
ccInvoker := &mockCCInvoker{}
uniInterceptorInvoker := &mockUICInvoker{}
err = unaryInt(ctx, tc.fullName(), req, reply, clientConn, ccInvoker.invoke)
checks := []struct {
name string
expectedAttr map[kv.Key]value.Value
eventsAttr []map[kv.Key]value.Value
}{
{
name: "/github.com.serviceName/bar",
expectedAttr: map[kv.Key]value.Value{
rpcServiceKey: value.String("serviceName"),
netPeerIPKey: value.String("fake"),
netPeerPortKey: value.String("connection"),
},
eventsAttr: []map[kv.Key]value.Value{
{
messageTypeKey: value.String("SENT"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(req))),
},
{
messageTypeKey: value.String("RECEIVED"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(reply))),
},
},
},
{
name: "/serviceName/bar",
expectedAttr: map[kv.Key]value.Value{
rpcServiceKey: value.String("serviceName"),
netPeerIPKey: value.String("fake"),
netPeerPortKey: value.String("connection"),
},
eventsAttr: []map[kv.Key]value.Value{
{
messageTypeKey: value.String("SENT"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(req))),
},
{
messageTypeKey: value.String("RECEIVED"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(reply))),
},
},
},
{
name: "serviceName/bar",
expectedAttr: map[kv.Key]value.Value{
rpcServiceKey: value.String("serviceName"),
netPeerIPKey: value.String("fake"),
netPeerPortKey: value.String("connection"),
},
eventsAttr: []map[kv.Key]value.Value{
{
messageTypeKey: value.String("SENT"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(req))),
},
{
messageTypeKey: value.String("RECEIVED"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(reply))),
},
},
},
{
name: "invalidName",
expectedAttr: map[kv.Key]value.Value{
rpcServiceKey: value.String(""),
netPeerIPKey: value.String("fake"),
netPeerPortKey: value.String("connection"),
},
eventsAttr: []map[kv.Key]value.Value{
{
messageTypeKey: value.String("SENT"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(req))),
},
{
messageTypeKey: value.String("RECEIVED"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(reply))),
},
},
},
{
name: "/github.com.foo.serviceName_123/method",
expectedAttr: map[kv.Key]value.Value{
rpcServiceKey: value.String("serviceName_123"),
netPeerIPKey: value.String("fake"),
netPeerPortKey: value.String("connection"),
},
eventsAttr: []map[kv.Key]value.Value{
{
messageTypeKey: value.String("SENT"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(req))),
},
{
messageTypeKey: value.String("RECEIVED"),
messageIDKey: value.Int(1),
messageUncompressedSizeKey: value.Int(proto.Size(proto.Message(reply))),
},
},
},
}
for _, check := range checks {
err = unaryInterceptor(context.Background(), check.name, req, reply, clientConn, uniInterceptorInvoker.invoker)
if err != nil {
t.Errorf("failed to run unary interceptor: %v", err)
continue
}
spanData, ok := exp.spanMap[check.name]
if !ok {
t.Errorf("no span data found for name < %s >", check.name)
continue
}
attrs := spanData.Attributes
if len(check.expectedAttr) > len(attrs) {
t.Errorf("attributes received are less than expected attributes, received %d, expected %d",
len(attrs), len(check.expectedAttr))
}
for _, attr := range attrs {
expectedAttr, ok := check.expectedAttr[attr.Key]
if ok {
if expectedAttr != attr.Value {
t.Errorf("name: %s invalid %s found. expected %s, actual %s", check.name, string(attr.Key),
expectedAttr.AsString(), attr.Value.AsString())
}
delete(check.expectedAttr, attr.Key)
} else {
t.Errorf("attribute %s not found in expected attributes map", string(attr.Key))
}
}
// Check if any expected attr not seen
if len(check.expectedAttr) > 0 {
for attr := range check.expectedAttr {
t.Errorf("missing attribute %s in span", string(attr))
}
}
events := spanData.MessageEvents
if len(check.eventsAttr) > len(events) {
t.Errorf("events received are less than expected events, received %d, expected %d",
len(events), len(check.eventsAttr))
}
for event := 0; event < len(check.eventsAttr); event++ {
for _, attr := range events[event].Attributes {
expectedAttr, ok := check.eventsAttr[event][attr.Key]
if ok {
if attr.Value != expectedAttr {
t.Errorf("invalid value for attribute %s in events, expected %s actual %s",
string(attr.Key), attr.Value.AsString(), expectedAttr.AsString())
}
delete(check.eventsAttr[event], attr.Key)
} else {
t.Errorf("attribute in event %s not found in expected attributes map", string(attr.Key))
}
}
if len(check.eventsAttr[event]) > 0 {
for attr := range check.eventsAttr[event] {
t.Errorf("missing attribute %s in span event", string(attr))
}
}
}
}
}
type mockClientStream struct {
Desc *grpc.StreamDesc
Ctx context.Context
}
func (mockClientStream) SendMsg(m interface{}) error { return nil }
func (mockClientStream) RecvMsg(m interface{}) error { return nil }
func (mockClientStream) CloseSend() error { return nil }
func (c mockClientStream) Context() context.Context { return c.Ctx }
func (mockClientStream) Header() (metadata.MD, error) { return nil, nil }
func (mockClientStream) Trailer() metadata.MD { return nil }
func TestStreamClientInterceptor(t *testing.T) {
exp := &testExporter{spanMap: make(map[string]*export.SpanData)}
tp, _ := sdktrace.NewProvider(sdktrace.WithSyncer(exp),
sdktrace.WithConfig(sdktrace.Config{
DefaultSampler: sdktrace.AlwaysSample(),
},
))
clientConn, err := grpc.Dial("fake:connection", grpc.WithInsecure())
if err != nil {
t.Fatalf("failed to run unary interceptor: %v", err)
t.Fatalf("failed to create client connection: %v", err)
}
spanData, hasSpanData := exp.spanMap[tc.fullName()]
// tracer
tracer := tp.Tracer("grpctrace/Server")
streamCI := StreamClientInterceptor(tracer)
if !hasSpanData || len(spanData) == 0 {
t.Fatalf("no span data found for name < %s >", tc.fullName())
var mockClStr mockClientStream
methodName := "/github.com.serviceName/bar"
streamClient, err := streamCI(context.Background(),
&grpc.StreamDesc{ServerStreams: true},
clientConn,
methodName,
func(ctx context.Context,
desc *grpc.StreamDesc,
cc *grpc.ClientConn,
method string,
opts ...grpc.CallOption) (grpc.ClientStream, error) {
mockClStr = mockClientStream{Desc: desc, Ctx: ctx}
return mockClStr, nil
})
if err != nil {
t.Fatalf("failed to initialize grpc stream client: %v", err)
}
attributes := spanData[0].Attributes
// no span exported while stream is open
if _, ok := exp.spanMap[methodName]; ok {
t.Fatalf("span shouldn't end while stream is open")
}
var actualServiceName string
for _, attr := range attributes {
if attr.Key == rpcServiceKey {
actualServiceName = attr.Value.AsString()
req := &mockProtoMessage{}
reply := &mockProtoMessage{}
// send and receive fake data
for i := 0; i < 10; i++ {
_ = streamClient.SendMsg(req)
_ = streamClient.RecvMsg(reply)
}
// close client and server stream
_ = streamClient.CloseSend()
mockClStr.Desc.ServerStreams = false
_ = streamClient.RecvMsg(reply)
// added retry because span end is called in separate go routine
var spanData *export.SpanData
for retry := 0; retry < 5; retry++ {
ok := false
exp.mu.Lock()
spanData, ok = exp.spanMap[methodName]
exp.mu.Unlock()
if ok {
break
}
time.Sleep(time.Second * 1)
}
if spanData == nil {
t.Fatalf("no span data found for name < %s >", methodName)
}
attrs := spanData.Attributes
expectedAttr := map[kv.Key]string{
rpcServiceKey: "serviceName",
netPeerIPKey: "fake",
netPeerPortKey: "connection",
}
for _, attr := range attrs {
expected, ok := expectedAttr[attr.Key]
if ok {
if expected != attr.Value.AsString() {
t.Errorf("name: %s invalid %s found. expected %s, actual %s", methodName, string(attr.Key),
expected, attr.Value.AsString())
}
}
}
if tc.expectedName != actualServiceName {
t.Fatalf("invalid service name found. expected %s, actual %s",
tc.expectedName, actualServiceName)
events := spanData.MessageEvents
if len(events) != 20 {
t.Fatalf("incorrect number of events expected 20 got %d", len(events))
}
for i := 0; i < 20; i += 2 {
msgID := i/2 + 1
validate := func(eventName string, attrs []kv.KeyValue) {
for _, attr := range attrs {
if attr.Key == messageTypeKey && attr.Value.AsString() != eventName {
t.Errorf("invalid event on index: %d expecting %s event, receive %s event", i, eventName, attr.Value.AsString())
}
if attr.Key == messageIDKey && attr.Value != value.Int(msgID) {
t.Errorf("invalid id for message event expected %d received %d", msgID, attr.Value.AsInt32())
}
}
}
validate("SENT", events[i].Attributes)
validate("RECEIVED", events[i+1].Attributes)
}
}
func TestServerInterceptorError(t *testing.T) {
exp := &testExporter{spanMap: make(map[string]*export.SpanData)}
tp, err := sdktrace.NewProvider(
sdktrace.WithSyncer(exp),
sdktrace.WithConfig(sdktrace.Config{
DefaultSampler: sdktrace.AlwaysSample(),
}),
)
require.NoError(t, err)
tracer := tp.Tracer("grpctrace/Server")
usi := UnaryServerInterceptor(tracer)
deniedErr := status.Error(codes.PermissionDenied, "PERMISSION_DENIED_TEXT")
handler := func(_ context.Context, _ interface{}) (interface{}, error) {
return nil, deniedErr
}
_, err = usi(context.Background(), &mockProtoMessage{}, &grpc.UnaryServerInfo{}, handler)
require.Error(t, err)
assert.Equal(t, err, deniedErr)
span, ok := exp.spanMap[""]
if !ok {
t.Fatalf("failed to export error span")
}
assert.Equal(t, span.StatusCode, codes.PermissionDenied)
assert.Contains(t, deniedErr.Error(), span.StatusMessage)
assert.Len(t, span.MessageEvents, 2)
assert.Equal(t, []kv.KeyValue{
kv.String("message.type", "SENT"),
kv.Int("message.id", 1),
kv.Int("message.uncompressed_size", 26),
}, span.MessageEvents[1].Attributes)
}

View File

@ -116,7 +116,7 @@ func NewInconsistentMergeError(a1, a2 export.Aggregator) error {
// RangeTest is a commmon routine for testing for valid input values.
// This rejects NaN values. This rejects negative values when the
// metric instrument does not support negative values, including
// monotonic counter metrics and absolute measure metrics.
// monotonic counter metrics and absolute ValueRecorder metrics.
func RangeTest(number metric.Number, descriptor *metric.Descriptor) error {
numberKind := descriptor.NumberKind()
@ -125,7 +125,7 @@ func RangeTest(number metric.Number, descriptor *metric.Descriptor) error {
}
switch descriptor.MetricKind() {
case metric.CounterKind:
case metric.CounterKind, metric.SumObserverKind:
if number.IsNegative(numberKind) {
return ErrNegativeInput
}

View File

@ -86,8 +86,8 @@ func TestNaNTest(t *testing.T) {
t.Run(nkind.String(), func(t *testing.T) {
for _, mkind := range []metric.Kind{
metric.CounterKind,
metric.MeasureKind,
metric.ObserverKind,
metric.ValueRecorderKind,
metric.ValueObserverKind,
} {
desc := metric.NewDescriptor(
"name",

View File

@ -16,6 +16,7 @@ package metric // import "go.opentelemetry.io/otel/sdk/export/metric"
import (
"context"
"sync"
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
@ -39,10 +40,6 @@ import (
// single-threaded context from the SDK, after the aggregator is
// checkpointed, allowing the integrator to build the set of metrics
// currently being exported.
//
// The `CheckpointSet` method is called during collection in a
// single-threaded context from the Exporter, giving the exporter
// access to a producer for iterating over the complete checkpoint.
type Integrator interface {
// AggregationSelector is responsible for selecting the
// concrete type of Aggregator used for a metric in the SDK.
@ -70,17 +67,6 @@ type Integrator interface {
// The Context argument originates from the controller that
// orchestrates collection.
Process(ctx context.Context, record Record) error
// CheckpointSet is the interface used by the controller to
// access the fully aggregated checkpoint after collection.
//
// The returned CheckpointSet is passed to the Exporter.
CheckpointSet() CheckpointSet
// FinishedCollection informs the Integrator that a complete
// collection round was completed. Stateless integrators might
// reset state in this method, for example.
FinishedCollection()
}
// AggregationSelector supports selecting the kind of Aggregator to
@ -100,22 +86,16 @@ type AggregationSelector interface {
}
// Aggregator implements a specific aggregation behavior, e.g., a
// behavior to track a sequence of updates to a counter, a measure, or
// an observer instrument. For the most part, counter semantics are
// fixed and the provided implementation should be used. Measure and
// observer metrics offer a wide range of potential tradeoffs and
// several implementations are provided.
//
// Aggregators are meant to compute the change (i.e., delta) in state
// from one checkpoint to the next, with the exception of LastValue
// aggregators. LastValue aggregators are required to maintain the last
// value across checkpoints.
// behavior to track a sequence of updates to an instrument. Sum-only
// instruments commonly use a simple Sum aggregator, but for the
// distribution instruments (ValueRecorder, ValueObserver) there are a
// number of possible aggregators with different cost and accuracy
// tradeoffs.
//
// Note that any Aggregator may be attached to any instrument--this is
// the result of the OpenTelemetry API/SDK separation. It is possible
// to attach a counter aggregator to a Measure instrument (to compute
// a simple sum) or a LastValue aggregator to a measure instrument (to
// compute the last value).
// to attach a Sum aggregator to a ValueRecorder instrument or a
// MinMaxSumCount aggregator to a Counter instrument.
type Aggregator interface {
// Update receives a new measured value and incorporates it
// into the aggregation. Update() calls may arrive
@ -160,12 +140,9 @@ type Exporter interface {
// The Context comes from the controller that initiated
// collection.
//
// The Resource contains common attributes that apply to all
// metric events in the SDK.
//
// The CheckpointSet interface refers to the Integrator that just
// completed collection.
Export(context.Context, *resource.Resource, CheckpointSet) error
Export(context.Context, CheckpointSet) error
}
// CheckpointSet allows a controller to access a complete checkpoint of
@ -182,6 +159,19 @@ type CheckpointSet interface {
// of error will immediately halt ForEach and return
// the error to the caller.
ForEach(func(Record) error) error
// Locker supports locking the checkpoint set. Collection
// into the checkpoint set cannot take place (in case of a
// stateful integrator) while it is locked.
//
// The Integrator attached to the Accumulator MUST be called
// with the lock held.
sync.Locker
// RLock acquires a read lock corresponding to this Locker.
RLock()
// RUnlock releases a read lock corresponding to this Locker.
RUnlock()
}
// Record contains the exported data for a single metric instrument
@ -189,16 +179,18 @@ type CheckpointSet interface {
type Record struct {
descriptor *metric.Descriptor
labels *label.Set
resource *resource.Resource
aggregator Aggregator
}
// NewRecord allows Integrator implementations to construct export
// records. The Descriptor, Labels, and Aggregator represent
// aggregate metric events received over a single collection period.
func NewRecord(descriptor *metric.Descriptor, labels *label.Set, aggregator Aggregator) Record {
func NewRecord(descriptor *metric.Descriptor, labels *label.Set, resource *resource.Resource, aggregator Aggregator) Record {
return Record{
descriptor: descriptor,
labels: labels,
resource: resource,
aggregator: aggregator,
}
}
@ -219,3 +211,8 @@ func (r Record) Descriptor() *metric.Descriptor {
func (r Record) Labels() *label.Set {
return r.labels
}
// Resource contains common attributes that apply to this metric event.
func (r Record) Resource() *resource.Resource {
return r.resource
}

View File

@ -27,6 +27,8 @@ import (
)
type (
// Aggregator aggregates events that form a distribution, keeping
// an array with the exact set of values.
Aggregator struct {
// ckptSum needs to be aligned for 64-bit atomic operations.
ckptSum metric.Number

View File

@ -50,7 +50,7 @@ type updateTest struct {
}
func (ut *updateTest) run(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New()
@ -118,7 +118,7 @@ type mergeTest struct {
func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg1 := New()
agg2 := New()
@ -215,7 +215,7 @@ func TestArrayErrors(t *testing.T) {
ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
test.CheckedUpdate(t, agg, metric.Number(0), descriptor)
@ -243,7 +243,7 @@ func TestArrayErrors(t *testing.T) {
}
func TestArrayFloat64(t *testing.T) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, metric.Float64NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, metric.Float64NumberKind)
fpsf := func(sign int) []float64 {
// Check behavior of a bunch of odd floating

View File

@ -29,7 +29,7 @@ import (
// Config is an alias for the underlying DDSketch config object.
type Config = sdk.Config
// Aggregator aggregates measure events.
// Aggregator aggregates events into a distribution.
type Aggregator struct {
lock sync.Mutex
cfg *Config

View File

@ -33,7 +33,7 @@ type updateTest struct {
func (ut *updateTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(NewDefaultConfig(), descriptor)
all := test.NewNumbers(profile.NumberKind)
@ -92,7 +92,7 @@ type mergeTest struct {
func (mt *mergeTest) run(t *testing.T, profile test.Profile) {
ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg1 := New(NewDefaultConfig(), descriptor)
agg2 := New(NewDefaultConfig(), descriptor)

View File

@ -24,6 +24,11 @@ import (
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
// Note: This code uses a Mutex to govern access to the exclusive
// aggregator state. This is in contrast to a lock-free approach
// (as in the Go prometheus client) that was reverted here:
// https://github.com/open-telemetry/opentelemetry-go/pull/669
type (
// Aggregator observe events and counts them in pre-determined buckets.
// It also calculates the sum and count of all events.
@ -39,10 +44,9 @@ type (
// the sum and counts for all observed values and
// the less than equal bucket count for the pre-determined boundaries.
state struct {
// all fields have to be aligned for 64-bit atomic operations.
buckets aggregator.Buckets
count metric.Number
sum metric.Number
bucketCounts []metric.Number
count metric.Number
sum metric.Number
}
)
@ -51,7 +55,7 @@ var _ aggregator.Sum = &Aggregator{}
var _ aggregator.Count = &Aggregator{}
var _ aggregator.Histogram = &Aggregator{}
// New returns a new measure aggregator for computing Histograms.
// New returns a new aggregator for computing Histograms.
//
// A Histogram observe events and counts them in pre-defined buckets.
// And also provides the total sum and count of all observations.
@ -71,17 +75,12 @@ func New(desc *metric.Descriptor, boundaries []metric.Number) *Aggregator {
sort.Sort(&sortedBoundaries)
boundaries = sortedBoundaries.numbers
agg := Aggregator{
return &Aggregator{
kind: desc.NumberKind(),
boundaries: boundaries,
current: state{
buckets: aggregator.Buckets{
Boundaries: boundaries,
Counts: make([]metric.Number, len(boundaries)+1),
},
},
current: emptyState(boundaries),
checkpoint: emptyState(boundaries),
}
return &agg
}
// Sum returns the sum of all values in the checkpoint.
@ -102,7 +101,10 @@ func (c *Aggregator) Count() (int64, error) {
func (c *Aggregator) Histogram() (aggregator.Buckets, error) {
c.lock.Lock()
defer c.lock.Unlock()
return c.checkpoint.buckets, nil
return aggregator.Buckets{
Boundaries: c.boundaries,
Counts: c.checkpoint.bucketCounts,
}, nil
}
// Checkpoint saves the current state and resets the current state to
@ -111,16 +113,13 @@ func (c *Aggregator) Histogram() (aggregator.Buckets, error) {
// other.
func (c *Aggregator) Checkpoint(ctx context.Context, desc *metric.Descriptor) {
c.lock.Lock()
c.checkpoint, c.current = c.current, c.emptyState()
c.checkpoint, c.current = c.current, emptyState(c.boundaries)
c.lock.Unlock()
}
func (c *Aggregator) emptyState() state {
func emptyState(boundaries []metric.Number) state {
return state{
buckets: aggregator.Buckets{
Boundaries: c.boundaries,
Counts: make([]metric.Number, len(c.boundaries)+1),
},
bucketCounts: make([]metric.Number, len(boundaries)+1),
}
}
@ -141,7 +140,7 @@ func (c *Aggregator) Update(_ context.Context, number metric.Number, desc *metri
c.current.count.AddInt64(1)
c.current.sum.AddNumber(kind, number)
c.current.buckets.Counts[bucketID].AddUint64(1)
c.current.bucketCounts[bucketID].AddUint64(1)
return nil
}
@ -156,8 +155,8 @@ func (c *Aggregator) Merge(oa export.Aggregator, desc *metric.Descriptor) error
c.checkpoint.sum.AddNumber(desc.NumberKind(), o.checkpoint.sum)
c.checkpoint.count.AddNumber(metric.Uint64NumberKind, o.checkpoint.count)
for i := 0; i < len(c.checkpoint.buckets.Counts); i++ {
c.checkpoint.buckets.Counts[i].AddNumber(metric.Uint64NumberKind, o.checkpoint.buckets.Counts[i])
for i := 0; i < len(c.checkpoint.bucketCounts); i++ {
c.checkpoint.bucketCounts[i].AddNumber(metric.Uint64NumberKind, o.checkpoint.bucketCounts[i])
}
return nil
}

View File

@ -84,7 +84,7 @@ func TestHistogramPositiveAndNegative(t *testing.T) {
// Validates count, sum and buckets for a given profile and policy
func histogram(t *testing.T, profile test.Profile, policy policy) {
ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor, boundaries[profile.NumberKind])
@ -113,20 +113,33 @@ func histogram(t *testing.T, profile test.Profile, policy policy) {
require.Equal(t, all.Count(), count, "Same count -"+policy.name)
require.Nil(t, err)
require.Equal(t, len(agg.checkpoint.buckets.Counts), len(boundaries[profile.NumberKind])+1, "There should be b + 1 counts, where b is the number of boundaries")
require.Equal(t, len(agg.checkpoint.bucketCounts), len(boundaries[profile.NumberKind])+1, "There should be b + 1 counts, where b is the number of boundaries")
counts := calcBuckets(all.Points(), profile)
for i, v := range counts {
bCount := agg.checkpoint.buckets.Counts[i].AsUint64()
require.Equal(t, v, bCount, "Wrong bucket #%d count: %v != %v", i, counts, agg.checkpoint.buckets.Counts)
bCount := agg.checkpoint.bucketCounts[i].AsUint64()
require.Equal(t, v, bCount, "Wrong bucket #%d count: %v != %v", i, counts, agg.checkpoint.bucketCounts)
}
}
func TestHistogramInitial(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor, boundaries[profile.NumberKind])
buckets, err := agg.Histogram()
require.NoError(t, err)
require.Equal(t, len(buckets.Counts), len(boundaries[profile.NumberKind])+1)
require.Equal(t, len(buckets.Boundaries), len(boundaries[profile.NumberKind]))
})
}
func TestHistogramMerge(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg1 := New(descriptor, boundaries[profile.NumberKind])
agg2 := New(descriptor, boundaries[profile.NumberKind])
@ -164,12 +177,12 @@ func TestHistogramMerge(t *testing.T) {
require.Equal(t, all.Count(), count, "Same count - absolute")
require.Nil(t, err)
require.Equal(t, len(agg1.checkpoint.buckets.Counts), len(boundaries[profile.NumberKind])+1, "There should be b + 1 counts, where b is the number of boundaries")
require.Equal(t, len(agg1.checkpoint.bucketCounts), len(boundaries[profile.NumberKind])+1, "There should be b + 1 counts, where b is the number of boundaries")
counts := calcBuckets(all.Points(), profile)
for i, v := range counts {
bCount := agg1.checkpoint.buckets.Counts[i].AsUint64()
require.Equal(t, v, bCount, "Wrong bucket #%d count: %v != %v", i, counts, agg1.checkpoint.buckets.Counts)
bCount := agg1.checkpoint.bucketCounts[i].AsUint64()
require.Equal(t, v, bCount, "Wrong bucket #%d count: %v != %v", i, counts, agg1.checkpoint.bucketCounts)
}
})
}
@ -178,7 +191,7 @@ func TestHistogramNotSet(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor, boundaries[profile.NumberKind])
agg.Checkpoint(ctx, descriptor)
@ -191,8 +204,8 @@ func TestHistogramNotSet(t *testing.T) {
require.Equal(t, int64(0), count, "Empty checkpoint count = 0")
require.Nil(t, err)
require.Equal(t, len(agg.checkpoint.buckets.Counts), len(boundaries[profile.NumberKind])+1, "There should be b + 1 counts, where b is the number of boundaries")
for i, bCount := range agg.checkpoint.buckets.Counts {
require.Equal(t, len(agg.checkpoint.bucketCounts), len(boundaries[profile.NumberKind])+1, "There should be b + 1 counts, where b is the number of boundaries")
for i, bCount := range agg.checkpoint.bucketCounts {
require.Equal(t, uint64(0), bCount.AsUint64(), "Bucket #%d must have 0 observed values", i)
}
})

View File

@ -55,7 +55,7 @@ func TestLastValueUpdate(t *testing.T) {
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
record := test.NewAggregatorTest(metric.ObserverKind, profile.NumberKind)
record := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind)
var last metric.Number
for i := 0; i < count; i++ {
@ -79,7 +79,7 @@ func TestLastValueMerge(t *testing.T) {
agg1 := New()
agg2 := New()
descriptor := test.NewAggregatorTest(metric.ObserverKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueObserverKind, profile.NumberKind)
first1 := profile.Random(+1)
first2 := profile.Random(+1)
@ -107,7 +107,7 @@ func TestLastValueMerge(t *testing.T) {
}
func TestLastValueNotSet(t *testing.T) {
descriptor := test.NewAggregatorTest(metric.ObserverKind, metric.Int64NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueObserverKind, metric.Int64NumberKind)
g := New()
g.Checkpoint(context.Background(), descriptor)

View File

@ -24,8 +24,8 @@ import (
)
type (
// Aggregator aggregates measure events, keeping only the min, max,
// sum, and count.
// Aggregator aggregates events that form a distribution,
// keeping only the min, max, sum, and count.
Aggregator struct {
lock sync.Mutex
current state
@ -44,8 +44,9 @@ type (
var _ export.Aggregator = &Aggregator{}
var _ aggregator.MinMaxSumCount = &Aggregator{}
// New returns a new measure aggregator for computing min, max, sum, and
// count. It does not compute quantile information other than Min and Max.
// New returns a new aggregator for computing the min, max, sum, and
// count. It does not compute quantile information other than Min and
// Max.
//
// This type uses a mutex for Update() and Checkpoint() concurrency.
func New(desc *metric.Descriptor) *Aggregator {

View File

@ -79,7 +79,7 @@ func TestMinMaxSumCountPositiveAndNegative(t *testing.T) {
// Validates min, max, sum and count for a given profile and policy
func minMaxSumCount(t *testing.T, profile test.Profile, policy policy) {
ctx := context.Background()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor)
@ -127,7 +127,7 @@ func TestMinMaxSumCountMerge(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg1 := New(descriptor)
agg2 := New(descriptor)
@ -185,7 +185,7 @@ func TestMaxSumCountNotSet(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
agg := New(descriptor)
agg.Checkpoint(ctx, descriptor)

View File

@ -71,13 +71,13 @@ func TestCounterSum(t *testing.T) {
})
}
func TestMeasureSum(t *testing.T) {
func TestValueRecorderSum(t *testing.T) {
ctx := context.Background()
test.RunProfiles(t, func(t *testing.T, profile test.Profile) {
agg := New()
descriptor := test.NewAggregatorTest(metric.MeasureKind, profile.NumberKind)
descriptor := test.NewAggregatorTest(metric.ValueRecorderKind, profile.NumberKind)
sum := metric.Number(0)

View File

@ -311,7 +311,7 @@ func BenchmarkInt64LastValueAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewInt64Measure("int64.lastvalue")
mea := fix.meter.NewInt64ValueRecorder("int64.lastvalue")
b.ResetTimer()
@ -324,7 +324,7 @@ func BenchmarkInt64LastValueHandleAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewInt64Measure("int64.lastvalue")
mea := fix.meter.NewInt64ValueRecorder("int64.lastvalue")
handle := mea.Bind(labs...)
b.ResetTimer()
@ -338,7 +338,7 @@ func BenchmarkFloat64LastValueAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure("float64.lastvalue")
mea := fix.meter.NewFloat64ValueRecorder("float64.lastvalue")
b.ResetTimer()
@ -351,7 +351,7 @@ func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure("float64.lastvalue")
mea := fix.meter.NewFloat64ValueRecorder("float64.lastvalue")
handle := mea.Bind(labs...)
b.ResetTimer()
@ -361,13 +361,13 @@ func BenchmarkFloat64LastValueHandleAdd(b *testing.B) {
}
}
// Measures
// ValueRecorders
func benchmarkInt64MeasureAdd(b *testing.B, name string) {
func benchmarkInt64ValueRecorderAdd(b *testing.B, name string) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewInt64Measure(name)
mea := fix.meter.NewInt64ValueRecorder(name)
b.ResetTimer()
@ -376,11 +376,11 @@ func benchmarkInt64MeasureAdd(b *testing.B, name string) {
}
}
func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) {
func benchmarkInt64ValueRecorderHandleAdd(b *testing.B, name string) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewInt64Measure(name)
mea := fix.meter.NewInt64ValueRecorder(name)
handle := mea.Bind(labs...)
b.ResetTimer()
@ -390,11 +390,11 @@ func benchmarkInt64MeasureHandleAdd(b *testing.B, name string) {
}
}
func benchmarkFloat64MeasureAdd(b *testing.B, name string) {
func benchmarkFloat64ValueRecorderAdd(b *testing.B, name string) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure(name)
mea := fix.meter.NewFloat64ValueRecorder(name)
b.ResetTimer()
@ -403,11 +403,11 @@ func benchmarkFloat64MeasureAdd(b *testing.B, name string) {
}
}
func benchmarkFloat64MeasureHandleAdd(b *testing.B, name string) {
func benchmarkFloat64ValueRecorderHandleAdd(b *testing.B, name string) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
mea := fix.meter.NewFloat64Measure(name)
mea := fix.meter.NewFloat64ValueRecorder(name)
handle := mea.Bind(labs...)
b.ResetTimer()
@ -423,22 +423,22 @@ func BenchmarkObserverRegistration(b *testing.B) {
fix := newFixture(b)
names := make([]string, 0, b.N)
for i := 0; i < b.N; i++ {
names = append(names, fmt.Sprintf("test.observer.%d", i))
names = append(names, fmt.Sprintf("test.valueobserver.%d", i))
}
cb := func(result metric.Int64ObserverResult) {}
cb := func(_ context.Context, result metric.Int64ObserverResult) {}
b.ResetTimer()
for i := 0; i < b.N; i++ {
fix.meter.RegisterInt64Observer(names[i], cb)
fix.meter.RegisterInt64ValueObserver(names[i], cb)
}
}
func BenchmarkObserverObservationInt64(b *testing.B) {
func BenchmarkValueObserverObservationInt64(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
_ = fix.meter.RegisterInt64Observer("test.observer", func(result metric.Int64ObserverResult) {
_ = fix.meter.RegisterInt64ValueObserver("test.valueobserver", func(_ context.Context, result metric.Int64ObserverResult) {
for i := 0; i < b.N; i++ {
result.Observe((int64)(i), labs...)
}
@ -449,11 +449,11 @@ func BenchmarkObserverObservationInt64(b *testing.B) {
fix.accumulator.Collect(ctx)
}
func BenchmarkObserverObservationFloat64(b *testing.B) {
func BenchmarkValueObserverObservationFloat64(b *testing.B) {
ctx := context.Background()
fix := newFixture(b)
labs := makeLabels(1)
_ = fix.meter.RegisterFloat64Observer("test.observer", func(result metric.Float64ObserverResult) {
_ = fix.meter.RegisterFloat64ValueObserver("test.valueobserver", func(_ context.Context, result metric.Float64ObserverResult) {
for i := 0; i < b.N; i++ {
result.Observe((float64)(i), labs...)
}
@ -467,55 +467,55 @@ func BenchmarkObserverObservationFloat64(b *testing.B) {
// MaxSumCount
func BenchmarkInt64MaxSumCountAdd(b *testing.B) {
benchmarkInt64MeasureAdd(b, "int64.minmaxsumcount")
benchmarkInt64ValueRecorderAdd(b, "int64.minmaxsumcount")
}
func BenchmarkInt64MaxSumCountHandleAdd(b *testing.B) {
benchmarkInt64MeasureHandleAdd(b, "int64.minmaxsumcount")
benchmarkInt64ValueRecorderHandleAdd(b, "int64.minmaxsumcount")
}
func BenchmarkFloat64MaxSumCountAdd(b *testing.B) {
benchmarkFloat64MeasureAdd(b, "float64.minmaxsumcount")
benchmarkFloat64ValueRecorderAdd(b, "float64.minmaxsumcount")
}
func BenchmarkFloat64MaxSumCountHandleAdd(b *testing.B) {
benchmarkFloat64MeasureHandleAdd(b, "float64.minmaxsumcount")
benchmarkFloat64ValueRecorderHandleAdd(b, "float64.minmaxsumcount")
}
// DDSketch
func BenchmarkInt64DDSketchAdd(b *testing.B) {
benchmarkInt64MeasureAdd(b, "int64.ddsketch")
benchmarkInt64ValueRecorderAdd(b, "int64.ddsketch")
}
func BenchmarkInt64DDSketchHandleAdd(b *testing.B) {
benchmarkInt64MeasureHandleAdd(b, "int64.ddsketch")
benchmarkInt64ValueRecorderHandleAdd(b, "int64.ddsketch")
}
func BenchmarkFloat64DDSketchAdd(b *testing.B) {
benchmarkFloat64MeasureAdd(b, "float64.ddsketch")
benchmarkFloat64ValueRecorderAdd(b, "float64.ddsketch")
}
func BenchmarkFloat64DDSketchHandleAdd(b *testing.B) {
benchmarkFloat64MeasureHandleAdd(b, "float64.ddsketch")
benchmarkFloat64ValueRecorderHandleAdd(b, "float64.ddsketch")
}
// Array
func BenchmarkInt64ArrayAdd(b *testing.B) {
benchmarkInt64MeasureAdd(b, "int64.array")
benchmarkInt64ValueRecorderAdd(b, "int64.array")
}
func BenchmarkInt64ArrayHandleAdd(b *testing.B) {
benchmarkInt64MeasureHandleAdd(b, "int64.array")
benchmarkInt64ValueRecorderHandleAdd(b, "int64.array")
}
func BenchmarkFloat64ArrayAdd(b *testing.B) {
benchmarkFloat64MeasureAdd(b, "float64.array")
benchmarkFloat64ValueRecorderAdd(b, "float64.array")
}
func BenchmarkFloat64ArrayHandleAdd(b *testing.B) {
benchmarkFloat64MeasureHandleAdd(b, "float64.array")
benchmarkFloat64ValueRecorderHandleAdd(b, "float64.array")
}
// BatchRecord

View File

@ -14,6 +14,8 @@
package metric
import "go.opentelemetry.io/otel/sdk/resource"
// Config contains configuration for an SDK.
type Config struct {
// ErrorHandler is the function called when the SDK encounters an error.
@ -21,6 +23,10 @@ type Config struct {
// This option can be overridden after instantiation of the SDK
// with the `SetErrorHandler` method.
ErrorHandler ErrorHandler
// Resource describes all the metric records processed by the
// Accumulator.
Resource *resource.Resource
}
// Option is the interface that applies the value to a configuration option.
@ -39,3 +45,16 @@ type errorHandlerOption ErrorHandler
func (o errorHandlerOption) Apply(config *Config) {
config.ErrorHandler = ErrorHandler(o)
}
// WithResource sets the Resource configuration option of a Config.
func WithResource(res *resource.Resource) Option {
return resourceOption{res}
}
type resourceOption struct {
*resource.Resource
}
func (o resourceOption) Apply(config *Config) {
config.Resource = o.Resource
}

View File

@ -15,6 +15,8 @@
package push
import (
"time"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
)
@ -30,6 +32,19 @@ type Config struct {
// Resource is the OpenTelemetry resource associated with all Meters
// created by the Controller.
Resource *resource.Resource
// Stateful causes the controller to maintain state across
// collection events, so that records in the exported
// checkpoint set are cumulative.
Stateful bool
// Period is the interval between calls to Collect a checkpoint.
Period time.Duration
// Timeout is the duration a collection (i.e. collect, accumulate,
// integrate, and export) can last before it is canceled. Defaults to
// the controller push period.
Timeout time.Duration
}
// Option is the interface that applies the value to a configuration option.
@ -59,3 +74,36 @@ type resourceOption struct{ *resource.Resource }
func (o resourceOption) Apply(config *Config) {
config.Resource = o.Resource
}
// WithStateful sets the Stateful configuration option of a Config.
func WithStateful(stateful bool) Option {
return statefulOption(stateful)
}
type statefulOption bool
func (o statefulOption) Apply(config *Config) {
config.Stateful = bool(o)
}
// WithPeriod sets the Period configuration option of a Config.
func WithPeriod(period time.Duration) Option {
return periodOption(period)
}
type periodOption time.Duration
func (o periodOption) Apply(config *Config) {
config.Period = time.Duration(o)
}
// WithTimeout sets the Timeout configuration option of a Config.
func WithTimeout(timeout time.Duration) Option {
return timeoutOption(timeout)
}
type timeoutOption time.Duration
func (o timeoutOption) Apply(config *Config) {
config.Timeout = time.Duration(o)
}

View File

@ -23,85 +23,73 @@ import (
"go.opentelemetry.io/otel/api/metric/registry"
export "go.opentelemetry.io/otel/sdk/export/metric"
sdk "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/resource"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
"go.opentelemetry.io/otel/sdk/metric/integrator/simple"
)
// DefaultPushPeriod is the default time interval between pushes.
const DefaultPushPeriod = 10 * time.Second
// Controller organizes a periodic push of metric data.
type Controller struct {
lock sync.Mutex
collectLock sync.Mutex
accumulator *sdk.Accumulator
resource *resource.Resource
uniq metric.MeterImpl
named map[string]metric.Meter
provider *registry.Provider
errorHandler sdk.ErrorHandler
integrator export.Integrator
integrator *simple.Integrator
exporter export.Exporter
wg sync.WaitGroup
ch chan struct{}
period time.Duration
ticker Ticker
clock Clock
timeout time.Duration
clock controllerTime.Clock
ticker controllerTime.Ticker
}
var _ metric.Provider = &Controller{}
// Several types below are created to match "github.com/benbjohnson/clock"
// so that it remains a test-only dependency.
type Clock interface {
Now() time.Time
Ticker(time.Duration) Ticker
}
type Ticker interface {
Stop()
C() <-chan time.Time
}
type realClock struct {
}
type realTicker struct {
ticker *time.Ticker
}
var _ Clock = realClock{}
var _ Ticker = realTicker{}
// New constructs a Controller, an implementation of metric.Provider,
// using the provided integrator, exporter, collection period, and SDK
// configuration options to configure an SDK with periodic collection.
// The integrator itself is configured with the aggregation selector policy.
func New(integrator export.Integrator, exporter export.Exporter, period time.Duration, opts ...Option) *Controller {
c := &Config{ErrorHandler: sdk.DefaultErrorHandler}
// using the provided exporter and options to configure an SDK with
// periodic collection.
func New(selector export.AggregationSelector, exporter export.Exporter, opts ...Option) *Controller {
c := &Config{
ErrorHandler: sdk.DefaultErrorHandler,
Period: DefaultPushPeriod,
}
for _, opt := range opts {
opt.Apply(c)
}
if c.Timeout == 0 {
c.Timeout = c.Period
}
impl := sdk.NewAccumulator(integrator, sdk.WithErrorHandler(c.ErrorHandler))
integrator := simple.New(selector, c.Stateful)
impl := sdk.NewAccumulator(
integrator,
sdk.WithErrorHandler(c.ErrorHandler),
sdk.WithResource(c.Resource),
)
return &Controller{
provider: registry.NewProvider(impl),
accumulator: impl,
resource: c.Resource,
uniq: registry.NewUniqueInstrumentMeterImpl(impl),
named: map[string]metric.Meter{},
errorHandler: c.ErrorHandler,
integrator: integrator,
exporter: exporter,
errorHandler: c.ErrorHandler,
ch: make(chan struct{}),
period: period,
clock: realClock{},
period: c.Period,
timeout: c.Timeout,
clock: controllerTime.RealClock{},
}
}
// SetClock supports setting a mock clock for testing. This must be
// called before Start().
func (c *Controller) SetClock(clock Clock) {
func (c *Controller) SetClock(clock controllerTime.Clock) {
c.lock.Lock()
defer c.lock.Unlock()
c.clock = clock
}
// SetErrorHandler sets the handler for errors. If none has been set, the
// SDK default error handler is used.
func (c *Controller) SetErrorHandler(errorHandler sdk.ErrorHandler) {
c.lock.Lock()
defer c.lock.Unlock()
@ -109,19 +97,9 @@ func (c *Controller) SetErrorHandler(errorHandler sdk.ErrorHandler) {
c.accumulator.SetErrorHandler(errorHandler)
}
// Meter returns a named Meter, satisifying the metric.Provider
// interface.
func (c *Controller) Meter(name string) metric.Meter {
c.lock.Lock()
defer c.lock.Unlock()
if meter, ok := c.named[name]; ok {
return meter
}
meter := metric.WrapMeterImpl(c.uniq, name)
c.named[name] = meter
return meter
// Provider returns a metric.Provider instance for this controller.
func (c *Controller) Provider() metric.Provider {
return c.provider
}
// Start begins a ticker that periodically collects and exports
@ -170,56 +148,18 @@ func (c *Controller) run(ch chan struct{}) {
}
func (c *Controller) tick() {
// TODO: either remove the context argument from Export() or
// configure a timeout here?
ctx := context.Background()
c.collect(ctx)
checkpointSet := syncCheckpointSet{
mtx: &c.collectLock,
delegate: c.integrator.CheckpointSet(),
}
err := c.exporter.Export(ctx, c.resource, checkpointSet)
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
c.integrator.Lock()
defer c.integrator.Unlock()
c.accumulator.Collect(ctx)
err := c.exporter.Export(ctx, c.integrator.CheckpointSet())
c.integrator.FinishedCollection()
if err != nil {
c.errorHandler(err)
}
}
func (c *Controller) collect(ctx context.Context) {
c.collectLock.Lock()
defer c.collectLock.Unlock()
c.accumulator.Collect(ctx)
}
// syncCheckpointSet is a wrapper for a CheckpointSet to synchronize
// SDK's collection and reads of a CheckpointSet by an exporter.
type syncCheckpointSet struct {
mtx *sync.Mutex
delegate export.CheckpointSet
}
var _ export.CheckpointSet = (*syncCheckpointSet)(nil)
func (c syncCheckpointSet) ForEach(fn func(export.Record) error) error {
c.mtx.Lock()
defer c.mtx.Unlock()
return c.delegate.ForEach(fn)
}
func (realClock) Now() time.Time {
return time.Now()
}
func (realClock) Ticker(period time.Duration) Ticker {
return realTicker{time.NewTicker(period)}
}
func (t realTicker) Stop() {
t.ticker.Stop()
}
func (t realTicker) C() <-chan time.Time {
return t.ticker.C
}

View File

@ -22,25 +22,21 @@ import (
"testing"
"time"
"github.com/benbjohnson/clock"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/exporters/metric/test"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/metric/controller/push"
controllerTest "go.opentelemetry.io/otel/sdk/metric/controller/test"
"go.opentelemetry.io/otel/sdk/resource"
)
type testIntegrator struct {
t *testing.T
lock sync.Mutex
checkpointSet *test.CheckpointSet
checkpoints int
finishes int
}
var testResource = resource.New(kv.String("R", "V"))
type testExporter struct {
t *testing.T
@ -52,70 +48,28 @@ type testExporter struct {
type testFixture struct {
checkpointSet *test.CheckpointSet
integrator *testIntegrator
exporter *testExporter
}
type mockClock struct {
mock *clock.Mock
}
type mockTicker struct {
ticker *clock.Ticker
}
var _ push.Clock = mockClock{}
var _ push.Ticker = mockTicker{}
type testSelector struct{}
func newFixture(t *testing.T) testFixture {
checkpointSet := test.NewCheckpointSet()
checkpointSet := test.NewCheckpointSet(testResource)
integrator := &testIntegrator{
t: t,
checkpointSet: checkpointSet,
}
exporter := &testExporter{
t: t,
}
return testFixture{
checkpointSet: checkpointSet,
integrator: integrator,
exporter: exporter,
}
}
func (b *testIntegrator) AggregatorFor(*metric.Descriptor) export.Aggregator {
func (testSelector) AggregatorFor(*metric.Descriptor) export.Aggregator {
return sum.New()
}
func (b *testIntegrator) CheckpointSet() export.CheckpointSet {
b.lock.Lock()
defer b.lock.Unlock()
b.checkpoints++
return b.checkpointSet
}
func (b *testIntegrator) FinishedCollection() {
b.lock.Lock()
defer b.lock.Unlock()
b.finishes++
}
func (b *testIntegrator) Process(_ context.Context, record export.Record) error {
b.lock.Lock()
defer b.lock.Unlock()
labels := record.Labels().ToSlice()
b.checkpointSet.Add(record.Descriptor(), record.Aggregator(), labels...)
return nil
}
func (b *testIntegrator) getCounts() (checkpoints, finishes int) {
b.lock.Lock()
defer b.lock.Unlock()
return b.checkpoints, b.finishes
}
func (e *testExporter) Export(_ context.Context, _ *resource.Resource, checkpointSet export.CheckpointSet) error {
func (e *testExporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
e.lock.Lock()
defer e.lock.Unlock()
e.exports++
@ -143,29 +97,9 @@ func (e *testExporter) resetRecords() ([]export.Record, int) {
return r, e.exports
}
func (c mockClock) Now() time.Time {
return c.mock.Now()
}
func (c mockClock) Ticker(period time.Duration) push.Ticker {
return mockTicker{c.mock.Ticker(period)}
}
func (c mockClock) Add(d time.Duration) {
c.mock.Add(d)
}
func (t mockTicker) Stop() {
t.ticker.Stop()
}
func (t mockTicker) C() <-chan time.Time {
return t.ticker.C
}
func TestPushDoubleStop(t *testing.T) {
fix := newFixture(t)
p := push.New(fix.integrator, fix.exporter, time.Second)
p := push.New(testSelector{}, fix.exporter)
p.Start()
p.Stop()
p.Stop()
@ -173,7 +107,7 @@ func TestPushDoubleStop(t *testing.T) {
func TestPushDoubleStart(t *testing.T) {
fix := newFixture(t)
p := push.New(fix.integrator, fix.exporter, time.Second)
p := push.New(testSelector{}, fix.exporter)
p.Start()
p.Start()
p.Stop()
@ -182,10 +116,15 @@ func TestPushDoubleStart(t *testing.T) {
func TestPushTicker(t *testing.T) {
fix := newFixture(t)
p := push.New(fix.integrator, fix.exporter, time.Second)
meter := p.Meter("name")
p := push.New(
testSelector{},
fix.exporter,
push.WithPeriod(time.Second),
push.WithResource(testResource),
)
meter := p.Provider().Meter("name")
mock := mockClock{clock.NewMock()}
mock := controllerTest.NewMockClock()
p.SetClock(mock)
ctx := context.Background()
@ -197,9 +136,6 @@ func TestPushTicker(t *testing.T) {
counter.Add(ctx, 3)
records, exports := fix.exporter.resetRecords()
checkpoints, finishes := fix.integrator.getCounts()
require.Equal(t, 0, checkpoints)
require.Equal(t, 0, finishes)
require.Equal(t, 0, exports)
require.Equal(t, 0, len(records))
@ -207,12 +143,10 @@ func TestPushTicker(t *testing.T) {
runtime.Gosched()
records, exports = fix.exporter.resetRecords()
checkpoints, finishes = fix.integrator.getCounts()
require.Equal(t, 1, checkpoints)
require.Equal(t, 1, finishes)
require.Equal(t, 1, exports)
require.Equal(t, 1, len(records))
require.Equal(t, "counter", records[0].Descriptor().Name())
require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder()))
sum, err := records[0].Aggregator().(aggregator.Sum).Sum()
require.Equal(t, int64(3), sum.AsInt64())
@ -226,12 +160,10 @@ func TestPushTicker(t *testing.T) {
runtime.Gosched()
records, exports = fix.exporter.resetRecords()
checkpoints, finishes = fix.integrator.getCounts()
require.Equal(t, 2, checkpoints)
require.Equal(t, 2, finishes)
require.Equal(t, 2, exports)
require.Equal(t, 1, len(records))
require.Equal(t, "counter", records[0].Descriptor().Name())
require.Equal(t, "R=V", records[0].Resource().Encoded(label.DefaultEncoder()))
sum, err = records[0].Aggregator().(aggregator.Sum).Sum()
require.Equal(t, int64(7), sum.AsInt64())
@ -256,8 +188,8 @@ func TestPushExportError(t *testing.T) {
expectedDescriptors []string
expectedError error
}{
{"errNone", nil, []string{"counter1", "counter2"}, nil},
{"errNoData", aggregator.ErrNoData, []string{"counter2"}, nil},
{"errNone", nil, []string{"counter1{R=V,X=Y}", "counter2{R=V,}"}, nil},
{"errNoData", aggregator.ErrNoData, []string{"counter2{R=V,}"}, nil},
{"errUnexpected", errAggregator, []string{}, errAggregator},
}
for _, tt := range tests {
@ -265,7 +197,12 @@ func TestPushExportError(t *testing.T) {
fix := newFixture(t)
fix.exporter.injectErr = injector("counter1", tt.injectedError)
p := push.New(fix.integrator, fix.exporter, time.Second)
p := push.New(
testSelector{},
fix.exporter,
push.WithPeriod(time.Second),
push.WithResource(testResource),
)
var err error
var lock sync.Mutex
@ -275,19 +212,19 @@ func TestPushExportError(t *testing.T) {
err = sdkErr
})
mock := mockClock{clock.NewMock()}
mock := controllerTest.NewMockClock()
p.SetClock(mock)
ctx := context.Background()
meter := p.Meter("name")
meter := p.Provider().Meter("name")
counter1 := metric.Must(meter).NewInt64Counter("counter1")
counter2 := metric.Must(meter).NewInt64Counter("counter2")
p.Start()
runtime.Gosched()
counter1.Add(ctx, 3)
counter1.Add(ctx, 3, kv.String("X", "Y"))
counter2.Add(ctx, 5)
require.Equal(t, 0, fix.exporter.exports)
@ -297,10 +234,7 @@ func TestPushExportError(t *testing.T) {
runtime.Gosched()
records, exports := fix.exporter.resetRecords()
checkpoints, finishes := fix.integrator.getCounts()
require.Equal(t, 1, exports)
require.Equal(t, 1, checkpoints)
require.Equal(t, 1, finishes)
lock.Lock()
if tt.expectedError == nil {
require.NoError(t, err)
@ -311,11 +245,16 @@ func TestPushExportError(t *testing.T) {
lock.Unlock()
require.Equal(t, len(tt.expectedDescriptors), len(records))
for _, r := range records {
require.Contains(t, tt.expectedDescriptors, r.Descriptor().Name())
require.Contains(t, tt.expectedDescriptors,
fmt.Sprintf("%s{%s,%s}",
r.Descriptor().Name(),
r.Resource().Encoded(label.DefaultEncoder()),
r.Labels().Encoded(label.DefaultEncoder()),
),
)
}
p.Stop()
})
}
}

View File

@ -0,0 +1,58 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"time"
"github.com/benbjohnson/clock"
controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time"
)
type MockClock struct {
mock *clock.Mock
}
type MockTicker struct {
ticker *clock.Ticker
}
var _ controllerTime.Clock = MockClock{}
var _ controllerTime.Ticker = MockTicker{}
func NewMockClock() MockClock {
return MockClock{clock.NewMock()}
}
func (c MockClock) Now() time.Time {
return c.mock.Now()
}
func (c MockClock) Ticker(period time.Duration) controllerTime.Ticker {
return MockTicker{c.mock.Ticker(period)}
}
func (c MockClock) Add(d time.Duration) {
c.mock.Add(d)
}
func (t MockTicker) Stop() {
t.ticker.Stop()
}
func (t MockTicker) C() <-chan time.Time {
return t.ticker.C
}

View File

@ -0,0 +1,59 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package time // import "go.opentelemetry.io/otel/sdk/metric/controller/time"
import (
"time"
lib "time"
)
// Several types below are created to match "github.com/benbjohnson/clock"
// so that it remains a test-only dependency.
type Clock interface {
Now() lib.Time
Ticker(lib.Duration) Ticker
}
type Ticker interface {
Stop()
C() <-chan lib.Time
}
type RealClock struct {
}
type RealTicker struct {
ticker *lib.Ticker
}
var _ Clock = RealClock{}
var _ Ticker = RealTicker{}
func (RealClock) Now() time.Time {
return time.Now()
}
func (RealClock) Ticker(period time.Duration) Ticker {
return RealTicker{time.NewTicker(period)}
}
func (t RealTicker) Stop() {
t.ticker.Stop()
}
func (t RealTicker) C() <-chan time.Time {
return t.ticker.C
}

View File

@ -19,6 +19,7 @@ import (
"fmt"
"math"
"strings"
"sync"
"sync/atomic"
"testing"
@ -33,9 +34,11 @@ import (
"go.opentelemetry.io/otel/sdk/metric/aggregator/array"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
batchTest "go.opentelemetry.io/otel/sdk/metric/integrator/test"
"go.opentelemetry.io/otel/sdk/resource"
)
var Must = metric.Must
var testResource = resource.New(kv.String("R", "V"))
type correctnessIntegrator struct {
newAggCount int64
@ -43,9 +46,37 @@ type correctnessIntegrator struct {
t *testing.T
records []export.Record
sync.Mutex
err error
}
func (cb *correctnessIntegrator) AggregatorFor(descriptor *metric.Descriptor) (agg export.Aggregator) {
func newSDK(t *testing.T) (metric.Meter, *metricsdk.Accumulator, *correctnessIntegrator) {
integrator := &correctnessIntegrator{
t: t,
}
accum := metricsdk.NewAccumulator(
integrator,
metricsdk.WithResource(testResource),
metricsdk.WithErrorHandler(func(err error) {
integrator.Lock()
defer integrator.Unlock()
integrator.err = err
}),
)
meter := metric.WrapMeterImpl(accum, "test")
return meter, accum, integrator
}
func (ci *correctnessIntegrator) sdkErr() error {
ci.Lock()
defer ci.Unlock()
t := ci.err
ci.err = nil
return t
}
func (ci *correctnessIntegrator) AggregatorFor(descriptor *metric.Descriptor) (agg export.Aggregator) {
name := descriptor.Name()
switch {
@ -57,31 +88,27 @@ func (cb *correctnessIntegrator) AggregatorFor(descriptor *metric.Descriptor) (a
agg = array.New()
}
if agg != nil {
atomic.AddInt64(&cb.newAggCount, 1)
atomic.AddInt64(&ci.newAggCount, 1)
}
return
}
func (cb *correctnessIntegrator) CheckpointSet() export.CheckpointSet {
cb.t.Fatal("Should not be called")
func (ci *correctnessIntegrator) CheckpointSet() export.CheckpointSet {
ci.t.Fatal("Should not be called")
return nil
}
func (*correctnessIntegrator) FinishedCollection() {
}
func (cb *correctnessIntegrator) Process(_ context.Context, record export.Record) error {
cb.records = append(cb.records, record)
func (ci *correctnessIntegrator) Process(_ context.Context, record export.Record) error {
ci.records = append(ci.records, record)
return nil
}
func TestInputRangeTestCounter(t *testing.T) {
func TestInputRangeCounter(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, integrator := newSDK(t)
var sdkErr error
sdk.SetErrorHandler(func(handleErr error) {
@ -107,30 +134,50 @@ func TestInputRangeTestCounter(t *testing.T) {
require.Nil(t, sdkErr)
}
func TestInputRangeTestMeasure(t *testing.T) {
func TestInputRangeUpDownCounter(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, integrator := newSDK(t)
var sdkErr error
sdk.SetErrorHandler(func(handleErr error) {
sdkErr = handleErr
})
measure := Must(meter).NewFloat64Measure("name.measure")
counter := Must(meter).NewInt64UpDownCounter("name.updowncounter")
measure.Record(ctx, math.NaN())
counter.Add(ctx, -1)
counter.Add(ctx, -1)
counter.Add(ctx, 2)
counter.Add(ctx, 1)
checkpointed := sdk.Collect(ctx)
sum, err := integrator.records[0].Aggregator().(aggregator.Sum).Sum()
require.Equal(t, int64(1), sum.AsInt64())
require.Equal(t, 1, checkpointed)
require.Nil(t, err)
require.Nil(t, sdkErr)
}
func TestInputRangeValueRecorder(t *testing.T) {
ctx := context.Background()
meter, sdk, integrator := newSDK(t)
var sdkErr error
sdk.SetErrorHandler(func(handleErr error) {
sdkErr = handleErr
})
valuerecorder := Must(meter).NewFloat64ValueRecorder("name.valuerecorder")
valuerecorder.Record(ctx, math.NaN())
require.Equal(t, aggregator.ErrNaNInput, sdkErr)
sdkErr = nil
checkpointed := sdk.Collect(ctx)
require.Equal(t, 0, checkpointed)
measure.Record(ctx, 1)
measure.Record(ctx, 2)
valuerecorder.Record(ctx, 1)
valuerecorder.Record(ctx, 2)
integrator.records = nil
checkpointed = sdk.Collect(ctx)
@ -144,15 +191,11 @@ func TestInputRangeTestMeasure(t *testing.T) {
func TestDisabledInstrument(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, integrator := newSDK(t)
measure := Must(meter).NewFloat64Measure("name.disabled")
valuerecorder := Must(meter).NewFloat64ValueRecorder("name.disabled")
measure.Record(ctx, -1)
valuerecorder.Record(ctx, -1)
checkpointed := sdk.Collect(ctx)
require.Equal(t, 0, checkpointed)
@ -161,12 +204,7 @@ func TestDisabledInstrument(t *testing.T) {
func TestRecordNaN(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, _ := newSDK(t)
var sdkErr error
sdk.SetErrorHandler(func(handleErr error) {
@ -181,11 +219,7 @@ func TestRecordNaN(t *testing.T) {
func TestSDKLabelsDeduplication(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, integrator := newSDK(t)
counter := Must(meter).NewInt64Counter("counter")
@ -284,113 +318,156 @@ func TestDefaultLabelEncoder(t *testing.T) {
func TestObserverCollection(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
meter, sdk, integrator := newSDK(t)
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
_ = Must(meter).RegisterFloat64Observer("float.observer", func(result metric.Float64ObserverResult) {
_ = Must(meter).RegisterFloat64ValueObserver("float.valueobserver", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(1, kv.String("A", "B"))
// last value wins
result.Observe(-1, kv.String("A", "B"))
result.Observe(-1, kv.String("C", "D"))
})
_ = Must(meter).RegisterInt64Observer("int.observer", func(result metric.Int64ObserverResult) {
_ = Must(meter).RegisterInt64ValueObserver("int.valueobserver", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-1, kv.String("A", "B"))
result.Observe(1)
// last value wins
result.Observe(1, kv.String("A", "B"))
result.Observe(1)
})
_ = Must(meter).RegisterInt64Observer("empty.observer", func(result metric.Int64ObserverResult) {
_ = Must(meter).RegisterFloat64SumObserver("float.sumobserver", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(1, kv.String("A", "B"))
result.Observe(2, kv.String("A", "B"))
result.Observe(1, kv.String("C", "D"))
})
_ = Must(meter).RegisterInt64SumObserver("int.sumobserver", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(2, kv.String("A", "B"))
result.Observe(1)
// last value wins
result.Observe(1, kv.String("A", "B"))
result.Observe(1)
})
_ = Must(meter).RegisterInt64ValueObserver("empty.valueobserver", func(_ context.Context, result metric.Int64ObserverResult) {
})
collected := sdk.Collect(ctx)
require.Equal(t, 4, collected)
require.Equal(t, 4, len(integrator.records))
require.Equal(t, 8, collected)
require.Equal(t, 8, len(integrator.records))
out := batchTest.NewOutput(label.DefaultEncoder())
for _, rec := range integrator.records {
_ = out.AddTo(rec)
}
require.EqualValues(t, map[string]float64{
"float.observer/A=B": -1,
"float.observer/C=D": -1,
"int.observer/": 1,
"int.observer/A=B": 1,
"float.sumobserver/A=B/R=V": 2,
"float.sumobserver/C=D/R=V": 1,
"int.sumobserver//R=V": 1,
"int.sumobserver/A=B/R=V": 1,
"float.valueobserver/A=B/R=V": -1,
"float.valueobserver/C=D/R=V": -1,
"int.valueobserver//R=V": 1,
"int.valueobserver/A=B/R=V": 1,
}, out.Map)
}
func TestSumObserverInputRange(t *testing.T) {
ctx := context.Background()
meter, sdk, integrator := newSDK(t)
_ = Must(meter).RegisterFloat64SumObserver("float.sumobserver", func(_ context.Context, result metric.Float64ObserverResult) {
result.Observe(-2, kv.String("A", "B"))
require.Equal(t, aggregator.ErrNegativeInput, integrator.sdkErr())
result.Observe(-1, kv.String("C", "D"))
require.Equal(t, aggregator.ErrNegativeInput, integrator.sdkErr())
})
_ = Must(meter).RegisterInt64SumObserver("int.sumobserver", func(_ context.Context, result metric.Int64ObserverResult) {
result.Observe(-1, kv.String("A", "B"))
require.Equal(t, aggregator.ErrNegativeInput, integrator.sdkErr())
result.Observe(-1)
require.Equal(t, aggregator.ErrNegativeInput, integrator.sdkErr())
})
collected := sdk.Collect(ctx)
require.Equal(t, 0, collected)
require.Equal(t, 0, len(integrator.records))
// check that the error condition was reset
require.NoError(t, integrator.sdkErr())
}
func TestObserverBatch(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
meter, sdk, integrator := newSDK(t)
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
var floatValueObs metric.Float64ValueObserver
var intValueObs metric.Int64ValueObserver
var floatSumObs metric.Float64SumObserver
var intSumObs metric.Int64SumObserver
var floatObs metric.Float64Observer
var intObs metric.Int64Observer
var batch = Must(meter).NewBatchObserver(
func(result metric.BatchObserverResult) {
func(_ context.Context, result metric.BatchObserverResult) {
result.Observe(
[]kv.KeyValue{
kv.String("A", "B"),
},
floatObs.Observation(1),
floatObs.Observation(-1),
intObs.Observation(-1),
intObs.Observation(1),
floatValueObs.Observation(1),
floatValueObs.Observation(-1),
intValueObs.Observation(-1),
intValueObs.Observation(1),
floatSumObs.Observation(1000),
intSumObs.Observation(100),
)
result.Observe(
[]kv.KeyValue{
kv.String("C", "D"),
},
floatObs.Observation(-1),
floatValueObs.Observation(-1),
floatSumObs.Observation(-1),
)
result.Observe(
nil,
intObs.Observation(1),
intObs.Observation(1),
intValueObs.Observation(1),
intValueObs.Observation(1),
intSumObs.Observation(10),
floatSumObs.Observation(1.1),
)
})
floatObs = batch.RegisterFloat64Observer("float.observer")
intObs = batch.RegisterInt64Observer("int.observer")
floatValueObs = batch.RegisterFloat64ValueObserver("float.valueobserver")
intValueObs = batch.RegisterInt64ValueObserver("int.valueobserver")
floatSumObs = batch.RegisterFloat64SumObserver("float.sumobserver")
intSumObs = batch.RegisterInt64SumObserver("int.sumobserver")
collected := sdk.Collect(ctx)
require.Equal(t, 4, collected)
require.Equal(t, 4, len(integrator.records))
require.Equal(t, 8, collected)
require.Equal(t, 8, len(integrator.records))
out := batchTest.NewOutput(label.DefaultEncoder())
for _, rec := range integrator.records {
_ = out.AddTo(rec)
}
require.EqualValues(t, map[string]float64{
"float.observer/A=B": -1,
"float.observer/C=D": -1,
"int.observer/": 1,
"int.observer/A=B": 1,
"float.sumobserver//R=V": 1.1,
"float.sumobserver/A=B/R=V": 1000,
"int.sumobserver//R=V": 10,
"int.sumobserver/A=B/R=V": 100,
"float.valueobserver/A=B/R=V": -1,
"float.valueobserver/C=D/R=V": -1,
"int.valueobserver//R=V": 1,
"int.valueobserver/A=B/R=V": 1,
}, out.Map)
}
func TestRecordBatch(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, integrator := newSDK(t)
counter1 := Must(meter).NewInt64Counter("int64.counter")
counter2 := Must(meter).NewFloat64Counter("float64.counter")
measure1 := Must(meter).NewInt64Measure("int64.measure")
measure2 := Must(meter).NewFloat64Measure("float64.measure")
valuerecorder1 := Must(meter).NewInt64ValueRecorder("int64.valuerecorder")
valuerecorder2 := Must(meter).NewFloat64ValueRecorder("float64.valuerecorder")
sdk.RecordBatch(
ctx,
@ -400,8 +477,8 @@ func TestRecordBatch(t *testing.T) {
},
counter1.Measurement(1),
counter2.Measurement(2),
measure1.Measurement(3),
measure2.Measurement(4),
valuerecorder1.Measurement(3),
valuerecorder2.Measurement(4),
)
sdk.Collect(ctx)
@ -411,10 +488,10 @@ func TestRecordBatch(t *testing.T) {
_ = out.AddTo(rec)
}
require.EqualValues(t, map[string]float64{
"int64.counter/A=B,C=D": 1,
"float64.counter/A=B,C=D": 2,
"int64.measure/A=B,C=D": 3,
"float64.measure/A=B,C=D": 4,
"int64.counter/A=B,C=D/R=V": 1,
"float64.counter/A=B,C=D/R=V": 2,
"int64.valuerecorder/A=B,C=D/R=V": 3,
"float64.valuerecorder/A=B,C=D/R=V": 4,
}, out.Map)
}
@ -423,12 +500,7 @@ func TestRecordBatch(t *testing.T) {
// that its encoded labels will be cached across collection intervals.
func TestRecordPersistence(t *testing.T) {
ctx := context.Background()
integrator := &correctnessIntegrator{
t: t,
}
sdk := metricsdk.NewAccumulator(integrator)
meter := metric.WrapMeterImpl(sdk, "test")
meter, sdk, integrator := newSDK(t)
c := Must(meter).NewFloat64Counter("sum.name")
b := c.Bind(kv.String("bound", "true"))
@ -442,3 +514,27 @@ func TestRecordPersistence(t *testing.T) {
require.Equal(t, int64(2), integrator.newAggCount)
}
func TestSyncInAsync(t *testing.T) {
ctx := context.Background()
meter, sdk, integrator := newSDK(t)
counter := Must(meter).NewFloat64Counter("counter")
_ = Must(meter).RegisterInt64ValueObserver("observer",
func(ctx context.Context, result metric.Int64ObserverResult) {
result.Observe(10)
counter.Add(ctx, 100)
},
)
sdk.Collect(ctx)
out := batchTest.NewOutput(label.DefaultEncoder())
for _, rec := range integrator.records {
_ = out.AddTo(rec)
}
require.EqualValues(t, map[string]float64{
"counter//R=V": 100,
"observer//R=V": 10,
}, out.Map)
}

View File

@ -13,57 +13,34 @@
// limitations under the License.
/*
Package metric implements the OpenTelemetry metric.Meter API. The SDK
supports configurable metrics export behavior through a collection of
export interfaces that support various export strategies, described below.
Package metric implements the OpenTelemetry metric.MeterImpl
interface. The Accumulator type supports configurable metrics export
behavior through a collection of export interfaces that support
various export strategies, described below.
The metric.Meter API consists of methods for constructing each of the basic
kinds of metric instrument. There are six types of instrument available to
the end user, comprised of three basic kinds of metric instrument (Counter,
Measure, Observer) crossed with two kinds of number (int64, float64).
The metric.MeterImpl API consists of methods for constructing
synchronous and asynchronous instruments. There are two constructors
per instrument for the two kinds of number (int64, float64).
The API assists the SDK by consolidating the variety of metric instruments
into a narrower interface, allowing the SDK to avoid repetition of
boilerplate. The API and SDK are separated such that an event reaching
the SDK has a uniform structure: an instrument, a label set, and a
numerical value.
Synchronous instruments are managed by a sync.Map containing a *record
with the current state for each synchronous instrument. A bound
instrument encapsulates a direct pointer to the record, allowing
bound metric events to bypass a sync.Map lookup. A lock-free
algorithm is used to protect against races when adding and removing
items from the sync.Map.
To this end, the API uses a kv.Number type to represent either an int64
or a float64, depending on the instrument's definition. A single
implementation interface is used for counter and measure instruments,
metric.InstrumentImpl, and a single implementation interface is used for
their handles, metric.HandleImpl. For observers, the API defines
interfaces, for which the SDK provides an implementation.
There are four entry points for events in the Metrics API - three for
synchronous instruments (counters and measures) and one for asynchronous
instruments (observers). The entry points for synchronous instruments are:
via instrument handles, via direct instrument calls, and via BatchRecord.
The SDK is designed with handles as the primary entry point, the other two
entry points are implemented in terms of short-lived handles. For example,
the implementation of a direct call allocates a handle, operates on the
handle, and releases the handle. Similarly, the implementation of
RecordBatch uses a short-lived handle for each measurement in the batch.
The entry point for asynchronous instruments is via observer callbacks.
Observer callbacks behave like a set of instrument handles - one for each
observation for a distinct label set. The observer handles are alive as
long as they are used. If the callback stops reporting values for a
certain label set, the associated handle is dropped.
Asynchronous instruments are managed by an internal
AsyncInstrumentState, which coordinates calling batch and single
instrument callbacks.
Internal Structure
The SDK is designed with minimal use of locking, to avoid adding
contention for user-level code. For each handle, whether it is held by
user-level code or a short-lived device, there exists an internal record
managed by the SDK. Each internal record corresponds to a specific
instrument and label set combination.
Each observer also has its own kind of record stored in the SDK. This
record contains a set of recorders for every specific label set used in the
callback.
A sync.Map maintains the mapping of current instruments and label sets to
internal records. To create a new handle, the SDK consults the Map to
internal records. To create a new bound instrument, the SDK consults the Map to
locate an existing record, otherwise it constructs a new record. The SDK
maintains a count of the number of references to each record, ensuring
that records are not reclaimed from the Map while they are still active
@ -74,12 +51,7 @@ sweeps through all records in the SDK, checkpointing their state. When a
record is discovered that has no references and has not been updated since
the prior collection pass, it is removed from the Map.
The SDK maintains a current epoch number, corresponding to the number of
completed collections. Each recorder of an observer record contains the
last epoch during which it was updated. This variable allows the collection
code path to detect stale recorders and remove them.
Each record of a handle and recorder of an observer has an associated
Both synchronous and asynchronous instruments have an associated
aggregator, which maintains the current state resulting from all metric
events since its last checkpoint. Aggregators may be lock-free or they may
use locking, but they should expect to be called concurrently. Aggregators
@ -97,21 +69,18 @@ enters the SDK resulting in a new record, and collection context,
where a system-level thread performs a collection pass through the
SDK.
Descriptor is a struct that describes the metric instrument to the export
pipeline, containing the name, recommended aggregation keys, units,
description, metric kind (counter or measure), number kind (int64 or
float64), and whether the instrument has alternate semantics or not (i.e.,
monotonic=false counter, absolute=false measure). A Descriptor accompanies
metric data as it passes through the export pipeline.
Descriptor is a struct that describes the metric instrument to the
export pipeline, containing the name, units, description, metric kind,
number kind (int64 or float64). A Descriptor accompanies metric data
as it passes through the export pipeline.
The AggregationSelector interface supports choosing the method of
aggregation to apply to a particular instrument. Given the Descriptor,
this AggregatorFor method returns an implementation of Aggregator. If this
interface returns nil, the metric will be disabled. The aggregator should
be matched to the capabilities of the exporter. Selecting the aggregator
for counter instruments is relatively straightforward, but for measure and
observer instruments there are numerous choices with different cost and
quality tradeoffs.
for sum-only instruments is relatively straightforward, but many options
are available for aggregating distributions from ValueRecorder instruments.
Aggregator is an interface which implements a concrete strategy for
aggregating metric updates. Several Aggregator implementations are

View File

@ -17,7 +17,6 @@ package metric_test
import (
"context"
"fmt"
"time"
"go.opentelemetry.io/otel/api/kv"
@ -29,7 +28,7 @@ func ExampleNew() {
pusher, err := stdout.NewExportPipeline(stdout.Config{
PrettyPrint: true,
DoNotPrintTime: true, // This makes the output deterministic
}, time.Minute)
})
if err != nil {
panic(fmt.Sprintln("Could not initialize stdout exporter:", err))
}
@ -38,7 +37,7 @@ func ExampleNew() {
ctx := context.Background()
key := kv.Key("key")
meter := pusher.Meter("example")
meter := pusher.Provider().Meter("example")
counter := metric.Must(meter).NewInt64Counter("a.counter")

View File

@ -25,7 +25,7 @@ import (
)
func TestStressInt64Histogram(t *testing.T) {
desc := metric.NewDescriptor("some_metric", metric.MeasureKind, metric.Int64NumberKind)
desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Int64NumberKind)
h := histogram.New(&desc, []metric.Number{metric.NewInt64Number(25), metric.NewInt64Number(50), metric.NewInt64Number(75)})
ctx, cancelFunc := context.WithCancel(context.Background())

View File

@ -17,56 +17,63 @@ package simple // import "go.opentelemetry.io/otel/sdk/metric/integrator/simple"
import (
"context"
"errors"
"sync"
"go.opentelemetry.io/otel/api/label"
"go.opentelemetry.io/otel/api/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/resource"
)
type (
Integrator struct {
selector export.AggregationSelector
batchMap batchMap
export.AggregationSelector
stateful bool
batch
}
batchKey struct {
descriptor *metric.Descriptor
distinct label.Distinct
resource label.Distinct
}
batchValue struct {
aggregator export.Aggregator
labels *label.Set
resource *resource.Resource
}
batchMap map[batchKey]batchValue
batch struct {
// RWMutex implements locking for the `CheckpoingSet` interface.
sync.RWMutex
values map[batchKey]batchValue
}
)
var _ export.Integrator = &Integrator{}
var _ export.CheckpointSet = batchMap{}
var _ export.CheckpointSet = &batch{}
func New(selector export.AggregationSelector, stateful bool) *Integrator {
return &Integrator{
selector: selector,
batchMap: batchMap{},
stateful: stateful,
AggregationSelector: selector,
stateful: stateful,
batch: batch{
values: map[batchKey]batchValue{},
},
}
}
func (b *Integrator) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
return b.selector.AggregatorFor(descriptor)
}
func (b *Integrator) Process(_ context.Context, record export.Record) error {
desc := record.Descriptor()
key := batchKey{
descriptor: desc,
distinct: record.Labels().Equivalent(),
resource: record.Resource().Equivalent(),
}
agg := record.Aggregator()
value, ok := b.batchMap[key]
value, ok := b.batch.values[key]
if ok {
// Note: The call to Merge here combines only
// identical records. It is required even for a
@ -88,28 +95,30 @@ func (b *Integrator) Process(_ context.Context, record export.Record) error {
return err
}
}
b.batchMap[key] = batchValue{
b.batch.values[key] = batchValue{
aggregator: agg,
labels: record.Labels(),
resource: record.Resource(),
}
return nil
}
func (b *Integrator) CheckpointSet() export.CheckpointSet {
return b.batchMap
return &b.batch
}
func (b *Integrator) FinishedCollection() {
if !b.stateful {
b.batchMap = batchMap{}
b.batch.values = map[batchKey]batchValue{}
}
}
func (c batchMap) ForEach(f func(export.Record) error) error {
for key, value := range c {
func (b *batch) ForEach(f func(export.Record) error) error {
for key, value := range b.values {
if err := f(export.NewRecord(
key.descriptor,
value.labels,
value.resource,
value.aggregator,
)); err != nil && !errors.Is(err, aggregator.ErrNoData) {
return err

View File

@ -29,7 +29,7 @@ import (
// These tests use the ../test label encoding.
func TestUngroupedStateless(t *testing.T) {
func TestSimpleStateless(t *testing.T) {
ctx := context.Background()
b := simple.New(test.NewAggregationSelector(), false)
@ -60,7 +60,6 @@ func TestUngroupedStateless(t *testing.T) {
_ = b.Process(ctx, test.NewCounterRecord(&test.CounterBDesc, test.Labels1, 50))
checkpointSet := b.CheckpointSet()
b.FinishedCollection()
records := test.NewOutput(test.SdkEncoder)
_ = checkpointSet.ForEach(records.AddTo)
@ -68,30 +67,31 @@ func TestUngroupedStateless(t *testing.T) {
// Output lastvalue should have only the "G=H" and "G=" keys.
// Output counter should have only the "C=D" and "C=" keys.
require.EqualValues(t, map[string]float64{
"sum.a/C~D&G~H": 60, // labels1
"sum.a/C~D&E~F": 20, // labels2
"sum.a/": 40, // labels3
"sum.b/C~D&G~H": 60, // labels1
"sum.b/C~D&E~F": 20, // labels2
"sum.b/": 40, // labels3
"lastvalue.a/C~D&G~H": 50, // labels1
"lastvalue.a/C~D&E~F": 20, // labels2
"lastvalue.a/": 30, // labels3
"lastvalue.b/C~D&G~H": 50, // labels1
"lastvalue.b/C~D&E~F": 20, // labels2
"lastvalue.b/": 30, // labels3
"sum.a/C~D&G~H/R~V": 60, // labels1
"sum.a/C~D&E~F/R~V": 20, // labels2
"sum.a//R~V": 40, // labels3
"sum.b/C~D&G~H/R~V": 60, // labels1
"sum.b/C~D&E~F/R~V": 20, // labels2
"sum.b//R~V": 40, // labels3
"lastvalue.a/C~D&G~H/R~V": 50, // labels1
"lastvalue.a/C~D&E~F/R~V": 20, // labels2
"lastvalue.a//R~V": 30, // labels3
"lastvalue.b/C~D&G~H/R~V": 50, // labels1
"lastvalue.b/C~D&E~F/R~V": 20, // labels2
"lastvalue.b//R~V": 30, // labels3
}, records.Map)
b.FinishedCollection()
// Verify that state was reset
checkpointSet = b.CheckpointSet()
b.FinishedCollection()
_ = checkpointSet.ForEach(func(rec export.Record) error {
t.Fatal("Unexpected call")
return nil
})
b.FinishedCollection()
}
func TestUngroupedStateful(t *testing.T) {
func TestSimpleStateful(t *testing.T) {
ctx := context.Background()
b := simple.New(test.NewAggregationSelector(), true)
@ -110,18 +110,18 @@ func TestUngroupedStateful(t *testing.T) {
_ = checkpointSet.ForEach(records1.AddTo)
require.EqualValues(t, map[string]float64{
"sum.a/C~D&G~H": 10, // labels1
"sum.b/C~D&G~H": 10, // labels1
"sum.a/C~D&G~H/R~V": 10, // labels1
"sum.b/C~D&G~H/R~V": 10, // labels1
}, records1.Map)
// Test that state was NOT reset
checkpointSet = b.CheckpointSet()
b.FinishedCollection()
records2 := test.NewOutput(test.SdkEncoder)
_ = checkpointSet.ForEach(records2.AddTo)
require.EqualValues(t, records1.Map, records2.Map)
b.FinishedCollection()
// Update and re-checkpoint the original record.
_ = caggA.Update(ctx, metric.NewInt64Number(20), &test.CounterADesc)
@ -132,25 +132,25 @@ func TestUngroupedStateful(t *testing.T) {
// As yet cagg has not been passed to Integrator.Process. Should
// not see an update.
checkpointSet = b.CheckpointSet()
b.FinishedCollection()
records3 := test.NewOutput(test.SdkEncoder)
_ = checkpointSet.ForEach(records3.AddTo)
require.EqualValues(t, records1.Map, records3.Map)
b.FinishedCollection()
// Now process the second update
_ = b.Process(ctx, export.NewRecord(&test.CounterADesc, test.Labels1, caggA))
_ = b.Process(ctx, export.NewRecord(&test.CounterBDesc, test.Labels1, caggB))
_ = b.Process(ctx, export.NewRecord(&test.CounterADesc, test.Labels1, test.Resource, caggA))
_ = b.Process(ctx, export.NewRecord(&test.CounterBDesc, test.Labels1, test.Resource, caggB))
checkpointSet = b.CheckpointSet()
b.FinishedCollection()
records4 := test.NewOutput(test.SdkEncoder)
_ = checkpointSet.ForEach(records4.AddTo)
require.EqualValues(t, map[string]float64{
"sum.a/C~D&G~H": 30,
"sum.b/C~D&G~H": 30,
"sum.a/C~D&G~H/R~V": 30,
"sum.b/C~D&G~H/R~V": 30,
}, records4.Map)
b.FinishedCollection()
}

View File

@ -26,6 +26,7 @@ import (
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
"go.opentelemetry.io/otel/sdk/resource"
)
type (
@ -45,11 +46,14 @@ type (
)
var (
// Resource is applied to all test records built in this package.
Resource = resource.New(kv.String("R", "V"))
// LastValueADesc and LastValueBDesc group by "G"
LastValueADesc = metric.NewDescriptor(
"lastvalue.a", metric.ObserverKind, metric.Int64NumberKind)
"lastvalue.a", metric.ValueObserverKind, metric.Int64NumberKind)
LastValueBDesc = metric.NewDescriptor(
"lastvalue.b", metric.ObserverKind, metric.Int64NumberKind)
"lastvalue.b", metric.ValueObserverKind, metric.Int64NumberKind)
// CounterADesc and CounterBDesc group by "C"
CounterADesc = metric.NewDescriptor(
"sum.a", metric.CounterKind, metric.Int64NumberKind)
@ -92,7 +96,7 @@ func (*testAggregationSelector) AggregatorFor(desc *metric.Descriptor) export.Ag
switch desc.MetricKind() {
case metric.CounterKind:
return sum.New()
case metric.ObserverKind:
case metric.ValueObserverKind:
return lastvalue.New()
default:
panic("Invalid descriptor MetricKind for this test")
@ -133,12 +137,12 @@ func LastValueAgg(desc *metric.Descriptor, v int64) export.Aggregator {
// Convenience method for building a test exported lastValue record.
func NewLastValueRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record {
return export.NewRecord(desc, labels, LastValueAgg(desc, value))
return export.NewRecord(desc, labels, Resource, LastValueAgg(desc, value))
}
// Convenience method for building a test exported counter record.
func NewCounterRecord(desc *metric.Descriptor, labels *label.Set, value int64) export.Record {
return export.NewRecord(desc, labels, CounterAgg(desc, value))
return export.NewRecord(desc, labels, Resource, CounterAgg(desc, value))
}
// CounterAgg returns a checkpointed counter aggregator w/ the specified descriptor and value.
@ -154,7 +158,8 @@ func CounterAgg(desc *metric.Descriptor, v int64) export.Aggregator {
// value to the output map.
func (o Output) AddTo(rec export.Record) error {
encoded := rec.Labels().Encoded(o.labelEncoder)
key := fmt.Sprint(rec.Descriptor().Name(), "/", encoded)
rencoded := rec.Resource().Encoded(o.labelEncoder)
key := fmt.Sprint(rec.Descriptor().Name(), "/", encoded, "/", rencoded)
var value float64
if s, ok := rec.Aggregator().(aggregator.Sum); ok {

View File

@ -25,7 +25,7 @@ import (
)
func TestStressInt64MinMaxSumCount(t *testing.T) {
desc := metric.NewDescriptor("some_metric", metric.MeasureKind, metric.Int64NumberKind)
desc := metric.NewDescriptor("some_metric", metric.ValueRecorderKind, metric.Int64NumberKind)
mmsc := minmaxsumcount.New(&desc)
ctx, cancel := context.WithCancel(context.Background())

View File

@ -29,6 +29,7 @@ import (
internal "go.opentelemetry.io/otel/internal/metric"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
"go.opentelemetry.io/otel/sdk/resource"
)
type (
@ -68,6 +69,9 @@ type (
// place for sorting during labels creation to avoid
// allocation. It is cleared after use.
asyncSortSlice label.Sortable
// resource is applied to all records in this Accumulator.
resource *resource.Resource
}
syncInstrument struct {
@ -317,6 +321,7 @@ func NewAccumulator(integrator export.Integrator, opts ...Option) *Accumulator {
integrator: integrator,
errorHandler: c.ErrorHandler,
asyncInstruments: internal.NewAsyncInstrumentState(c.ErrorHandler),
resource: c.Resource,
}
}
@ -362,9 +367,10 @@ func (m *Accumulator) Collect(ctx context.Context) int {
m.collectLock.Lock()
defer m.collectLock.Unlock()
checkpointed := m.collectSyncInstruments(ctx)
checkpointed += m.observeAsyncInstruments(ctx)
checkpointed := m.observeAsyncInstruments(ctx)
checkpointed += m.collectSyncInstruments(ctx)
m.currentEpoch++
return checkpointed
}
@ -428,7 +434,7 @@ func (m *Accumulator) observeAsyncInstruments(ctx context.Context) int {
asyncCollected := 0
m.asyncContext = ctx
m.asyncInstruments.Run(m)
m.asyncInstruments.Run(context.Background(), m)
m.asyncContext = nil
for _, inst := range m.asyncInstruments.Instruments() {
@ -472,7 +478,7 @@ func (m *Accumulator) checkpoint(ctx context.Context, descriptor *metric.Descrip
}
recorder.Checkpoint(ctx, descriptor)
exportRecord := export.NewRecord(descriptor, labels, recorder)
exportRecord := export.NewRecord(descriptor, labels, m.resource, recorder)
err := m.integrator.Process(ctx, exportRecord)
if err != nil {
m.errorHandler(err)

View File

@ -42,48 +42,48 @@ var (
_ export.AggregationSelector = selectorHistogram{}
)
// NewWithInexpensiveMeasure returns a simple aggregation selector
// NewWithInexpensiveDistribution returns a simple aggregation selector
// that uses counter, minmaxsumcount and minmaxsumcount aggregators
// for the three kinds of metric. This selector is faster and uses
// less memory than the others because minmaxsumcount does not
// aggregate quantile information.
func NewWithInexpensiveMeasure() export.AggregationSelector {
func NewWithInexpensiveDistribution() export.AggregationSelector {
return selectorInexpensive{}
}
// NewWithSketchMeasure returns a simple aggregation selector that
// NewWithSketchDistribution returns a simple aggregation selector that
// uses counter, ddsketch, and ddsketch aggregators for the three
// kinds of metric. This selector uses more cpu and memory than the
// NewWithInexpensiveMeasure because it uses one DDSketch per distinct
// measure/observer and labelset.
func NewWithSketchMeasure(config *ddsketch.Config) export.AggregationSelector {
// NewWithInexpensiveDistribution because it uses one DDSketch per distinct
// instrument and label set.
func NewWithSketchDistribution(config *ddsketch.Config) export.AggregationSelector {
return selectorSketch{
config: config,
}
}
// NewWithExactMeasure returns a simple aggregation selector that uses
// NewWithExactDistribution returns a simple aggregation selector that uses
// counter, array, and array aggregators for the three kinds of metric.
// This selector uses more memory than the NewWithSketchMeasure
// This selector uses more memory than the NewWithSketchDistribution
// because it aggregates an array of all values, therefore is able to
// compute exact quantiles.
func NewWithExactMeasure() export.AggregationSelector {
func NewWithExactDistribution() export.AggregationSelector {
return selectorExact{}
}
// NewWithHistogramMeasure returns a simple aggregation selector that uses counter,
// NewWithHistogramDistribution returns a simple aggregation selector that uses counter,
// histogram, and histogram aggregators for the three kinds of metric. This
// selector uses more memory than the NewWithInexpensiveMeasure because it
// selector uses more memory than the NewWithInexpensiveDistribution because it
// uses a counter per bucket.
func NewWithHistogramMeasure(boundaries []metric.Number) export.AggregationSelector {
func NewWithHistogramDistribution(boundaries []metric.Number) export.AggregationSelector {
return selectorHistogram{boundaries: boundaries}
}
func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
switch descriptor.MetricKind() {
case metric.ObserverKind:
case metric.ValueObserverKind:
fallthrough
case metric.MeasureKind:
case metric.ValueRecorderKind:
return minmaxsumcount.New(descriptor)
default:
return sum.New()
@ -92,9 +92,9 @@ func (selectorInexpensive) AggregatorFor(descriptor *metric.Descriptor) export.A
func (s selectorSketch) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
switch descriptor.MetricKind() {
case metric.ObserverKind:
case metric.ValueObserverKind:
fallthrough
case metric.MeasureKind:
case metric.ValueRecorderKind:
return ddsketch.New(s.config, descriptor)
default:
return sum.New()
@ -103,9 +103,9 @@ func (s selectorSketch) AggregatorFor(descriptor *metric.Descriptor) export.Aggr
func (selectorExact) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
switch descriptor.MetricKind() {
case metric.ObserverKind:
case metric.ValueObserverKind:
fallthrough
case metric.MeasureKind:
case metric.ValueRecorderKind:
return array.New()
default:
return sum.New()
@ -114,9 +114,9 @@ func (selectorExact) AggregatorFor(descriptor *metric.Descriptor) export.Aggrega
func (s selectorHistogram) AggregatorFor(descriptor *metric.Descriptor) export.Aggregator {
switch descriptor.MetricKind() {
case metric.ObserverKind:
case metric.ValueObserverKind:
fallthrough
case metric.MeasureKind:
case metric.ValueRecorderKind:
return histogram.New(descriptor, s.boundaries)
default:
return sum.New()

View File

@ -29,35 +29,35 @@ import (
)
var (
testCounterDesc = metric.NewDescriptor("counter", metric.CounterKind, metric.Int64NumberKind)
testMeasureDesc = metric.NewDescriptor("measure", metric.MeasureKind, metric.Int64NumberKind)
testObserverDesc = metric.NewDescriptor("observer", metric.ObserverKind, metric.Int64NumberKind)
testCounterDesc = metric.NewDescriptor("counter", metric.CounterKind, metric.Int64NumberKind)
testValueRecorderDesc = metric.NewDescriptor("valuerecorder", metric.ValueRecorderKind, metric.Int64NumberKind)
testValueObserverDesc = metric.NewDescriptor("valueobserver", metric.ValueObserverKind, metric.Int64NumberKind)
)
func TestInexpensiveMeasure(t *testing.T) {
inex := simple.NewWithInexpensiveMeasure()
func TestInexpensiveDistribution(t *testing.T) {
inex := simple.NewWithInexpensiveDistribution()
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testMeasureDesc).(*minmaxsumcount.Aggregator) })
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testObserverDesc).(*minmaxsumcount.Aggregator) })
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testValueRecorderDesc).(*minmaxsumcount.Aggregator) })
require.NotPanics(t, func() { _ = inex.AggregatorFor(&testValueObserverDesc).(*minmaxsumcount.Aggregator) })
}
func TestSketchMeasure(t *testing.T) {
sk := simple.NewWithSketchMeasure(ddsketch.NewDefaultConfig())
func TestSketchDistribution(t *testing.T) {
sk := simple.NewWithSketchDistribution(ddsketch.NewDefaultConfig())
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testMeasureDesc).(*ddsketch.Aggregator) })
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testObserverDesc).(*ddsketch.Aggregator) })
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testValueRecorderDesc).(*ddsketch.Aggregator) })
require.NotPanics(t, func() { _ = sk.AggregatorFor(&testValueObserverDesc).(*ddsketch.Aggregator) })
}
func TestExactMeasure(t *testing.T) {
ex := simple.NewWithExactMeasure()
func TestExactDistribution(t *testing.T) {
ex := simple.NewWithExactDistribution()
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testMeasureDesc).(*array.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testObserverDesc).(*array.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueRecorderDesc).(*array.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueObserverDesc).(*array.Aggregator) })
}
func TestHistogramMeasure(t *testing.T) {
ex := simple.NewWithHistogramMeasure([]metric.Number{})
func TestHistogramDistribution(t *testing.T) {
ex := simple.NewWithHistogramDistribution([]metric.Number{})
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testCounterDesc).(*sum.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testMeasureDesc).(*histogram.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testObserverDesc).(*histogram.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueRecorderDesc).(*histogram.Aggregator) })
require.NotPanics(t, func() { _ = ex.AggregatorFor(&testValueObserverDesc).(*histogram.Aggregator) })
}

View File

@ -285,7 +285,7 @@ func (f *testFixture) Process(_ context.Context, record export.Record) error {
f.T.Fatal("Sum error: ", err)
}
f.impl.storeCollect(actual, sum, time.Time{})
case metric.MeasureKind:
case metric.ValueRecorderKind:
lv, ts, err := agg.(aggregator.LastValue).LastValue()
if err != nil && err != aggregator.ErrNoData {
f.T.Fatal("Last value error: ", err)
@ -431,15 +431,15 @@ func TestStressFloat64Counter(t *testing.T) {
func intLastValueTestImpl() testImpl {
return testImpl{
newInstrument: func(meter api.Meter, name string) SyncImpler {
return Must(meter).NewInt64Measure(name + ".lastvalue")
return Must(meter).NewInt64ValueRecorder(name + ".lastvalue")
},
getUpdateValue: func() api.Number {
r1 := rand.Int63()
return api.NewInt64Number(rand.Int63() - r1)
},
operate: func(inst interface{}, ctx context.Context, value api.Number, labels []kv.KeyValue) {
measure := inst.(api.Int64Measure)
measure.Record(ctx, value.AsInt64(), labels...)
valuerecorder := inst.(api.Int64ValueRecorder)
valuerecorder.Record(ctx, value.AsInt64(), labels...)
},
newStore: func() interface{} {
return &lastValueState{
@ -473,14 +473,14 @@ func TestStressInt64LastValue(t *testing.T) {
func floatLastValueTestImpl() testImpl {
return testImpl{
newInstrument: func(meter api.Meter, name string) SyncImpler {
return Must(meter).NewFloat64Measure(name + ".lastvalue")
return Must(meter).NewFloat64ValueRecorder(name + ".lastvalue")
},
getUpdateValue: func() api.Number {
return api.NewFloat64Number((-0.5 + rand.Float64()) * 100000)
},
operate: func(inst interface{}, ctx context.Context, value api.Number, labels []kv.KeyValue) {
measure := inst.(api.Float64Measure)
measure.Record(ctx, value.AsFloat64(), labels...)
valuerecorder := inst.(api.Float64ValueRecorder)
valuerecorder.Record(ctx, value.AsFloat64(), labels...)
},
newStore: func() interface{} {
return &lastValueState{

View File

@ -1,80 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package resourcekeys contains well known type and label keys for resources.
package resourcekeys // import "go.opentelemetry.io/otel/sdk/resource/resourcekeys"
// Constants for Service resources.
const (
// A uniquely identifying name for a Service.
ServiceKeyName = "service.name"
ServiceKeyNamespace = "service.namespace"
ServiceKeyInstanceID = "service.instance.id"
ServiceKeyVersion = "service.version"
)
// Constants for Library resources.
const (
// A uniquely identifying name for a Library.
LibraryKeyName = "library.name"
LibraryKeyLanguage = "library.language"
LibraryKeyVersion = "library.version"
)
// Constants for Kubernetes resources.
const (
// A uniquely identifying name for the Kubernetes cluster. Kubernetes
// does not have cluster names as an internal concept so this may be
// set to any meaningful value within the environment. For example,
// GKE clusters have a name which can be used for this label.
K8SKeyClusterName = "k8s.cluster.name"
K8SKeyNamespaceName = "k8s.namespace.name"
K8SKeyPodName = "k8s.pod.name"
K8SKeyDeploymentName = "k8s.deployment.name"
)
// Constants for Container resources.
const (
// A uniquely identifying name for the Container.
ContainerKeyName = "container.name"
ContainerKeyImageName = "container.image.name"
ContainerKeyImageTag = "container.image.tag"
)
// Constants for Cloud resources.
const (
CloudKeyProvider = "cloud.provider"
CloudKeyAccountID = "cloud.account.id"
CloudKeyRegion = "cloud.region"
CloudKeyZone = "cloud.zone"
// Cloud Providers
CloudProviderAWS = "aws"
CloudProviderGCP = "gcp"
CloudProviderAZURE = "azure"
)
// Constants for Host resources.
const (
// A uniquely identifying name for the host.
HostKeyName = "host.name"
// A hostname as returned by the 'hostname' command on host machine.
HostKeyHostName = "host.hostname"
HostKeyID = "host.id"
HostKeyType = "host.type"
HostKeyImageName = "host.image.name"
HostKeyImageID = "host.image.id"
HostKeyImageVersion = "host.image.version"
)

View File

@ -17,6 +17,7 @@ package trace
import (
"context"
"errors"
"runtime"
"sync"
"sync/atomic"
"time"
@ -25,9 +26,9 @@ import (
)
const (
defaultMaxQueueSize = 2048
defaultScheduledDelay = 5000 * time.Millisecond
defaultMaxExportBatchSize = 512
DefaultMaxQueueSize = 2048
DefaultScheduledDelay = 5000 * time.Millisecond
DefaultMaxExportBatchSize = 512
)
var (
@ -70,6 +71,8 @@ type BatchSpanProcessor struct {
queue chan *export.SpanData
dropped uint32
batch []*export.SpanData
timer *time.Timer
stopWait sync.WaitGroup
stopOnce sync.Once
stopCh chan struct{}
@ -87,39 +90,26 @@ func NewBatchSpanProcessor(e export.SpanBatcher, opts ...BatchSpanProcessorOptio
}
o := BatchSpanProcessorOptions{
ScheduledDelayMillis: defaultScheduledDelay,
MaxQueueSize: defaultMaxQueueSize,
MaxExportBatchSize: defaultMaxExportBatchSize,
ScheduledDelayMillis: DefaultScheduledDelay,
MaxQueueSize: DefaultMaxQueueSize,
MaxExportBatchSize: DefaultMaxExportBatchSize,
}
for _, opt := range opts {
opt(&o)
}
bsp := &BatchSpanProcessor{
e: e,
o: o,
e: e,
o: o,
batch: make([]*export.SpanData, 0, o.MaxExportBatchSize),
timer: time.NewTimer(o.ScheduledDelayMillis),
queue: make(chan *export.SpanData, o.MaxQueueSize),
stopCh: make(chan struct{}),
}
bsp.queue = make(chan *export.SpanData, bsp.o.MaxQueueSize)
bsp.stopCh = make(chan struct{})
// Start timer to export spans.
ticker := time.NewTicker(bsp.o.ScheduledDelayMillis)
bsp.stopWait.Add(1)
go func() {
defer ticker.Stop()
batch := make([]*export.SpanData, 0, bsp.o.MaxExportBatchSize)
for {
select {
case <-bsp.stopCh:
bsp.processQueue(&batch)
close(bsp.queue)
bsp.stopWait.Done()
return
case <-ticker.C:
bsp.processQueue(&batch)
}
}
bsp.processQueue()
bsp.drainQueue()
}()
return bsp, nil
@ -140,6 +130,8 @@ func (bsp *BatchSpanProcessor) Shutdown() {
bsp.stopOnce.Do(func() {
close(bsp.stopCh)
bsp.stopWait.Wait()
close(bsp.queue)
})
}
@ -167,53 +159,85 @@ func WithBlocking() BatchSpanProcessorOption {
}
}
// processQueue removes spans from the `queue` channel until there is
// no more data. It calls the exporter in batches of up to
// MaxExportBatchSize until all the available data have been processed.
func (bsp *BatchSpanProcessor) processQueue(batch *[]*export.SpanData) {
// exportSpans is a subroutine of processing and draining the queue.
func (bsp *BatchSpanProcessor) exportSpans() {
bsp.timer.Reset(bsp.o.ScheduledDelayMillis)
if len(bsp.batch) > 0 {
bsp.e.ExportSpans(context.Background(), bsp.batch)
bsp.batch = bsp.batch[:0]
}
}
// processQueue removes spans from the `queue` channel until processor
// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
// waiting up to ScheduledDelayMillis to form a batch.
func (bsp *BatchSpanProcessor) processQueue() {
defer bsp.stopWait.Done()
defer bsp.timer.Stop()
for {
// Read spans until either the buffer fills or the
// queue is empty.
for ok := true; ok && len(*batch) < bsp.o.MaxExportBatchSize; {
select {
case sd := <-bsp.queue:
if sd != nil && sd.SpanContext.IsSampled() {
*batch = append(*batch, sd)
select {
case <-bsp.stopCh:
return
case <-bsp.timer.C:
bsp.exportSpans()
case sd := <-bsp.queue:
bsp.batch = append(bsp.batch, sd)
if len(bsp.batch) == bsp.o.MaxExportBatchSize {
if !bsp.timer.Stop() {
<-bsp.timer.C
}
default:
ok = false
bsp.exportSpans()
}
}
if len(*batch) == 0 {
return
}
// Send one batch, then continue reading until the
// buffer is empty.
bsp.e.ExportSpans(context.Background(), *batch)
*batch = (*batch)[:0]
}
}
// drainQueue awaits the any caller that had added to bsp.stopWait
// to finish the enqueue, then exports the final batch.
func (bsp *BatchSpanProcessor) drainQueue() {
for sd := range bsp.queue {
bsp.batch = append(bsp.batch, sd)
if len(bsp.batch) == bsp.o.MaxExportBatchSize {
bsp.exportSpans()
}
}
bsp.exportSpans()
}
func (bsp *BatchSpanProcessor) enqueue(sd *export.SpanData) {
select {
case <-bsp.stopCh:
if !sd.SpanContext.IsSampled() {
return
default:
}
// This ensures the bsp.queue<- below does not panic as the
// processor shuts down.
defer func() {
x := recover()
switch err := x.(type) {
case nil:
return
case runtime.Error:
if err.Error() == "send on closed channel" {
return
}
}
panic(x)
}()
if bsp.o.BlockOnQueueFull {
bsp.queue <- sd
} else {
var ok bool
select {
case bsp.queue <- sd:
ok = true
default:
ok = false
}
if !ok {
atomic.AddUint32(&bsp.dropped, 1)
case <-bsp.stopCh:
}
return
}
select {
case bsp.queue <- sd:
case <-bsp.stopCh:
default:
atomic.AddUint32(&bsp.dropped, 1)
}
}

View File

@ -69,30 +69,26 @@ type testOption struct {
wantNumSpans int
wantBatchCount int
genNumSpans int
waitTime time.Duration
parallel bool
}
func TestNewBatchSpanProcessorWithOptions(t *testing.T) {
schDelay := 200 * time.Millisecond
waitTime := schDelay + 100*time.Millisecond
options := []testOption{
{
name: "default BatchSpanProcessorOptions",
wantNumSpans: 2048,
wantNumSpans: 2053,
wantBatchCount: 4,
genNumSpans: 2053,
waitTime: 5100 * time.Millisecond,
},
{
name: "non-default ScheduledDelayMillis",
o: []sdktrace.BatchSpanProcessorOption{
sdktrace.WithScheduleDelayMillis(schDelay),
},
wantNumSpans: 2048,
wantNumSpans: 2053,
wantBatchCount: 4,
genNumSpans: 2053,
waitTime: waitTime,
},
{
name: "non-default MaxQueueSize and ScheduledDelayMillis",
@ -100,10 +96,9 @@ func TestNewBatchSpanProcessorWithOptions(t *testing.T) {
sdktrace.WithScheduleDelayMillis(schDelay),
sdktrace.WithMaxQueueSize(200),
},
wantNumSpans: 200,
wantNumSpans: 205,
wantBatchCount: 1,
genNumSpans: 205,
waitTime: waitTime,
},
{
name: "non-default MaxQueueSize, ScheduledDelayMillis and MaxExportBatchSize",
@ -112,10 +107,9 @@ func TestNewBatchSpanProcessorWithOptions(t *testing.T) {
sdktrace.WithMaxQueueSize(205),
sdktrace.WithMaxExportBatchSize(20),
},
wantNumSpans: 205,
wantNumSpans: 210,
wantBatchCount: 11,
genNumSpans: 210,
waitTime: waitTime,
},
{
name: "blocking option",
@ -128,7 +122,6 @@ func TestNewBatchSpanProcessorWithOptions(t *testing.T) {
wantNumSpans: 205,
wantBatchCount: 11,
genNumSpans: 205,
waitTime: waitTime,
},
{
name: "parallel span generation",
@ -136,10 +129,9 @@ func TestNewBatchSpanProcessorWithOptions(t *testing.T) {
sdktrace.WithScheduleDelayMillis(schDelay),
sdktrace.WithMaxQueueSize(200),
},
wantNumSpans: 200,
wantNumSpans: 205,
wantBatchCount: 1,
genNumSpans: 205,
waitTime: waitTime,
parallel: true,
},
{
@ -152,38 +144,31 @@ func TestNewBatchSpanProcessorWithOptions(t *testing.T) {
wantNumSpans: 2000,
wantBatchCount: 10,
genNumSpans: 2000,
waitTime: waitTime,
parallel: true,
},
}
for _, option := range options {
te := testBatchExporter{}
tp := basicProvider(t)
ssp := createAndRegisterBatchSP(t, option, &te)
if ssp == nil {
t.Fatalf("%s: Error creating new instance of BatchSpanProcessor\n", option.name)
}
tp.RegisterSpanProcessor(ssp)
tr := tp.Tracer("BatchSpanProcessorWithOptions")
t.Run(option.name, func(t *testing.T) {
te := testBatchExporter{}
tp := basicProvider(t)
ssp := createAndRegisterBatchSP(t, option, &te)
if ssp == nil {
t.Fatalf("%s: Error creating new instance of BatchSpanProcessor\n", option.name)
}
tp.RegisterSpanProcessor(ssp)
tr := tp.Tracer("BatchSpanProcessorWithOptions")
generateSpan(t, option.parallel, tr, option)
generateSpan(t, option.parallel, tr, option)
time.Sleep(option.waitTime)
tp.UnregisterSpanProcessor(ssp)
tp.UnregisterSpanProcessor(ssp)
gotNumOfSpans := te.len()
if option.wantNumSpans != gotNumOfSpans {
t.Errorf("%s: number of exported span: got %+v, want %+v\n", option.name, gotNumOfSpans, option.wantNumSpans)
}
gotBatchCount := te.getBatchCount()
if gotBatchCount < option.wantBatchCount {
t.Errorf("%s: number batches: got %+v, want >= %+v\n", option.name, gotBatchCount, option.wantBatchCount)
t.Errorf("Batches %v\n", te.sizes)
}
tp.UnregisterSpanProcessor(ssp)
// TODO(https://github.com/open-telemetry/opentelemetry-go/issues/741)
// Restore some sort of test here.
_ = option.wantNumSpans
_ = option.wantBatchCount
_ = te.len() // gotNumOfSpans
_ = te.getBatchCount() // gotBatchCount
})
}
}