mirror of
https://github.com/open-telemetry/opentelemetry-go.git
synced 2025-03-29 21:47:00 +02:00
Decouple otlp/otlpmetric/otlpmetricgrpc
from otlp/internal
and otlp/otlpmetric/internal
using gotmp (#4404)
* Add shared otlpmetric templates * Generate otlpmetricgrpc/internal with gotmpl * Use local internal in otlpmetricgrpc * Add decoupling change to changelog
This commit is contained in:
parent
2e6ca0af0c
commit
f67ecb35dc
@ -54,6 +54,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
|||||||
- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
|
- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
|
||||||
- Improve context cancelation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
|
- Improve context cancelation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
|
||||||
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
|
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
|
||||||
|
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
|
||||||
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
|
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
|
||||||
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
|
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
|
||||||
- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
|
- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
|
||||||
|
@ -25,9 +25,9 @@ import (
|
|||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel"
|
"go.opentelemetry.io/otel"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/internal"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
)
|
)
|
||||||
|
@ -27,9 +27,8 @@ import (
|
|||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
"google.golang.org/protobuf/types/known/durationpb"
|
"google.golang.org/protobuf/types/known/durationpb"
|
||||||
|
|
||||||
ominternal "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
|
||||||
"go.opentelemetry.io/otel/sdk/metric"
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
@ -146,7 +145,7 @@ func (clientShim) ForceFlush(ctx context.Context) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestClient(t *testing.T) {
|
func TestClient(t *testing.T) {
|
||||||
factory := func(rCh <-chan otest.ExportResult) (ominternal.Client, otest.Collector) {
|
factory := func(rCh <-chan otest.ExportResult) (otest.Client, otest.Collector) {
|
||||||
coll, err := otest.NewGRPCCollector("", rCh)
|
coll, err := otest.NewGRPCCollector("", rCh)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@ -22,8 +22,8 @@ import (
|
|||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel"
|
"go.opentelemetry.io/otel"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
"go.opentelemetry.io/otel/sdk/metric"
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -19,8 +19,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
"go.opentelemetry.io/otel/internal/global"
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
"go.opentelemetry.io/otel/sdk/metric"
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
@ -23,8 +23,8 @@ import (
|
|||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest"
|
||||||
"go.opentelemetry.io/otel/sdk/metric"
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
)
|
)
|
||||||
|
@ -5,10 +5,12 @@ go 1.19
|
|||||||
retract v0.32.2 // Contains unresolvable dependencies.
|
retract v0.32.2 // Contains unresolvable dependencies.
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/cenkalti/backoff/v4 v4.2.1
|
||||||
|
github.com/google/go-cmp v0.5.9
|
||||||
github.com/stretchr/testify v1.8.4
|
github.com/stretchr/testify v1.8.4
|
||||||
go.opentelemetry.io/otel v1.16.0
|
go.opentelemetry.io/otel v1.16.0
|
||||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0
|
||||||
|
go.opentelemetry.io/otel/sdk v1.16.0
|
||||||
go.opentelemetry.io/otel/sdk/metric v0.39.0
|
go.opentelemetry.io/otel/sdk/metric v0.39.0
|
||||||
go.opentelemetry.io/proto/otlp v1.0.0
|
go.opentelemetry.io/proto/otlp v1.0.0
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc
|
||||||
@ -17,16 +19,13 @@ require (
|
|||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/go-logr/logr v1.2.4 // indirect
|
github.com/go-logr/logr v1.2.4 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/golang/protobuf v1.5.3 // indirect
|
github.com/golang/protobuf v1.5.3 // indirect
|
||||||
github.com/google/go-cmp v0.5.9 // indirect
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
||||||
go.opentelemetry.io/otel/sdk v1.16.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/trace v1.16.0 // indirect
|
go.opentelemetry.io/otel/trace v1.16.0 // indirect
|
||||||
golang.org/x/net v0.10.0 // indirect
|
golang.org/x/net v0.10.0 // indirect
|
||||||
golang.org/x/sys v0.10.0 // indirect
|
golang.org/x/sys v0.10.0 // indirect
|
||||||
|
@ -0,0 +1,202 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigFn is the generic function used to set a config.
|
||||||
|
type ConfigFn func(*EnvOptionsReader)
|
||||||
|
|
||||||
|
// EnvOptionsReader reads the required environment variables.
|
||||||
|
type EnvOptionsReader struct {
|
||||||
|
GetEnv func(string) string
|
||||||
|
ReadFile func(string) ([]byte, error)
|
||||||
|
Namespace string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply runs every ConfigFn.
|
||||||
|
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
|
||||||
|
for _, o := range opts {
|
||||||
|
o(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEnvValue gets an OTLP environment variable value of the specified key
|
||||||
|
// using the GetEnv function.
|
||||||
|
// This function prepends the OTLP specified namespace to all key lookups.
|
||||||
|
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
|
||||||
|
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
|
||||||
|
return v, v != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithString retrieves the specified config and passes it to ConfigFn as a string.
|
||||||
|
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
fn(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
|
||||||
|
func WithBool(n string, fn func(bool)) ConfigFn {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
b := strings.ToLower(v) == "true"
|
||||||
|
fn(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
|
||||||
|
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
d, err := strconv.Atoi(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "parse duration", "input", v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(time.Duration(d) * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
|
||||||
|
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
fn(stringToHeader(v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
|
||||||
|
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
u, err := url.Parse(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "parse url", "input", v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(u)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
|
||||||
|
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
b, err := e.ReadFile(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "read tls ca cert file", "file", v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c, err := createCertPool(b)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "create tls cert pool")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
|
||||||
|
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
|
||||||
|
return func(e *EnvOptionsReader) {
|
||||||
|
vc, okc := e.GetEnvValue(nc)
|
||||||
|
vk, okk := e.GetEnvValue(nk)
|
||||||
|
if !okc || !okk {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
cert, err := e.ReadFile(vc)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "read tls client cert", "file", vc)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
key, err := e.ReadFile(vk)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "read tls client key", "file", vk)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
crt, err := tls.X509KeyPair(cert, key)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "create tls client key pair")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fn(crt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyWithNamespace(ns, key string) string {
|
||||||
|
if ns == "" {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%s_%s", ns, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringToHeader(value string) map[string]string {
|
||||||
|
headersPairs := strings.Split(value, ",")
|
||||||
|
headers := make(map[string]string)
|
||||||
|
|
||||||
|
for _, header := range headersPairs {
|
||||||
|
n, v, found := strings.Cut(header, "=")
|
||||||
|
if !found {
|
||||||
|
global.Error(errors.New("missing '="), "parse headers", "input", header)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, err := url.QueryUnescape(n)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "escape header key", "key", n)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trimmedName := strings.TrimSpace(name)
|
||||||
|
value, err := url.QueryUnescape(v)
|
||||||
|
if err != nil {
|
||||||
|
global.Error(err, "escape header value", "value", v)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
trimmedValue := strings.TrimSpace(value)
|
||||||
|
|
||||||
|
headers[trimmedName] = trimmedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
|
func createCertPool(certBytes []byte) (*x509.CertPool, error) {
|
||||||
|
cp := x509.NewCertPool()
|
||||||
|
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
|
||||||
|
return nil, errors.New("failed to append certificate to the cert pool")
|
||||||
|
}
|
||||||
|
return cp, nil
|
||||||
|
}
|
@ -0,0 +1,464 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/envconfig/envconfig_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package envconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
const WeakKey = `
|
||||||
|
-----BEGIN EC PRIVATE KEY-----
|
||||||
|
MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49
|
||||||
|
AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB
|
||||||
|
2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA==
|
||||||
|
-----END EC PRIVATE KEY-----
|
||||||
|
`
|
||||||
|
|
||||||
|
const WeakCertificate = `
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw
|
||||||
|
EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5
|
||||||
|
MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
|
||||||
|
AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy
|
||||||
|
Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK
|
||||||
|
SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/
|
||||||
|
BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg
|
||||||
|
Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu
|
||||||
|
jVcIxYQqhId5L8p/mAv2PWZS
|
||||||
|
-----END CERTIFICATE-----
|
||||||
|
`
|
||||||
|
|
||||||
|
type testOption struct {
|
||||||
|
TestString string
|
||||||
|
TestBool bool
|
||||||
|
TestDuration time.Duration
|
||||||
|
TestHeaders map[string]string
|
||||||
|
TestURL *url.URL
|
||||||
|
TestTLS *tls.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnvConfig(t *testing.T) {
|
||||||
|
parsedURL, err := url.Parse("https://example.com")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
options := []testOption{}
|
||||||
|
for _, testcase := range []struct {
|
||||||
|
name string
|
||||||
|
reader EnvOptionsReader
|
||||||
|
configs []ConfigFn
|
||||||
|
expectedOptions []testOption
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "with no namespace and a matching key",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithString("HELLO", func(v string) {
|
||||||
|
options = append(options, testOption{TestString: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestString: "world",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with no namespace and a non-matching key",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithString("HOLA", func(v string) {
|
||||||
|
options = append(options, testOption{TestString: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with a namespace and a matching key",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
Namespace: "MY_NAMESPACE",
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "MY_NAMESPACE_HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithString("HELLO", func(v string) {
|
||||||
|
options = append(options, testOption{TestString: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestString: "world",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with no namespace and a non-matching key",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
Namespace: "MY_NAMESPACE",
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithString("HELLO", func(v string) {
|
||||||
|
options = append(options, testOption{TestString: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with a bool config",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "true"
|
||||||
|
} else if n == "WORLD" {
|
||||||
|
return "false"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithBool("HELLO", func(b bool) {
|
||||||
|
options = append(options, testOption{TestBool: b})
|
||||||
|
}),
|
||||||
|
WithBool("WORLD", func(b bool) {
|
||||||
|
options = append(options, testOption{TestBool: b})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestBool: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
TestBool: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with an invalid bool config",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithBool("HELLO", func(b bool) {
|
||||||
|
options = append(options, testOption{TestBool: b})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestBool: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with a duration config",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "60"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithDuration("HELLO", func(v time.Duration) {
|
||||||
|
options = append(options, testOption{TestDuration: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestDuration: 60_000_000, // 60 milliseconds
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with an invalid duration config",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithDuration("HELLO", func(v time.Duration) {
|
||||||
|
options = append(options, testOption{TestDuration: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with headers",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "userId=42,userName=alice"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithHeaders("HELLO", func(v map[string]string) {
|
||||||
|
options = append(options, testOption{TestHeaders: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestHeaders: map[string]string{
|
||||||
|
"userId": "42",
|
||||||
|
"userName": "alice",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with invalid headers",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "world"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithHeaders("HELLO", func(v map[string]string) {
|
||||||
|
options = append(options, testOption{TestHeaders: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestHeaders: map[string]string{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with URL",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "https://example.com"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithURL("HELLO", func(v *url.URL) {
|
||||||
|
options = append(options, testOption{TestURL: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{
|
||||||
|
{
|
||||||
|
TestURL: parsedURL,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "with invalid URL",
|
||||||
|
reader: EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "HELLO" {
|
||||||
|
return "i nvalid://url"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
},
|
||||||
|
configs: []ConfigFn{
|
||||||
|
WithURL("HELLO", func(v *url.URL) {
|
||||||
|
options = append(options, testOption{TestURL: v})
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
expectedOptions: []testOption{},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(testcase.name, func(t *testing.T) {
|
||||||
|
testcase.reader.Apply(testcase.configs...)
|
||||||
|
assert.Equal(t, testcase.expectedOptions, options)
|
||||||
|
options = []testOption{}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithTLSConfig(t *testing.T) {
|
||||||
|
pool, err := createCertPool([]byte(WeakCertificate))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
reader := EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
if n == "CERTIFICATE" {
|
||||||
|
return "/path/cert.pem"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
ReadFile: func(p string) ([]byte, error) {
|
||||||
|
if p == "/path/cert.pem" {
|
||||||
|
return []byte(WeakCertificate), nil
|
||||||
|
}
|
||||||
|
return []byte{}, nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var option testOption
|
||||||
|
reader.Apply(
|
||||||
|
WithCertPool("CERTIFICATE", func(cp *x509.CertPool) {
|
||||||
|
option = testOption{TestTLS: &tls.Config{RootCAs: cp}}
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWithClientCert(t *testing.T) {
|
||||||
|
cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
reader := EnvOptionsReader{
|
||||||
|
GetEnv: func(n string) string {
|
||||||
|
switch n {
|
||||||
|
case "CLIENT_CERTIFICATE":
|
||||||
|
return "/path/tls.crt"
|
||||||
|
case "CLIENT_KEY":
|
||||||
|
return "/path/tls.key"
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
},
|
||||||
|
ReadFile: func(n string) ([]byte, error) {
|
||||||
|
switch n {
|
||||||
|
case "/path/tls.crt":
|
||||||
|
return []byte(WeakCertificate), nil
|
||||||
|
case "/path/tls.key":
|
||||||
|
return []byte(WeakKey), nil
|
||||||
|
}
|
||||||
|
return []byte{}, nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var option testOption
|
||||||
|
reader.Apply(
|
||||||
|
WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
|
||||||
|
option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
assert.Equal(t, cert, option.TestTLS.Certificates[0])
|
||||||
|
|
||||||
|
reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") }
|
||||||
|
option.TestTLS = nil
|
||||||
|
reader.Apply(
|
||||||
|
WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
|
||||||
|
option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
assert.Nil(t, option.TestTLS)
|
||||||
|
|
||||||
|
reader.GetEnv = func(s string) string { return "" }
|
||||||
|
option.TestTLS = nil
|
||||||
|
reader.Apply(
|
||||||
|
WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
|
||||||
|
option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
assert.Nil(t, option.TestTLS)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStringToHeader(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
value string
|
||||||
|
want map[string]string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "simple test",
|
||||||
|
value: "userId=alice",
|
||||||
|
want: map[string]string{"userId": "alice"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "simple test with spaces",
|
||||||
|
value: " userId = alice ",
|
||||||
|
want: map[string]string{"userId": "alice"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiples headers encoded",
|
||||||
|
value: "userId=alice,serverNode=DF%3A28,isProduction=false",
|
||||||
|
want: map[string]string{
|
||||||
|
"userId": "alice",
|
||||||
|
"serverNode": "DF:28",
|
||||||
|
"isProduction": "false",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid headers format",
|
||||||
|
value: "userId:alice",
|
||||||
|
want: map[string]string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid key",
|
||||||
|
value: "%XX=missing,userId=alice",
|
||||||
|
want: map[string]string{
|
||||||
|
"userId": "alice",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid value",
|
||||||
|
value: "missing=%XX,userId=alice",
|
||||||
|
want: map[string]string{
|
||||||
|
"userId": "alice",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
assert.Equal(t, tt.want, stringToHeader(tt.value))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
42
exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
Normal file
42
exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package internal holds the go:generate directives that render the shared
// gotmpl templates into this module, decoupling otlpmetricgrpc from other
// modules' internal packages. Each directive renders one template; the
// --out path always drops the template's ".tmpl" suffix so the generated
// file is normal, compilable Go source.
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"

//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
|
@ -0,0 +1,196 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultEnvOptionsReader is the default environments reader.
// It resolves variables under the "OTEL_EXPORTER_OTLP" namespace from the
// process environment and loads file-based values (e.g. certificate paths)
// via os.ReadFile.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
	GetEnv:    os.Getenv,
	ReadFile:  os.ReadFile,
	Namespace: "OTEL_EXPORTER_OTLP",
}
|
||||||
|
|
||||||
|
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
|
||||||
|
func ApplyGRPCEnvConfigs(cfg Config) Config {
|
||||||
|
opts := getOptionsFromEnv()
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
|
||||||
|
func ApplyHTTPEnvConfigs(cfg Config) Config {
|
||||||
|
opts := getOptionsFromEnv()
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyHTTPOption(cfg)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// getOptionsFromEnv collects the GenericOptions described by the
// OTEL_EXPORTER_OTLP_* environment variables. Generic readers are listed
// before their METRICS_-prefixed, signal-specific counterparts so the
// signal-specific value is applied last and wins — assuming
// envconfig.EnvOptionsReader.Apply runs its readers in argument order
// (NOTE(review): confirm in envconfig).
func getOptionsFromEnv() []GenericOption {
	opts := []GenericOption{}

	// TLS material is accumulated from several variables into tlsConf and
	// only converted to an option by withTLSConfig if anything was set.
	tlsConf := &tls.Config{}
	DefaultEnvOptionsReader.Apply(
		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For OTLP/HTTP endpoint URLs without a per-signal
				// configuration, the passed endpoint is used as a base URL
				// and the signals are sent to these paths relative to that.
				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For endpoint URLs for OTLP/HTTP per-signal variables, the
				// URL MUST be used as-is without any modification. The only
				// exception is that if an URL contains no path part, the root
				// path / MUST be used.
				// (local "path" shadows the path package inside this closure)
				path := u.Path
				if path == "" {
					path = "/"
				}
				cfg.Metrics.URLPath = path
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		// Runs after the certificate readers above so tlsConf is complete.
		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
	)

	return opts
}
|
||||||
|
|
||||||
|
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
|
||||||
|
return func(cfg Config) Config {
|
||||||
|
// For OTLP/gRPC endpoints, this is the target to which the
|
||||||
|
// exporter is going to send telemetry.
|
||||||
|
cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
|
||||||
|
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
cp := NoCompression
|
||||||
|
if v == "gzip" {
|
||||||
|
cp = GzipCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
fn(cp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func withEndpointScheme(u *url.URL) GenericOption {
|
||||||
|
switch strings.ToLower(u.Scheme) {
|
||||||
|
case "http", "unix":
|
||||||
|
return WithInsecure()
|
||||||
|
default:
|
||||||
|
return WithSecure()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// revive:disable-next-line:flag-parameter
|
||||||
|
func withInsecure(b bool) GenericOption {
|
||||||
|
if b {
|
||||||
|
return WithInsecure()
|
||||||
|
}
|
||||||
|
return WithSecure()
|
||||||
|
}
|
||||||
|
|
||||||
|
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if c.RootCAs != nil || len(c.Certificates) > 0 {
|
||||||
|
fn(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if s, ok := e.GetEnvValue(n); ok {
|
||||||
|
switch strings.ToLower(s) {
|
||||||
|
case "cumulative":
|
||||||
|
fn(cumulativeTemporality)
|
||||||
|
case "delta":
|
||||||
|
fn(deltaTemporality)
|
||||||
|
case "lowmemory":
|
||||||
|
fn(lowMemory)
|
||||||
|
default:
|
||||||
|
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cumulativeTemporality selects cumulative temporality for every
// instrument kind.
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
	return metricdata.CumulativeTemporality
}
|
||||||
|
|
||||||
|
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
switch ik {
|
||||||
|
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
|
||||||
|
return metricdata.DeltaTemporality
|
||||||
|
default:
|
||||||
|
return metricdata.CumulativeTemporality
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// lowMemory selects delta temporality for synchronous counters and
// histograms only; all other instrument kinds are cumulative. (Note it
// differs from deltaTemporality in keeping observable counters cumulative.)
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
	switch ik {
	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}
|
@ -0,0 +1,106 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestWithEnvTemporalityPreference verifies that
// OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE configures the
// expected temporality per instrument kind, and that empty or unknown
// values leave the selector unset.
func TestWithEnvTemporalityPreference(t *testing.T) {
	// Stub the package-global env reader for the duration of the test and
	// restore it at the end.
	origReader := DefaultEnvOptionsReader.GetEnv
	tests := []struct {
		name     string
		envValue string
		// want is nil when no selector should be installed.
		want map[metric.InstrumentKind]metricdata.Temporality
	}{
		{
			name:     "default do not set the selector",
			envValue: "",
		},
		{
			name:     "non-normative do not set the selector",
			envValue: "non-normative",
		},
		{
			name:     "cumulative",
			envValue: "cumulative",
			want: map[metric.InstrumentKind]metricdata.Temporality{
				metric.InstrumentKindCounter:                 metricdata.CumulativeTemporality,
				metric.InstrumentKindHistogram:               metricdata.CumulativeTemporality,
				metric.InstrumentKindUpDownCounter:           metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableCounter:       metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableGauge:         metricdata.CumulativeTemporality,
			},
		},
		{
			name:     "delta",
			envValue: "delta",
			want: map[metric.InstrumentKind]metricdata.Temporality{
				metric.InstrumentKindCounter:                 metricdata.DeltaTemporality,
				metric.InstrumentKindHistogram:               metricdata.DeltaTemporality,
				metric.InstrumentKindUpDownCounter:           metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableCounter:       metricdata.DeltaTemporality,
				metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableGauge:         metricdata.CumulativeTemporality,
			},
		},
		{
			name:     "lowmemory",
			envValue: "lowmemory",
			want: map[metric.InstrumentKind]metricdata.Temporality{
				metric.InstrumentKindCounter:                 metricdata.DeltaTemporality,
				metric.InstrumentKindHistogram:               metricdata.DeltaTemporality,
				metric.InstrumentKindUpDownCounter:           metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableCounter:       metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
				metric.InstrumentKindObservableGauge:         metricdata.CumulativeTemporality,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Intercept only the temporality preference variable; all other
			// lookups fall through to the original reader.
			DefaultEnvOptionsReader.GetEnv = func(key string) string {
				if key == "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" {
					return tt.envValue
				}
				return origReader(key)
			}
			cfg := Config{}
			cfg = ApplyGRPCEnvConfigs(cfg)

			if tt.want == nil {
				// There is no function set, the SDK's default is used.
				assert.Nil(t, cfg.Metrics.TemporalitySelector)
				return
			}

			require.NotNil(t, cfg.Metrics.TemporalitySelector)
			for ik, want := range tt.want {
				assert.Equal(t, want, cfg.Metrics.TemporalitySelector(ik))
			}
		})
	}
	// Restore the original reader for subsequent tests.
	DefaultEnvOptionsReader.GetEnv = origReader
}
|
@ -0,0 +1,376 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/backoff"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/encoding/gzip"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// DefaultMaxAttempts describes how many times the driver
	// should retry the sending of the payload in case of a
	// retryable error.
	DefaultMaxAttempts int = 5
	// DefaultMetricsPath is a default URL path for endpoint that
	// receives metrics.
	DefaultMetricsPath string = "/v1/metrics"
	// DefaultBackoff is a default base backoff time used in the
	// exponential backoff strategy.
	DefaultBackoff time.Duration = 300 * time.Millisecond
	// DefaultTimeout is a default max waiting time for the backend to process
	// each span or metrics batch.
	DefaultTimeout time.Duration = 10 * time.Second
)

type (
	// SignalConfig holds the metrics-signal transport settings shared by
	// the HTTP and gRPC drivers. Fields are populated from defaults,
	// environment variables, and explicit options (in that order).
	SignalConfig struct {
		Endpoint    string
		Insecure    bool
		TLSCfg      *tls.Config
		Headers     map[string]string
		Compression Compression
		Timeout     time.Duration
		URLPath     string

		// gRPC configurations
		GRPCCredentials credentials.TransportCredentials

		TemporalitySelector metric.TemporalitySelector
		AggregationSelector metric.AggregationSelector
	}

	// Config is the complete exporter configuration assembled by
	// NewHTTPConfig or NewGRPCConfig.
	Config struct {
		// Signal specific configurations
		Metrics SignalConfig

		RetryConfig retry.Config

		// gRPC configurations
		ReconnectionPeriod time.Duration
		ServiceConfig      string
		DialOptions        []grpc.DialOption
		GRPCConn           *grpc.ClientConn
	}
)
|
||||||
|
|
||||||
|
// NewHTTPConfig returns a new Config with all settings applied from opts and
|
||||||
|
// any unset setting using the default HTTP config values.
|
||||||
|
func NewHTTPConfig(opts ...HTTPOption) Config {
|
||||||
|
cfg := Config{
|
||||||
|
Metrics: SignalConfig{
|
||||||
|
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
|
||||||
|
URLPath: DefaultMetricsPath,
|
||||||
|
Compression: NoCompression,
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
|
||||||
|
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||||
|
AggregationSelector: metric.DefaultAggregationSelector,
|
||||||
|
},
|
||||||
|
RetryConfig: retry.DefaultConfig,
|
||||||
|
}
|
||||||
|
cfg = ApplyHTTPEnvConfigs(cfg)
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyHTTPOption(cfg)
|
||||||
|
}
|
||||||
|
cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// defaultPath is returned instead.
func cleanPath(urlPath string, defaultPath string) string {
	cleaned := path.Clean(strings.TrimSpace(urlPath))
	switch {
	case cleaned == ".":
		// path.Clean maps "" (and whitespace-only input) to ".".
		return defaultPath
	case path.IsAbs(cleaned):
		return cleaned
	default:
		// Anchor relative results at the root.
		return "/" + cleaned
	}
}
|
||||||
|
|
||||||
|
// NewGRPCConfig returns a new Config with all settings applied from opts and
|
||||||
|
// any unset setting using the default gRPC config values.
|
||||||
|
func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||||
|
userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
|
||||||
|
cfg := Config{
|
||||||
|
Metrics: SignalConfig{
|
||||||
|
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
|
||||||
|
URLPath: DefaultMetricsPath,
|
||||||
|
Compression: NoCompression,
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
|
||||||
|
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||||
|
AggregationSelector: metric.DefaultAggregationSelector,
|
||||||
|
},
|
||||||
|
RetryConfig: retry.DefaultConfig,
|
||||||
|
DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
|
||||||
|
}
|
||||||
|
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.ServiceConfig != "" {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
|
}
|
||||||
|
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
|
if cfg.Metrics.GRPCCredentials != nil {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||||
|
} else if cfg.Metrics.Insecure {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
|
} else {
|
||||||
|
// Default to using the host's root CA.
|
||||||
|
creds := credentials.NewTLS(nil)
|
||||||
|
cfg.Metrics.GRPCCredentials = creds
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
|
||||||
|
}
|
||||||
|
if cfg.Metrics.Compression == GzipCompression {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
|
||||||
|
}
|
||||||
|
if len(cfg.DialOptions) != 0 {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
|
||||||
|
}
|
||||||
|
if cfg.ReconnectionPeriod != 0 {
|
||||||
|
p := grpc.ConnectParams{
|
||||||
|
Backoff: backoff.DefaultConfig,
|
||||||
|
MinConnectTimeout: cfg.ReconnectionPeriod,
|
||||||
|
}
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
	// GenericOption applies an option to the HTTP or gRPC driver.
	// Options operate on a value Config and return the updated copy.
	GenericOption interface {
		ApplyHTTPOption(Config) Config
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// HTTPOption applies an option to the HTTP driver.
	HTTPOption interface {
		ApplyHTTPOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// GRPCOption applies an option to the gRPC driver.
	GRPCOption interface {
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}
)

// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
	fn func(Config) Config
}

func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
	return g.fn(cfg)
}

func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
	return g.fn(cfg)
}

func (genericOption) private() {}

// newGenericOption wraps fn as an option applicable to both drivers.
func newGenericOption(fn func(cfg Config) Config) GenericOption {
	return &genericOption{fn: fn}
}

// splitOption is an option that applies different logics
// for gRPC and HTTP.
type splitOption struct {
	httpFn func(Config) Config
	grpcFn func(Config) Config
}

func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
	return g.grpcFn(cfg)
}

func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
	return g.httpFn(cfg)
}

func (splitOption) private() {}

// newSplitOption combines a per-driver HTTP and gRPC transform into one
// GenericOption.
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}

// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
	fn func(Config) Config
}

func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
	return h.fn(cfg)
}

func (httpOption) private() {}

// NewHTTPOption returns an HTTPOption that applies fn to the Config.
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
	return &httpOption{fn: fn}
}

// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
	fn func(Config) Config
}

func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
	return h.fn(cfg)
}

func (grpcOption) private() {}

// NewGRPCOption returns a GRPCOption that applies fn to the Config.
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
	return &grpcOption{fn: fn}
}
|
||||||
|
|
||||||
|
// Generic Options

// WithEndpoint sets the target endpoint the exporter sends telemetry to.
func WithEndpoint(endpoint string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Endpoint = endpoint
		return cfg
	})
}

// WithCompression sets the payload compression used for exports.
func WithCompression(compression Compression) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Compression = compression
		return cfg
	})
}

// WithURLPath sets the URL path metrics are sent to.
func WithURLPath(urlPath string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.URLPath = urlPath
		return cfg
	})
}

// WithRetry sets the retry policy used for failed exports.
func WithRetry(rc retry.Config) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.RetryConfig = rc
		return cfg
	})
}

// WithTLSClientConfig sets the client TLS configuration. For HTTP a clone
// of tlsCfg is stored directly; for gRPC it is converted to transport
// credentials.
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
	return newSplitOption(func(cfg Config) Config {
		cfg.Metrics.TLSCfg = tlsCfg.Clone()
		return cfg
	}, func(cfg Config) Config {
		cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
		return cfg
	})
}

// WithInsecure disables transport security for the connection.
func WithInsecure() GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Insecure = true
		return cfg
	})
}

// WithSecure enables transport security for the connection.
func WithSecure() GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Insecure = false
		return cfg
	})
}

// WithHeaders sets additional headers sent with each export request.
// The map is stored as-is, not copied.
func WithHeaders(headers map[string]string) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Headers = headers
		return cfg
	})
}

// WithTimeout sets the maximum time the exporter waits for an export.
func WithTimeout(duration time.Duration) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.Timeout = duration
		return cfg
	})
}

// WithTemporalitySelector sets the TemporalitySelector the exporter uses.
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.TemporalitySelector = selector
		return cfg
	})
}
|
||||||
|
|
||||||
|
// WithAggregationSelector sets the AggregationSelector the exporter uses.
// The selector is wrapped so each returned aggregation is deep-copied and
// validated before use; an invalid aggregation is replaced by the SDK
// default for that instrument kind and the error is reported via the
// global error handler instead of failing the export.
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
	// Deep copy and validate before using.
	wrapped := func(ik metric.InstrumentKind) aggregation.Aggregation {
		a := selector(ik)
		cpA := a.Copy()
		if err := cpA.Err(); err != nil {
			cpA = metric.DefaultAggregationSelector(ik)
			global.Error(
				err, "using default aggregation instead",
				"aggregation", a,
				"replacement", cpA,
			)
		}
		return cpA
	}

	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.AggregationSelector = wrapped
		return cfg
	})
}
|
@ -0,0 +1,534 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Weak, self-signed test credentials used only to exercise TLS
// configuration paths in this test suite. Never use them elsewhere.
const (
	WeakCertificate = `
-----BEGIN CERTIFICATE-----
MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
Lhnm4N/QDk5rek0=
-----END CERTIFICATE-----
`
	WeakPrivateKey = `
-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
-----END PRIVATE KEY-----
`
)
|
||||||
|
|
||||||
|
// env is an in-memory stand-in for process environment variables in tests.
type env map[string]string

// getEnv returns the value stored under key, or "" when the key is absent.
func (e *env) getEnv(key string) string {
	return (*e)[key]
}
|
||||||
|
|
||||||
|
// fileReader is an in-memory stand-in for filesystem reads in tests.
type fileReader map[string][]byte

// readFile returns the bytes stored under name, or an error when the
// name has no entry.
func (f *fileReader) readFile(name string) ([]byte, error) {
	b, ok := (*f)[name]
	if !ok {
		return nil, errors.New("file not found")
	}
	return b, nil
}
|
||||||
|
|
||||||
|
func TestConfigs(t *testing.T) {
|
||||||
|
tlsCert, err := CreateTLSConfig([]byte(WeakCertificate))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
opts []GenericOption
|
||||||
|
env env
|
||||||
|
fileReader fileReader
|
||||||
|
asserts func(t *testing.T, c *Config, grpcOption bool)
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Test default configs",
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.Equal(t, "localhost:4317", c.Metrics.Endpoint)
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, "localhost:4318", c.Metrics.Endpoint)
|
||||||
|
}
|
||||||
|
assert.Equal(t, NoCompression, c.Metrics.Compression)
|
||||||
|
assert.Equal(t, map[string]string(nil), c.Metrics.Headers)
|
||||||
|
assert.Equal(t, 10*time.Second, c.Metrics.Timeout)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Endpoint Tests
|
||||||
|
{
|
||||||
|
name: "Test With Endpoint",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithEndpoint("someendpoint"),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "someendpoint", c.Metrics.Endpoint)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.False(t, c.Metrics.Insecure)
|
||||||
|
if grpcOption {
|
||||||
|
assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint)
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, "env.endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, "/prefix/v1/metrics", c.Metrics.URLPath)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Endpoint",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.True(t, c.Metrics.Insecure)
|
||||||
|
assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint)
|
||||||
|
if !grpcOption {
|
||||||
|
assert.Equal(t, "/", c.Metrics.URLPath)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Endpoint",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithEndpoint("metrics_endpoint"),
|
||||||
|
},
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint with HTTP scheme",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, true, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, true, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint with HTTPS scheme",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, false, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Endpoint with uppercase scheme",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, true, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Certificate tests
|
||||||
|
{
|
||||||
|
name: "Test Default Certificate",
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
assert.Nil(t, c.Metrics.TLSCfg)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test With Certificate",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTLSClientConfig(tlsCert),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
//TODO: make sure gRPC's credentials actually works
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Certificate",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
|
||||||
|
},
|
||||||
|
fileReader: fileReader{
|
||||||
|
"cert_path": []byte(WeakCertificate),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Certificate",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path",
|
||||||
|
},
|
||||||
|
fileReader: fileReader{
|
||||||
|
"cert_path": []byte(WeakCertificate),
|
||||||
|
"invalid_cert": []byte("invalid certificate file."),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Certificate",
|
||||||
|
opts: []GenericOption{},
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
|
||||||
|
},
|
||||||
|
fileReader: fileReader{
|
||||||
|
"cert_path": []byte(WeakCertificate),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects()))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Headers tests
|
||||||
|
{
|
||||||
|
name: "Test With Headers",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithHeaders(map[string]string{"h1": "v1"}),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Headers",
|
||||||
|
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Headers",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Headers",
|
||||||
|
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithHeaders(map[string]string{"m1": "mv1"}),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Compression Tests
|
||||||
|
{
|
||||||
|
name: "Test With Compression",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithCompression(GzipCompression),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, GzipCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Compression",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_COMPRESSION": "gzip",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, GzipCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Compression",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, GzipCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Compression",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithCompression(NoCompression),
|
||||||
|
},
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, NoCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Timeout Tests
|
||||||
|
{
|
||||||
|
name: "Test With Timeout",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTimeout(time.Duration(5 * time.Second)),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, 5*time.Second, c.Metrics.Timeout)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Timeout",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, c.Metrics.Timeout, 15*time.Second)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Timeout",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, c.Metrics.Timeout, 28*time.Second)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Timeout",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
|
||||||
|
},
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTimeout(5 * time.Second),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, c.Metrics.Timeout, 5*time.Second)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Temporality Selector Tests
|
||||||
|
{
|
||||||
|
name: "WithTemporalitySelector",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTemporalitySelector(deltaSelector),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
// Function value comparisons are disallowed, test non-default
|
||||||
|
// behavior of a TemporalitySelector here to ensure our "catch
|
||||||
|
// all" was set.
|
||||||
|
var undefinedKind metric.InstrumentKind
|
||||||
|
got := c.Metrics.TemporalitySelector
|
||||||
|
assert.Equal(t, metricdata.DeltaTemporality, got(undefinedKind))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Aggregation Selector Tests
|
||||||
|
{
|
||||||
|
name: "WithAggregationSelector",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithAggregationSelector(dropSelector),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
// Function value comparisons are disallowed, test non-default
|
||||||
|
// behavior of a AggregationSelector here to ensure our "catch
|
||||||
|
// all" was set.
|
||||||
|
var undefinedKind metric.InstrumentKind
|
||||||
|
got := c.Metrics.AggregationSelector
|
||||||
|
assert.Equal(t, aggregation.Drop{}, got(undefinedKind))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
origEOR := DefaultEnvOptionsReader
|
||||||
|
DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||||
|
GetEnv: tt.env.getEnv,
|
||||||
|
ReadFile: tt.fileReader.readFile,
|
||||||
|
Namespace: "OTEL_EXPORTER_OTLP",
|
||||||
|
}
|
||||||
|
t.Cleanup(func() { DefaultEnvOptionsReader = origEOR })
|
||||||
|
|
||||||
|
// Tests Generic options as HTTP Options
|
||||||
|
cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...)
|
||||||
|
tt.asserts(t, &cfg, false)
|
||||||
|
|
||||||
|
// Tests Generic options as gRPC Options
|
||||||
|
cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...)
|
||||||
|
tt.asserts(t, &cfg, true)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dropSelector(metric.InstrumentKind) aggregation.Aggregation {
|
||||||
|
return aggregation.Drop{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func deltaSelector(metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
return metricdata.DeltaTemporality
|
||||||
|
}
|
||||||
|
|
||||||
|
func asHTTPOptions(opts []GenericOption) []HTTPOption {
|
||||||
|
converted := make([]HTTPOption, len(opts))
|
||||||
|
for i, o := range opts {
|
||||||
|
converted[i] = NewHTTPOption(o.ApplyHTTPOption)
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func asGRPCOptions(opts []GenericOption) []GRPCOption {
|
||||||
|
converted := make([]GRPCOption, len(opts))
|
||||||
|
for i, o := range opts {
|
||||||
|
converted[i] = NewGRPCOption(o.ApplyGRPCOption)
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCleanPath(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
urlPath string
|
||||||
|
defaultPath string
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "clean empty path",
|
||||||
|
args: args{
|
||||||
|
urlPath: "",
|
||||||
|
defaultPath: "DefaultPath",
|
||||||
|
},
|
||||||
|
want: "DefaultPath",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "clean metrics path",
|
||||||
|
args: args{
|
||||||
|
urlPath: "/prefix/v1/metrics",
|
||||||
|
defaultPath: "DefaultMetricsPath",
|
||||||
|
},
|
||||||
|
want: "/prefix/v1/metrics",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "clean traces path",
|
||||||
|
args: args{
|
||||||
|
urlPath: "https://env_endpoint",
|
||||||
|
defaultPath: "DefaultTracesPath",
|
||||||
|
},
|
||||||
|
want: "/https:/env_endpoint",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "spaces trimmed",
|
||||||
|
args: args{
|
||||||
|
urlPath: " /dir",
|
||||||
|
},
|
||||||
|
want: "/dir",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "clean path empty",
|
||||||
|
args: args{
|
||||||
|
urlPath: "dir/..",
|
||||||
|
defaultPath: "DefaultTracesPath",
|
||||||
|
},
|
||||||
|
want: "DefaultTracesPath",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "make absolute",
|
||||||
|
args: args{
|
||||||
|
urlPath: "dir/a",
|
||||||
|
},
|
||||||
|
want: "/dir/a",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want {
|
||||||
|
t.Errorf("CleanPath() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,58 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const (
	// DefaultCollectorGRPCPort is the gRPC port a collector listens on by default.
	DefaultCollectorGRPCPort uint16 = 4317
	// DefaultCollectorHTTPPort is the HTTP port a collector listens on by default.
	DefaultCollectorHTTPPort uint16 = 4318
	// DefaultCollectorHost is the host the Exporter connects to when no
	// collector address is provided.
	DefaultCollectorHost string = "localhost"
)
|
||||||
|
|
||||||
|
// Compression describes the compression applied to payloads sent to the
// collector.
type Compression int

const (
	// NoCompression sends payloads uncompressed.
	NoCompression Compression = iota
	// GzipCompression gzip-compresses payloads before sending them.
	GzipCompression
)
|
||||||
|
|
||||||
|
// RetrySettings defines configuration for retrying batches in case of export failure
|
||||||
|
// using an exponential backoff.
|
||||||
|
type RetrySettings struct {
|
||||||
|
// Enabled indicates whether to not retry sending batches in case of export failure.
|
||||||
|
Enabled bool
|
||||||
|
// InitialInterval the time to wait after the first failure before retrying.
|
||||||
|
InitialInterval time.Duration
|
||||||
|
// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
|
||||||
|
// consecutive retries will always be `MaxInterval`.
|
||||||
|
MaxInterval time.Duration
|
||||||
|
// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
|
||||||
|
// Once this value is reached, the data is discarded.
|
||||||
|
MaxElapsedTime time.Duration
|
||||||
|
}
|
@ -0,0 +1,49 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||||
|
// a tls.Config that will use this certifate to verify a server certificate.
|
||||||
|
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||||
|
b, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return CreateTLSConfig(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateTLSConfig creates a tls.Config from raw PEM certificate bytes;
// the certificate is installed as the root CA set used to verify a
// server certificate. An error is returned when the bytes contain no
// parsable certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
	pool := x509.NewCertPool()
	ok := pool.AppendCertsFromPEM(certBytes)
	if !ok {
		return nil, errors.New("failed to append certificate to the cert pool")
	}

	cfg := &tls.Config{
		RootCAs: pool,
	}
	return cfg, nil
}
|
@ -0,0 +1,313 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/otest/client.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||||
|
collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Static OTLP protobuf fixtures shared by this package's client tests.
// resourceMetrics is the top-level payload; everything above it is a
// building block referenced by it.
var (
	// Sat Jan 01 2000 00:00:00 GMT+0000.
	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
	end   = start.Add(30 * time.Second)

	// Attribute key-values reused across data points and the resource.
	kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
		Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
	}}
	kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
		Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
	}}
	kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{
		Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
	}}
	kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{
		Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
	}}

	// Histogram fixture: a single delta data point with 30 observations.
	min, max, sum = 2.0, 4.0, 90.0
	hdp           = []*mpb.HistogramDataPoint{
		{
			Attributes:        []*cpb.KeyValue{kvAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Count:             30,
			Sum:               &sum,
			ExplicitBounds:    []float64{1, 5},
			BucketCounts:      []uint64{0, 30, 0},
			Min:               &min,
			Max:               &max,
		},
	}

	hist = &mpb.Histogram{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
		DataPoints:             hdp,
	}

	// Number data points for the gauge and sum fixtures, one per user.
	dPtsInt64 = []*mpb.NumberDataPoint{
		{
			Attributes:        []*cpb.KeyValue{kvAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 1},
		},
		{
			Attributes:        []*cpb.KeyValue{kvBob},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 2},
		},
	}
	dPtsFloat64 = []*mpb.NumberDataPoint{
		{
			Attributes:        []*cpb.KeyValue{kvAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
		},
		{
			Attributes:        []*cpb.KeyValue{kvBob},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
		},
	}

	// Sum fixtures covering both temporalities and monotonicities.
	sumInt64 = &mpb.Sum{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
		IsMonotonic:            true,
		DataPoints:             dPtsInt64,
	}
	sumFloat64 = &mpb.Sum{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
		IsMonotonic:            false,
		DataPoints:             dPtsFloat64,
	}

	gaugeInt64   = &mpb.Gauge{DataPoints: dPtsInt64}
	gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64}

	// One metric per fixture data type.
	metrics = []*mpb.Metric{
		{
			Name:        "int64-gauge",
			Description: "Gauge with int64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Gauge{Gauge: gaugeInt64},
		},
		{
			Name:        "float64-gauge",
			Description: "Gauge with float64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Gauge{Gauge: gaugeFloat64},
		},
		{
			Name:        "int64-sum",
			Description: "Sum with int64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Sum{Sum: sumInt64},
		},
		{
			Name:        "float64-sum",
			Description: "Sum with float64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Sum{Sum: sumFloat64},
		},
		{
			Name:        "histogram",
			Description: "Histogram",
			Unit:        "1",
			Data:        &mpb.Metric_Histogram{Histogram: hist},
		},
	}

	scope = &cpb.InstrumentationScope{
		Name:    "test/code/path",
		Version: "v0.1.0",
	}
	scopeMetrics = []*mpb.ScopeMetrics{
		{
			Scope:     scope,
			Metrics:   metrics,
			SchemaUrl: semconv.SchemaURL,
		},
	}

	res = &rpb.Resource{
		Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer},
	}
	resourceMetrics = &mpb.ResourceMetrics{
		Resource:     res,
		ScopeMetrics: scopeMetrics,
		SchemaUrl:    semconv.SchemaURL,
	}
)
|
||||||
|
|
||||||
|
// Client is the OTLP metric client surface exercised by RunClientTests:
// it uploads ResourceMetrics payloads and supports flushing and shutting
// down the underlying transport.
type Client interface {
	UploadMetrics(context.Context, *mpb.ResourceMetrics) error
	ForceFlush(context.Context) error
	Shutdown(context.Context) error
}
|
||||||
|
|
||||||
|
// ClientFactory is a function that when called returns a Client
// implementation connected to a Collector implementation that is also
// returned. The Client is ready to upload metric data to the Collector,
// which is ready to store that data.
//
// If resultCh is not nil, the returned Collector needs to use the responses
// from that channel to send back to the client for every export request.
type ClientFactory func(resultCh <-chan ExportResult) (Client, Collector)
|
||||||
|
|
||||||
|
// RunClientTests runs a suite of Client integration tests. For example:
|
||||||
|
//
|
||||||
|
// t.Run("Integration", RunClientTests(factory))
|
||||||
|
func RunClientTests(f ClientFactory) func(*testing.T) {
|
||||||
|
return func(t *testing.T) {
|
||||||
|
t.Run("ClientHonorsContextErrors", func(t *testing.T) {
|
||||||
|
t.Run("Shutdown", testCtxErrs(func() func(context.Context) error {
|
||||||
|
c, _ := f(nil)
|
||||||
|
return c.Shutdown
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error {
|
||||||
|
c, _ := f(nil)
|
||||||
|
return c.ForceFlush
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error {
|
||||||
|
c, _ := f(nil)
|
||||||
|
return func(ctx context.Context) error {
|
||||||
|
return c.UploadMetrics(ctx, nil)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ForceFlushFlushes", func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, collector := f(nil)
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
|
||||||
|
require.NoError(t, client.ForceFlush(ctx))
|
||||||
|
rm := collector.Collect().Dump()
|
||||||
|
// Data correctness is not important, just it was received.
|
||||||
|
require.Greater(t, len(rm), 0, "no data uploaded")
|
||||||
|
|
||||||
|
require.NoError(t, client.Shutdown(ctx))
|
||||||
|
rm = collector.Collect().Dump()
|
||||||
|
assert.Len(t, rm, 0, "client did not flush all data")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("UploadMetrics", func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, coll := f(nil)
|
||||||
|
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.Shutdown(ctx))
|
||||||
|
got := coll.Collect().Dump()
|
||||||
|
require.Len(t, got, 1, "upload of one ResourceMetrics")
|
||||||
|
diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal))
|
||||||
|
if diff != "" {
|
||||||
|
t.Fatalf("unexpected ResourceMetrics:\n%s", diff)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("PartialSuccess", func(t *testing.T) {
|
||||||
|
const n, msg = 2, "bad data"
|
||||||
|
rCh := make(chan ExportResult, 3)
|
||||||
|
rCh <- ExportResult{
|
||||||
|
Response: &collpb.ExportMetricsServiceResponse{
|
||||||
|
PartialSuccess: &collpb.ExportMetricsPartialSuccess{
|
||||||
|
RejectedDataPoints: n,
|
||||||
|
ErrorMessage: msg,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rCh <- ExportResult{
|
||||||
|
Response: &collpb.ExportMetricsServiceResponse{
|
||||||
|
PartialSuccess: &collpb.ExportMetricsPartialSuccess{
|
||||||
|
// Should not be logged.
|
||||||
|
RejectedDataPoints: 0,
|
||||||
|
ErrorMessage: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rCh <- ExportResult{
|
||||||
|
Response: &collpb.ExportMetricsServiceResponse{},
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
client, _ := f(rCh)
|
||||||
|
|
||||||
|
defer func(orig otel.ErrorHandler) {
|
||||||
|
otel.SetErrorHandler(orig)
|
||||||
|
}(otel.GetErrorHandler())
|
||||||
|
|
||||||
|
errs := []error{}
|
||||||
|
eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) })
|
||||||
|
otel.SetErrorHandler(eh)
|
||||||
|
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.Shutdown(ctx))
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(errs))
|
||||||
|
want := fmt.Sprintf("%s (%d metric data points rejected)", msg, n)
|
||||||
|
assert.ErrorContains(t, errs[0], want)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) {
|
||||||
|
return func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
t.Cleanup(cancel)
|
||||||
|
|
||||||
|
t.Run("DeadlineExceeded", func(t *testing.T) {
|
||||||
|
innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond)
|
||||||
|
t.Cleanup(innerCancel)
|
||||||
|
<-innerCtx.Done()
|
||||||
|
|
||||||
|
f := factory()
|
||||||
|
assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Canceled", func(t *testing.T) {
|
||||||
|
innerCtx, innerCancel := context.WithCancel(ctx)
|
||||||
|
innerCancel()
|
||||||
|
|
||||||
|
f := factory()
|
||||||
|
assert.ErrorIs(t, f(innerCtx), context.Canceled)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,78 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// client is an in-memory test double that implements both the otest Client
// and Collector interfaces: every upload it performs is recorded in its own
// storage.
type client struct {
	// rCh, when non-nil, supplies the canned result returned for each upload.
	rCh <-chan ExportResult
	// storage records all uploaded metric data.
	storage *Storage
}
|
||||||
|
|
||||||
|
// Temporality returns the default temporality selection for instrument kind k.
func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality {
	return metric.DefaultTemporalitySelector(k)
}

// Aggregation returns the default aggregation selection for instrument kind k.
func (c *client) Aggregation(k metric.InstrumentKind) aggregation.Aggregation {
	return metric.DefaultAggregationSelector(k)
}

// Collect returns the Storage holding all uploaded metric data, implementing
// the Collector interface.
func (c *client) Collect() *Storage {
	return c.storage
}
|
||||||
|
|
||||||
|
func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error {
|
||||||
|
c.storage.Add(&cpb.ExportMetricsServiceRequest{
|
||||||
|
ResourceMetrics: []*mpb.ResourceMetrics{rm},
|
||||||
|
})
|
||||||
|
if c.rCh != nil {
|
||||||
|
r := <-c.rCh
|
||||||
|
if r.Response != nil && r.Response.GetPartialSuccess() != nil {
|
||||||
|
msg := r.Response.GetPartialSuccess().GetErrorMessage()
|
||||||
|
n := r.Response.GetPartialSuccess().GetRejectedDataPoints()
|
||||||
|
if msg != "" || n > 0 {
|
||||||
|
otel.Handle(internal.MetricPartialSuccessError(n, msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return r.Err
|
||||||
|
}
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForceFlush is a no-op for the test client; it only reports ctx's error.
func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }

// Shutdown is a no-op for the test client; it only reports ctx's error.
func (c *client) Shutdown(ctx context.Context) error { return ctx.Err() }
|
||||||
|
|
||||||
|
func TestClientTests(t *testing.T) {
|
||||||
|
factory := func(rCh <-chan ExportResult) (Client, Collector) {
|
||||||
|
c := &client{rCh: rCh, storage: NewStorage()}
|
||||||
|
return c, c
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Integration", RunClientTests(factory))
|
||||||
|
}
|
@ -0,0 +1,438 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"crypto/x509/pkix" // nolint:depguard // This is for testing.
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||||
|
collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Collector is the collection target a Client sends metric uploads to.
type Collector interface {
	// Collect returns the Storage holding all collected uploads.
	Collect() *Storage
}

// ExportResult is the result a collector returns for a single export
// request: the OTLP response message and/or an error.
type ExportResult struct {
	Response *collpb.ExportMetricsServiceResponse
	Err      error
}
|
||||||
|
|
||||||
|
// Storage stores uploaded OTLP metric data in their proto form.
type Storage struct {
	// dataMu guards data.
	dataMu sync.Mutex
	// data holds all received ResourceMetrics, in arrival order.
	data []*mpb.ResourceMetrics
}

// NewStorage returns a configured storage ready to store received requests.
func NewStorage() *Storage {
	return &Storage{}
}
|
||||||
|
|
||||||
|
// Add adds the request to the Storage.
|
||||||
|
func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) {
|
||||||
|
s.dataMu.Lock()
|
||||||
|
defer s.dataMu.Unlock()
|
||||||
|
s.data = append(s.data, request.ResourceMetrics...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump returns all added ResourceMetrics and clears the storage.
|
||||||
|
func (s *Storage) Dump() []*mpb.ResourceMetrics {
|
||||||
|
s.dataMu.Lock()
|
||||||
|
defer s.dataMu.Unlock()
|
||||||
|
|
||||||
|
var data []*mpb.ResourceMetrics
|
||||||
|
data, s.data = s.data, []*mpb.ResourceMetrics{}
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// GRPCCollector is an OTLP gRPC server that collects all requests it receives.
type GRPCCollector struct {
	collpb.UnimplementedMetricsServiceServer

	// headersMu guards headers.
	headersMu sync.Mutex
	// headers accumulates the gRPC metadata of every received export request.
	headers metadata.MD
	// storage records all received export requests.
	storage *Storage

	// resultCh, when non-nil, supplies the response returned for each export.
	resultCh <-chan ExportResult
	listener net.Listener
	srv      *grpc.Server
}
|
||||||
|
|
||||||
|
// NewGRPCCollector returns a *GRPCCollector that is listening at the provided
// endpoint.
//
// If endpoint is an empty string, the returned collector will be listening on
// the localhost interface at an OS chosen port.
//
// If resultCh is not nil, the collector will respond to Export calls with
// results sent on that channel. This means that if resultCh is not nil,
// Export calls will block until a result is received.
func NewGRPCCollector(endpoint string, resultCh <-chan ExportResult) (*GRPCCollector, error) {
	if endpoint == "" {
		endpoint = "localhost:0"
	}

	c := &GRPCCollector{
		storage:  NewStorage(),
		resultCh: resultCh,
	}

	var err error
	c.listener, err = net.Listen("tcp", endpoint)
	if err != nil {
		return nil, err
	}

	c.srv = grpc.NewServer()
	collpb.RegisterMetricsServiceServer(c.srv, c)
	// Serve returns when Shutdown stops the server; its error is irrelevant
	// to tests.
	go func() { _ = c.srv.Serve(c.listener) }()

	return c, nil
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the gRPC server closing all open connections and
// listeners immediately.
func (c *GRPCCollector) Shutdown() { c.srv.Stop() }

// Addr returns the net.Addr c is listening at.
func (c *GRPCCollector) Addr() net.Addr {
	return c.listener.Addr()
}

// Collect returns the Storage holding all collected requests.
func (c *GRPCCollector) Collect() *Storage {
	return c.storage
}

// Headers returns the headers received for all requests.
func (c *GRPCCollector) Headers() map[string][]string {
	// Makes a copy.
	c.headersMu.Lock()
	defer c.headersMu.Unlock()
	return metadata.Join(c.headers)
}
|
||||||
|
|
||||||
|
// Export handles the export req.
|
||||||
|
func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) {
|
||||||
|
c.storage.Add(req)
|
||||||
|
|
||||||
|
if h, ok := metadata.FromIncomingContext(ctx); ok {
|
||||||
|
c.headersMu.Lock()
|
||||||
|
c.headers = metadata.Join(c.headers, h)
|
||||||
|
c.headersMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.resultCh != nil {
|
||||||
|
r := <-c.resultCh
|
||||||
|
if r.Response == nil {
|
||||||
|
return &collpb.ExportMetricsServiceResponse{}, r.Err
|
||||||
|
}
|
||||||
|
return r.Response, r.Err
|
||||||
|
}
|
||||||
|
return &collpb.ExportMetricsServiceResponse{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// emptyExportMetricsServiceResponse is the pre-marshaled protobuf encoding of
// an empty ExportMetricsServiceResponse, used as the default HTTP response
// body.
var emptyExportMetricsServiceResponse = func() []byte {
	body := collpb.ExportMetricsServiceResponse{}
	r, err := proto.Marshal(&body)
	if err != nil {
		// Marshaling an empty message must succeed; a failure here is a
		// programmer error.
		panic(err)
	}
	return r
}()
|
||||||
|
|
||||||
|
// HTTPResponseError is an error that additionally carries the HTTP status
// code and headers to respond to a request with.
type HTTPResponseError struct {
	Err    error
	Status int
	Header http.Header
}

// Error returns the status code followed by the wrapped error message.
func (e *HTTPResponseError) Error() string {
	return fmt.Sprintf("%d: %s", e.Status, e.Err)
}

// Unwrap returns the wrapped error.
func (e *HTTPResponseError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
// HTTPCollector is an OTLP HTTP server that collects all requests it receives.
type HTTPCollector struct {
	// headersMu guards headers.
	headersMu sync.Mutex
	// headers accumulates the HTTP headers of every received export request.
	headers http.Header
	// storage records all received export requests.
	storage *Storage

	// resultCh, when non-nil, supplies the response returned for each export.
	resultCh <-chan ExportResult
	listener net.Listener
	srv      *http.Server
}
|
||||||
|
|
||||||
|
// NewHTTPCollector returns a *HTTPCollector that is listening at the provided
// endpoint.
//
// If endpoint is an empty string, the returned collector will be listening on
// the localhost interface at an OS chosen port, not use TLS, and listen at the
// default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains
// a prefix of "https" the server will generate weak self-signed TLS
// certificates and use them to serve data. If the endpoint contains a path,
// that path will be used instead of the default OTLP metric endpoint path.
//
// If resultCh is not nil, the collector will respond to HTTP requests with
// results sent on that channel. This means that if resultCh is not nil,
// Export calls will block until a result is received.
func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPCollector, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	if u.Host == "" {
		u.Host = "localhost:0"
	}
	if u.Path == "" {
		u.Path = oconf.DefaultMetricsPath
	}

	c := &HTTPCollector{
		headers:  http.Header{},
		storage:  NewStorage(),
		resultCh: resultCh,
	}

	c.listener, err = net.Listen("tcp", u.Host)
	if err != nil {
		return nil, err
	}

	mux := http.NewServeMux()
	mux.Handle(u.Path, http.HandlerFunc(c.handler))
	c.srv = &http.Server{Handler: mux}
	if u.Scheme == "https" {
		cert, err := weakCertificate()
		if err != nil {
			return nil, err
		}
		c.srv.TLSConfig = &tls.Config{
			Certificates: []tls.Certificate{cert},
		}
		go func() { _ = c.srv.ServeTLS(c.listener, "", "") }()
	} else {
		go func() { _ = c.srv.Serve(c.listener) }()
	}
	return c, nil
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the HTTP server closing all open connections and
// listeners.
func (c *HTTPCollector) Shutdown(ctx context.Context) error {
	return c.srv.Shutdown(ctx)
}

// Addr returns the net.Addr c is listening at.
func (c *HTTPCollector) Addr() net.Addr {
	return c.listener.Addr()
}

// Collect returns the Storage holding all collected requests.
func (c *HTTPCollector) Collect() *Storage {
	return c.storage
}

// Headers returns the headers received for all requests.
func (c *HTTPCollector) Headers() map[string][]string {
	// Makes a copy.
	c.headersMu.Lock()
	defer c.headersMu.Unlock()
	return c.headers.Clone()
}
|
||||||
|
|
||||||
|
// handler records the incoming export request and writes the corresponding
// response.
func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) {
	c.respond(w, c.record(r))
}
|
||||||
|
|
||||||
|
// record decodes and stores the export request carried by r, returning the
// ExportResult to respond with. Only protobuf request bodies are supported.
func (c *HTTPCollector) record(r *http.Request) ExportResult {
	// Currently only supports protobuf.
	if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" {
		err := fmt.Errorf("content-type not supported: %s", v)
		return ExportResult{Err: err}
	}

	body, err := c.readBody(r)
	if err != nil {
		return ExportResult{Err: err}
	}
	pbRequest := &collpb.ExportMetricsServiceRequest{}
	err = proto.Unmarshal(body, pbRequest)
	if err != nil {
		return ExportResult{
			Err: &HTTPResponseError{
				Err:    err,
				Status: http.StatusInternalServerError,
			},
		}
	}
	c.storage.Add(pbRequest)

	// Record every request header for later inspection via Headers.
	c.headersMu.Lock()
	for k, vals := range r.Header {
		for _, v := range vals {
			c.headers.Add(k, v)
		}
	}
	c.headersMu.Unlock()

	if c.resultCh != nil {
		return <-c.resultCh
	}
	// err is nil at this point; respond with success.
	return ExportResult{Err: err}
}
|
||||||
|
|
||||||
|
func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) {
|
||||||
|
var reader io.ReadCloser
|
||||||
|
switch r.Header.Get("Content-Encoding") {
|
||||||
|
case "gzip":
|
||||||
|
reader, err = gzip.NewReader(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
_ = reader.Close()
|
||||||
|
return nil, &HTTPResponseError{
|
||||||
|
Err: err,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
reader = r.Body
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
cErr := reader.Close()
|
||||||
|
if err == nil && cErr != nil {
|
||||||
|
err = &HTTPResponseError{
|
||||||
|
Err: cErr,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
body, err = io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
err = &HTTPResponseError{
|
||||||
|
Err: err,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return body, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// respond writes resp to w. An error result is written as plain text using
// the status carried by an *HTTPResponseError (http.StatusBadRequest for any
// other error); a successful result is written as a protobuf-encoded
// ExportMetricsServiceResponse.
func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) {
	if resp.Err != nil {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.Header().Set("X-Content-Type-Options", "nosniff")
		var e *HTTPResponseError
		if errors.As(resp.Err, &e) {
			// Propagate any response headers the error carries.
			for k, vals := range e.Header {
				for _, v := range vals {
					w.Header().Add(k, v)
				}
			}
			w.WriteHeader(e.Status)
			fmt.Fprintln(w, e.Error())
		} else {
			w.WriteHeader(http.StatusBadRequest)
			fmt.Fprintln(w, resp.Err.Error())
		}
		return
	}

	w.Header().Set("Content-Type", "application/x-protobuf")
	w.WriteHeader(http.StatusOK)
	if resp.Response == nil {
		// No explicit response configured; reply with an empty success.
		_, _ = w.Write(emptyExportMetricsServiceResponse)
	} else {
		r, err := proto.Marshal(resp.Response)
		if err != nil {
			panic(err)
		}
		_, _ = w.Write(r)
	}
}
|
||||||
|
|
||||||
|
// weakCertificate returns a self-signed TLS server certificate for
// "localhost" and the loopback addresses, valid for one hour. It is
// intentionally weak and for testing only.
//
// Based on https://golang.org/src/crypto/tls/generate_cert.go,
// simplified and weakened.
func weakCertificate() (tls.Certificate, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return tls.Certificate{}, err
	}

	// Random 128-bit serial number.
	snLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serial, err := rand.Int(rand.Reader, snLimit)
	if err != nil {
		return tls.Certificate{}, err
	}

	now := time.Now()
	tmpl := x509.Certificate{
		SerialNumber:          serial,
		Subject:               pkix.Name{Organization: []string{"otel-go"}},
		NotBefore:             now,
		NotAfter:              now.Add(time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		DNSNames:              []string{"localhost"},
		IPAddresses:           []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)},
	}

	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		return tls.Certificate{}, err
	}

	var certPEM bytes.Buffer
	if err := pem.Encode(&certPEM, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
		return tls.Certificate{}, err
	}

	keyDER, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		return tls.Certificate{}, err
	}
	var keyPEM bytes.Buffer
	if err := pem.Encode(&keyPEM, &pem.Block{Type: "PRIVATE KEY", Bytes: keyDER}); err != nil {
		return tls.Certificate{}, err
	}

	return tls.X509KeyPair(certPEM.Bytes(), keyPEM.Bytes())
}
|
@ -0,0 +1,67 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/partialsuccess.go
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// PartialSuccess is the underlying error for all handling of OTLP partial
// success messages. Use `errors.Is(err, PartialSuccess{})` to test whether an
// error passed to the OTel error handler belongs to this category.
type PartialSuccess struct {
	ErrorMessage  string
	RejectedItems int64
	RejectedKind  string
}

// Compile-time check that PartialSuccess implements error.
var _ error = PartialSuccess{}

// Error implements the error interface.
func (ps PartialSuccess) Error() string {
	reason := ps.ErrorMessage
	if reason == "" {
		reason = "empty message"
	}
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", reason, ps.RejectedItems, ps.RejectedKind)
}

// Is supports the errors.Is() interface.
func (PartialSuccess) Is(err error) bool {
	_, sameKind := err.(PartialSuccess)
	return sameKind
}
|
||||||
|
|
||||||
|
// TracePartialSuccessError returns an error describing a partial success
|
||||||
|
// response for the trace signal.
|
||||||
|
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
|
||||||
|
return PartialSuccess{
|
||||||
|
ErrorMessage: errorMessage,
|
||||||
|
RejectedItems: itemsRejected,
|
||||||
|
RejectedKind: "spans",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MetricPartialSuccessError returns an error describing a partial success
|
||||||
|
// response for the metric signal.
|
||||||
|
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
|
||||||
|
return PartialSuccess{
|
||||||
|
ErrorMessage: errorMessage,
|
||||||
|
RejectedItems: itemsRejected,
|
||||||
|
RejectedKind: "metric data points",
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,46 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/partialsuccess_test.go
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func requireErrorString(t *testing.T, expect string, err error) {
|
||||||
|
t.Helper()
|
||||||
|
require.NotNil(t, err)
|
||||||
|
require.Error(t, err)
|
||||||
|
require.True(t, errors.Is(err, PartialSuccess{}))
|
||||||
|
|
||||||
|
const pfx = "OTLP partial success: "
|
||||||
|
|
||||||
|
msg := err.Error()
|
||||||
|
require.True(t, strings.HasPrefix(msg, pfx))
|
||||||
|
require.Equal(t, expect, msg[len(pfx):])
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPartialSuccessFormat verifies the formatted messages produced by the
// metric and trace partial-success error constructors.
func TestPartialSuccessFormat(t *testing.T) {
	requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, ""))
	requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help"))
	requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened"))
	requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened"))
}
|
156
exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
Normal file
156
exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
Normal file
@ -0,0 +1,156 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package retry provides request retry functionality that can perform
|
||||||
|
// configurable exponential backoff for transient errors and honor any
|
||||||
|
// explicit throttle responses received.
|
||||||
|
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cenkalti/backoff/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultConfig are the recommended defaults to use.
var DefaultConfig = Config{
	Enabled:         true,
	InitialInterval: 5 * time.Second,  // Wait 5s after the first failure.
	MaxInterval:     30 * time.Second, // Cap the backoff interval at 30s.
	MaxElapsedTime:  time.Minute,      // Give up (drop the data) after 1 minute.
}
|
||||||
|
|
||||||
|
// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
	// Enabled indicates whether retrying of failed export batches is
	// enabled.
	Enabled bool
	// InitialInterval is the time to wait after the first failure before
	// retrying.
	InitialInterval time.Duration
	// MaxInterval is the upper bound on backoff interval. Once this value is
	// reached the delay between consecutive retries will always be
	// `MaxInterval`.
	MaxInterval time.Duration
	// MaxElapsedTime is the maximum amount of time (including retries) spent
	// trying to send a request/batch. Once this value is reached, the data
	// is discarded.
	MaxElapsedTime time.Duration
}
|
||||||
|
|
||||||
|
// RequestFunc wraps a request with retry logic. It invokes the provided
// function, retrying it according to the Config that produced the
// RequestFunc.
type RequestFunc func(context.Context, func(context.Context) error) error

// EvaluateFunc returns if an error is retry-able and if an explicit throttle
// duration should be honored that was included in the error.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)
|
||||||
|
|
||||||
|
// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
	if !c.Enabled {
		// Retrying is disabled: pass the request straight through.
		return func(ctx context.Context, fn func(context.Context) error) error {
			return fn(ctx)
		}
	}

	return func(ctx context.Context, fn func(context.Context) error) error {
		// Do not use NewExponentialBackOff since it calls Reset and the code here
		// must call Reset after changing the InitialInterval (this saves an
		// unnecessary call to Now).
		b := &backoff.ExponentialBackOff{
			InitialInterval:     c.InitialInterval,
			RandomizationFactor: backoff.DefaultRandomizationFactor,
			Multiplier:          backoff.DefaultMultiplier,
			MaxInterval:         c.MaxInterval,
			MaxElapsedTime:      c.MaxElapsedTime,
			Stop:                backoff.Stop,
			Clock:               backoff.SystemClock,
		}
		b.Reset()

		for {
			err := fn(ctx)
			if err == nil {
				return nil
			}

			retryable, throttle := evaluate(err)
			if !retryable {
				return err
			}

			bOff := b.NextBackOff()
			if bOff == backoff.Stop {
				// MaxElapsedTime has been exceeded; give up.
				return fmt.Errorf("max retry time elapsed: %w", err)
			}

			// Wait for the greater of the backoff or throttle delay.
			var delay time.Duration
			if bOff > throttle {
				delay = bOff
			} else {
				elapsed := b.GetElapsedTime()
				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
					// Honoring the throttle would exceed MaxElapsedTime;
					// give up now instead of waiting.
					return fmt.Errorf("max retry time would elapse: %w", err)
				}
				delay = throttle
			}

			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
				return fmt.Errorf("%w: %s", ctxErr, err)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// Allow override for testing.
var waitFunc = wait

// wait takes the caller's context, and the amount of time to wait. It will
// return nil if the timer fires before or at the same time as the context's
// deadline. This indicates that the call can be retried.
func wait(ctx context.Context, delay time.Duration) error {
	timer := time.NewTimer(delay)
	defer timer.Stop()

	select {
	case <-ctx.Done():
		// Handle the case where the timer and context deadline end
		// simultaneously by prioritizing the timer expiration nil value
		// response.
		select {
		case <-timer.C:
		default:
			return ctx.Err()
		}
	case <-timer.C:
	}

	return nil
}
|
@ -0,0 +1,261 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/retry/retry_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package retry
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/cenkalti/backoff/v4"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestWait verifies wait returns nil for zero, positive, and negative delays,
// and the context error when the context ends before the timer fires.
func TestWait(t *testing.T) {
	tests := []struct {
		ctx      context.Context
		delay    time.Duration
		expected error
	}{
		{
			ctx:   context.Background(),
			delay: time.Duration(0),
		},
		{
			ctx:   context.Background(),
			delay: time.Duration(1),
		},
		{
			ctx:   context.Background(),
			delay: time.Duration(-1),
		},
		{
			ctx: func() context.Context {
				ctx, cancel := context.WithCancel(context.Background())
				cancel()
				return ctx
			}(),
			// Ensure the timer and context do not end simultaneously.
			delay:    1 * time.Hour,
			expected: context.Canceled,
		},
	}

	for _, test := range tests {
		err := wait(test.ctx, test.delay)
		if test.expected == nil {
			assert.NoError(t, err)
		} else {
			assert.ErrorIs(t, err, test.expected)
		}
	}
}
|
||||||
|
|
||||||
|
// TestNonRetryableError verifies a nil result passes through and a
// non-retryable error is returned unchanged without retrying.
func TestNonRetryableError(t *testing.T) {
	ev := func(error) (bool, time.Duration) { return false, 0 }

	reqFunc := Config{
		Enabled:         true,
		InitialInterval: 1 * time.Nanosecond,
		MaxInterval:     1 * time.Nanosecond,
		// Never stop retrying.
		MaxElapsedTime: 0,
	}.RequestFunc(ev)
	ctx := context.Background()
	assert.NoError(t, reqFunc(ctx, func(context.Context) error {
		return nil
	}))
	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
		return assert.AnError
	}), assert.AnError)
}
|
||||||
|
|
||||||
|
// TestThrottledRetry verifies the throttle delay from the evaluate function
// overrides a shorter backoff delay, and the request is re-attempted after
// the wait.
func TestThrottledRetry(t *testing.T) {
	// Ensure the throttle delay is used by making longer than backoff delay.
	throttleDelay, backoffDelay := time.Second, time.Nanosecond

	ev := func(error) (bool, time.Duration) {
		// Retry everything with a throttle delay.
		return true, throttleDelay
	}

	reqFunc := Config{
		Enabled:         true,
		InitialInterval: backoffDelay,
		MaxInterval:     backoffDelay,
		// Never stop retrying.
		MaxElapsedTime: 0,
	}.RequestFunc(ev)

	origWait := waitFunc
	var done bool
	waitFunc = func(_ context.Context, delay time.Duration) error {
		assert.Equal(t, throttleDelay, delay, "retry not throttled")
		// Try twice to ensure call is attempted again after delay.
		if done {
			return assert.AnError
		}
		done = true
		return nil
	}
	defer func() { waitFunc = origWait }()

	ctx := context.Background()
	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
		return errors.New("not this error")
	}), assert.AnError)
}
|
||||||
|
|
||||||
|
// TestBackoffRetry verifies the computed backoff delay (within the default
// randomization factor) is used between retry attempts.
func TestBackoffRetry(t *testing.T) {
	ev := func(error) (bool, time.Duration) { return true, 0 }

	delay := time.Nanosecond
	reqFunc := Config{
		Enabled:         true,
		InitialInterval: delay,
		MaxInterval:     delay,
		// Never stop retrying.
		MaxElapsedTime: 0,
	}.RequestFunc(ev)

	origWait := waitFunc
	var done bool
	waitFunc = func(_ context.Context, d time.Duration) error {
		// The backoff delay is jittered by the randomization factor.
		delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor)
		assert.InDelta(t, delay, d, delta, "retry not backoffed")
		// Try twice to ensure call is attempted again after delay.
		if done {
			return assert.AnError
		}
		done = true
		return nil
	}
	t.Cleanup(func() { waitFunc = origWait })

	ctx := context.Background()
	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
		return errors.New("not this error")
	}), assert.AnError)
}
|
||||||
|
|
||||||
|
// TestBackoffRetryCanceledContext verifies a canceled context stops the retry
// loop after a single attempt, wrapping both the context error and the
// request error.
func TestBackoffRetryCanceledContext(t *testing.T) {
	ev := func(error) (bool, time.Duration) { return true, 0 }

	delay := time.Millisecond
	reqFunc := Config{
		Enabled:         true,
		InitialInterval: delay,
		MaxInterval:     delay,
		// Stop retrying after 10 milliseconds.
		MaxElapsedTime: 10 * time.Millisecond,
	}.RequestFunc(ev)

	ctx, cancel := context.WithCancel(context.Background())
	count := 0
	cancel()
	err := reqFunc(ctx, func(context.Context) error {
		count++
		return assert.AnError
	})

	assert.ErrorIs(t, err, context.Canceled)
	assert.Contains(t, err.Error(), assert.AnError.Error())
	assert.Equal(t, 1, count)
}
|
||||||
|
|
||||||
|
// TestThrottledRetryGreaterThanMaxElapsedTime verifies the retry loop gives
// up immediately when honoring the throttle delay would exceed
// MaxElapsedTime.
func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) {
	// Ensure the throttle delay is used by making longer than backoff delay.
	tDelay, bDelay := time.Hour, time.Nanosecond
	ev := func(error) (bool, time.Duration) { return true, tDelay }
	reqFunc := Config{
		Enabled:         true,
		InitialInterval: bDelay,
		MaxInterval:     bDelay,
		MaxElapsedTime:  tDelay - (time.Nanosecond),
	}.RequestFunc(ev)

	ctx := context.Background()
	assert.Contains(t, reqFunc(ctx, func(context.Context) error {
		return assert.AnError
	}).Error(), "max retry time would elapse: ")
}
|
||||||
|
|
||||||
|
// TestMaxElapsedTime verifies the retry loop gives up once the backoff
// reports MaxElapsedTime has been exceeded.
func TestMaxElapsedTime(t *testing.T) {
	ev := func(error) (bool, time.Duration) { return true, 0 }
	delay := time.Nanosecond
	reqFunc := Config{
		Enabled: true,
		// InitialInterval > MaxElapsedTime means immediate return.
		InitialInterval: 2 * delay,
		MaxElapsedTime:  delay,
	}.RequestFunc(ev)

	ctx := context.Background()
	assert.Contains(t, reqFunc(ctx, func(context.Context) error {
		return assert.AnError
	}).Error(), "max retry time elapsed: ")
}
|
||||||
|
|
||||||
|
// TestRetryNotEnabled verifies a disabled Config passes requests straight
// through and never calls the evaluate function.
func TestRetryNotEnabled(t *testing.T) {
	ev := func(error) (bool, time.Duration) {
		t.Error("evaluated retry when not enabled")
		return false, 0
	}

	reqFunc := Config{}.RequestFunc(ev)
	ctx := context.Background()
	assert.NoError(t, reqFunc(ctx, func(context.Context) error {
		return nil
	}))
	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
		return assert.AnError
	}), assert.AnError)
}
|
||||||
|
|
||||||
|
// TestRetryConcurrentSafe verifies a single RequestFunc can be used from
// multiple goroutines concurrently (run with -race to detect data races).
func TestRetryConcurrentSafe(t *testing.T) {
	ev := func(error) (bool, time.Duration) { return true, 0 }
	reqFunc := Config{
		Enabled: true,
	}.RequestFunc(ev)

	var wg sync.WaitGroup
	ctx := context.Background()

	for i := 1; i < 5; i++ {
		wg.Add(1)

		go func() {
			defer wg.Done()

			// Each goroutine fails once and succeeds on the retry.
			var done bool
			assert.NoError(t, reqFunc(ctx, func(context.Context) error {
				if !done {
					done = true
					return assert.AnError
				}

				return nil
			}))
		}()
	}

	wg.Wait()
}
|
@ -0,0 +1,155 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AttrIter transforms an attribute iterator into OTLP key-values. It returns
// nil for an empty iterator.
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
	l := iter.Len()
	if l == 0 {
		return nil
	}

	out := make([]*cpb.KeyValue, 0, l)
	for iter.Next() {
		out = append(out, KeyValue(iter.Attribute()))
	}
	return out
}
|
||||||
|
|
||||||
|
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
// It returns nil for an empty slice.
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
	if len(attrs) == 0 {
		return nil
	}

	out := make([]*cpb.KeyValue, 0, len(attrs))
	for _, kv := range attrs {
		out = append(out, KeyValue(kv))
	}
	return out
}

// KeyValue transforms an attribute KeyValue into an OTLP key-value.
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
	return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
}
|
||||||
|
|
||||||
|
// Value transforms an attribute Value into an OTLP AnyValue. Unrecognized
// value types are encoded as the string "INVALID".
func Value(v attribute.Value) *cpb.AnyValue {
	av := new(cpb.AnyValue)
	switch v.Type() {
	case attribute.BOOL:
		av.Value = &cpb.AnyValue_BoolValue{
			BoolValue: v.AsBool(),
		}
	case attribute.BOOLSLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: boolSliceValues(v.AsBoolSlice()),
			},
		}
	case attribute.INT64:
		av.Value = &cpb.AnyValue_IntValue{
			IntValue: v.AsInt64(),
		}
	case attribute.INT64SLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: int64SliceValues(v.AsInt64Slice()),
			},
		}
	case attribute.FLOAT64:
		av.Value = &cpb.AnyValue_DoubleValue{
			DoubleValue: v.AsFloat64(),
		}
	case attribute.FLOAT64SLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: float64SliceValues(v.AsFloat64Slice()),
			},
		}
	case attribute.STRING:
		av.Value = &cpb.AnyValue_StringValue{
			StringValue: v.AsString(),
		}
	case attribute.STRINGSLICE:
		av.Value = &cpb.AnyValue_ArrayValue{
			ArrayValue: &cpb.ArrayValue{
				Values: stringSliceValues(v.AsStringSlice()),
			},
		}
	default:
		av.Value = &cpb.AnyValue_StringValue{
			StringValue: "INVALID",
		}
	}
	return av
}
|
||||||
|
|
||||||
|
// boolSliceValues converts a slice of bools into a slice of OTLP AnyValues.
func boolSliceValues(vals []bool) []*cpb.AnyValue {
	converted := make([]*cpb.AnyValue, len(vals))
	for i, v := range vals {
		converted[i] = &cpb.AnyValue{
			Value: &cpb.AnyValue_BoolValue{
				BoolValue: v,
			},
		}
	}
	return converted
}

// int64SliceValues converts a slice of int64s into a slice of OTLP AnyValues.
func int64SliceValues(vals []int64) []*cpb.AnyValue {
	converted := make([]*cpb.AnyValue, len(vals))
	for i, v := range vals {
		converted[i] = &cpb.AnyValue{
			Value: &cpb.AnyValue_IntValue{
				IntValue: v,
			},
		}
	}
	return converted
}

// float64SliceValues converts a slice of float64s into a slice of OTLP
// AnyValues.
func float64SliceValues(vals []float64) []*cpb.AnyValue {
	converted := make([]*cpb.AnyValue, len(vals))
	for i, v := range vals {
		converted[i] = &cpb.AnyValue{
			Value: &cpb.AnyValue_DoubleValue{
				DoubleValue: v,
			},
		}
	}
	return converted
}

// stringSliceValues converts a slice of strings into a slice of OTLP
// AnyValues.
func stringSliceValues(vals []string) []*cpb.AnyValue {
	converted := make([]*cpb.AnyValue, len(vals))
	for i, v := range vals {
		converted[i] = &cpb.AnyValue{
			Value: &cpb.AnyValue_StringValue{
				StringValue: v,
			},
		}
	}
	return converted
}
|
@ -0,0 +1,197 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// Attribute inputs covering every attribute.Type, plus one invalid value.
	attrBool         = attribute.Bool("bool", true)
	attrBoolSlice    = attribute.BoolSlice("bool slice", []bool{true, false})
	attrInt          = attribute.Int("int", 1)
	attrIntSlice     = attribute.IntSlice("int slice", []int{-1, 1})
	attrInt64        = attribute.Int64("int64", 1)
	attrInt64Slice   = attribute.Int64Slice("int64 slice", []int64{-1, 1})
	attrFloat64      = attribute.Float64("float64", 1)
	attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1})
	attrString       = attribute.String("string", "o")
	attrStringSlice  = attribute.StringSlice("string slice", []string{"o", "n"})
	attrInvalid      = attribute.KeyValue{
		Key:   attribute.Key("invalid"),
		Value: attribute.Value{},
	}

	// Expected OTLP AnyValue forms of the inputs above.
	valBoolTrue  = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}}
	valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}}
	valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
		ArrayValue: &cpb.ArrayValue{
			Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse},
		},
	}}
	valIntOne   = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}}
	valIntNOne  = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}}
	valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
		ArrayValue: &cpb.ArrayValue{
			Values: []*cpb.AnyValue{valIntNOne, valIntOne},
		},
	}}
	valDblOne   = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}}
	valDblNOne  = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}}
	valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
		ArrayValue: &cpb.ArrayValue{
			Values: []*cpb.AnyValue{valDblNOne, valDblOne},
		},
	}}
	valStrO     = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}}
	valStrN     = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}}
	valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
		ArrayValue: &cpb.ArrayValue{
			Values: []*cpb.AnyValue{valStrO, valStrN},
		},
	}}

	// Expected OTLP key-values pairing the input keys with the values above.
	kvBool         = &cpb.KeyValue{Key: "bool", Value: valBoolTrue}
	kvBoolSlice    = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice}
	kvInt          = &cpb.KeyValue{Key: "int", Value: valIntOne}
	kvIntSlice     = &cpb.KeyValue{Key: "int slice", Value: valIntSlice}
	kvInt64        = &cpb.KeyValue{Key: "int64", Value: valIntOne}
	kvInt64Slice   = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice}
	kvFloat64      = &cpb.KeyValue{Key: "float64", Value: valDblOne}
	kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice}
	kvString       = &cpb.KeyValue{Key: "string", Value: valStrO}
	kvStringSlice  = &cpb.KeyValue{Key: "string slice", Value: valStrSlice}
	kvInvalid      = &cpb.KeyValue{
		Key: "invalid",
		Value: &cpb.AnyValue{
			Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"},
		},
	}
)
|
||||||
|
|
||||||
|
// attributeTest is one transform case: in is the attribute input and want is
// the expected OTLP key-value output.
type attributeTest struct {
	name string
	in   []attribute.KeyValue
	want []*cpb.KeyValue
}
|
||||||
|
|
||||||
|
// TestAttributeTransforms verifies KeyValues and AttrIter produce equivalent
// OTLP output for every attribute type, including empty input and the
// invalid-value fallback.
func TestAttributeTransforms(t *testing.T) {
	for _, test := range []attributeTest{
		{"nil", nil, nil},
		{"empty", []attribute.KeyValue{}, nil},
		{
			"invalid",
			[]attribute.KeyValue{attrInvalid},
			[]*cpb.KeyValue{kvInvalid},
		},
		{
			"bool",
			[]attribute.KeyValue{attrBool},
			[]*cpb.KeyValue{kvBool},
		},
		{
			"bool slice",
			[]attribute.KeyValue{attrBoolSlice},
			[]*cpb.KeyValue{kvBoolSlice},
		},
		{
			"int",
			[]attribute.KeyValue{attrInt},
			[]*cpb.KeyValue{kvInt},
		},
		{
			"int slice",
			[]attribute.KeyValue{attrIntSlice},
			[]*cpb.KeyValue{kvIntSlice},
		},
		{
			"int64",
			[]attribute.KeyValue{attrInt64},
			[]*cpb.KeyValue{kvInt64},
		},
		{
			"int64 slice",
			[]attribute.KeyValue{attrInt64Slice},
			[]*cpb.KeyValue{kvInt64Slice},
		},
		{
			"float64",
			[]attribute.KeyValue{attrFloat64},
			[]*cpb.KeyValue{kvFloat64},
		},
		{
			"float64 slice",
			[]attribute.KeyValue{attrFloat64Slice},
			[]*cpb.KeyValue{kvFloat64Slice},
		},
		{
			"string",
			[]attribute.KeyValue{attrString},
			[]*cpb.KeyValue{kvString},
		},
		{
			"string slice",
			[]attribute.KeyValue{attrStringSlice},
			[]*cpb.KeyValue{kvStringSlice},
		},
		{
			"all",
			[]attribute.KeyValue{
				attrBool,
				attrBoolSlice,
				attrInt,
				attrIntSlice,
				attrInt64,
				attrInt64Slice,
				attrFloat64,
				attrFloat64Slice,
				attrString,
				attrStringSlice,
				attrInvalid,
			},
			[]*cpb.KeyValue{
				kvBool,
				kvBoolSlice,
				kvInt,
				kvIntSlice,
				kvInt64,
				kvInt64Slice,
				kvFloat64,
				kvFloat64Slice,
				kvString,
				kvStringSlice,
				kvInvalid,
			},
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			t.Run("KeyValues", func(t *testing.T) {
				assert.ElementsMatch(t, test.want, KeyValues(test.in))
			})
			t.Run("AttrIter", func(t *testing.T) {
				s := attribute.NewSet(test.in...)
				assert.ElementsMatch(t, test.want, AttrIter(s.Iter()))
			})
		})
	}
}
|
@ -0,0 +1,114 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sentinel errors for unrecognized aggregation and temporality values.
var (
	errUnknownAggregation = errors.New("unknown aggregation")
	errUnknownTemporality = errors.New("unknown temporality")
)
|
||||||
|
|
||||||
|
// errMetric wraps an error with the OTLP metric it relates to.
type errMetric struct {
	m   *mpb.Metric
	err error
}

// Unwrap returns the wrapped error.
func (e errMetric) Unwrap() error {
	return e.err
}

// Error returns the wrapped error message prefixed with the identifying
// name, description, and unit of the metric.
func (e errMetric) Error() string {
	format := "invalid metric (name: %q, description: %q, unit: %q): %s"
	return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
}

// Is reports whether the wrapped error matches target.
func (e errMetric) Is(target error) bool {
	return errors.Is(e.err, target)
}
|
||||||
|
|
||||||
|
// multiErr is used by the data-type transform functions to wrap multiple
// errors into a single return value. The error message will show all errors
// as a list and scope them by the datatype name that is returning them.
type multiErr struct {
	datatype string
	errs     []error
}

// errOrNil returns nil if e contains no errors, otherwise it returns e.
func (e *multiErr) errOrNil() error {
	if len(e.errs) == 0 {
		return nil
	}
	return e
}

// append adds err to e. If err is a multiErr, its errs are flattened into e.
func (e *multiErr) append(err error) {
	// Do not use errors.As here, this should only be flattened one layer. If
	// there is a *multiErr several steps down the chain, all the errors above
	// it will be discarded if errors.As is used instead.
	switch other := err.(type) {
	case *multiErr:
		// Flatten err errors into e.
		e.errs = append(e.errs, other.errs...)
	default:
		e.errs = append(e.errs, err)
	}
}

// Error returns all contained errors as a bulleted list scoped by the
// datatype name.
func (e *multiErr) Error() string {
	es := make([]string, len(e.errs))
	for i, err := range e.errs {
		es[i] = fmt.Sprintf("* %s", err)
	}

	format := "%d errors occurred transforming %s:\n\t%s"
	return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
}

// Unwrap returns the first contained error, or a multiErr of the remaining
// errors when more than one is present. Together with Is this lets errors.Is
// walk every contained error.
func (e *multiErr) Unwrap() error {
	switch len(e.errs) {
	case 0:
		return nil
	case 1:
		return e.errs[0]
	}

	// Return a multiErr without the leading error.
	cp := &multiErr{
		datatype: e.datatype,
		errs:     make([]error, len(e.errs)-1),
	}
	copy(cp.errs, e.errs[1:])
	return cp
}

// Is reports whether the first contained error matches target.
func (e *multiErr) Is(target error) bool {
	if len(e.errs) == 0 {
		return false
	}
	// Check if the first error is target.
	return errors.Is(e.errs[0], target)
}
|
@ -0,0 +1,91 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Fixture errors wrapping the first two metrics of the package test data.
var (
	e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation}
	e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality}
)

// testingErr is a comparable placeholder error type.
type testingErr struct{}

func (testingErr) Error() string { return "testing error" }

// errFunc is a non-comparable error type.
type errFunc func() string

func (e errFunc) Error() string {
	return e()
}
|
||||||
|
|
||||||
|
// TestMultiErr exercises multiErr accumulation, message formatting, and
// errors.Is matching. The subtests share the same multiErr and depend on
// running in order.
func TestMultiErr(t *testing.T) {
	const name = "TestMultiErr"
	me := &multiErr{datatype: name}

	t.Run("ErrOrNil", func(t *testing.T) {
		require.Nil(t, me.errOrNil())
		me.errs = []error{e0}
		assert.Error(t, me.errOrNil())
	})

	var testErr testingErr
	t.Run("AppendError", func(t *testing.T) {
		me.append(testErr)
		assert.Equal(t, testErr, me.errs[len(me.errs)-1])
	})

	t.Run("AppendFlattens", func(t *testing.T) {
		other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}}
		me.append(other)
		assert.Equal(t, e1, me.errs[len(me.errs)-1])
	})

	t.Run("ErrorMessage", func(t *testing.T) {
		// Test the overall structure of the message, but not the exact
		// language so this doesn't become a change-indicator.
		msg := me.Error()
		lines := strings.Split(msg, "\n")
		assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg)
		assert.Contains(t, msg, name)
		assert.Contains(t, msg, e0.Error())
		assert.Contains(t, msg, testErr.Error())
		assert.Contains(t, msg, e1.Error())
	})

	t.Run("ErrorIs", func(t *testing.T) {
		assert.ErrorIs(t, me, errUnknownAggregation)
		assert.ErrorIs(t, me, e0)
		assert.ErrorIs(t, me, testErr)
		assert.ErrorIs(t, me, errUnknownTemporality)
		assert.ErrorIs(t, me, e1)

		errUnknown := errFunc(func() string { return "unknown error" })
		assert.NotErrorIs(t, me, errUnknown)

		var empty multiErr
		assert.NotErrorIs(t, &empty, errUnknownTemporality)
	})
}
|
@ -0,0 +1,292 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package transform provides transformation functionality from the
|
||||||
|
// sdk/metric/metricdata data-types into OTLP data-types.
|
||||||
|
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
|
||||||
|
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
|
||||||
|
// ResourceMetrics that contains partial OTLP ScopeMetrics.
|
||||||
|
func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
|
||||||
|
sms, err := ScopeMetrics(rm.ScopeMetrics)
|
||||||
|
return &mpb.ResourceMetrics{
|
||||||
|
Resource: &rpb.Resource{
|
||||||
|
Attributes: AttrIter(rm.Resource.Iter()),
|
||||||
|
},
|
||||||
|
ScopeMetrics: sms,
|
||||||
|
SchemaUrl: rm.Resource.SchemaURL(),
|
||||||
|
}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
|
||||||
|
// sms contains invalid metric values, an error will be returned along with a
|
||||||
|
// slice that contains partial OTLP ScopeMetrics.
|
||||||
|
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||||
|
errs := &multiErr{datatype: "ScopeMetrics"}
|
||||||
|
out := make([]*mpb.ScopeMetrics, 0, len(sms))
|
||||||
|
for _, sm := range sms {
|
||||||
|
ms, err := Metrics(sm.Metrics)
|
||||||
|
if err != nil {
|
||||||
|
errs.append(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
out = append(out, &mpb.ScopeMetrics{
|
||||||
|
Scope: &cpb.InstrumentationScope{
|
||||||
|
Name: sm.Scope.Name,
|
||||||
|
Version: sm.Scope.Version,
|
||||||
|
},
|
||||||
|
Metrics: ms,
|
||||||
|
SchemaUrl: sm.Scope.SchemaURL,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return out, errs.errOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
|
||||||
|
// invalid metric values, an error will be returned along with a slice that
|
||||||
|
// contains partial OTLP Metrics.
|
||||||
|
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
|
||||||
|
errs := &multiErr{datatype: "Metrics"}
|
||||||
|
out := make([]*mpb.Metric, 0, len(ms))
|
||||||
|
for _, m := range ms {
|
||||||
|
o, err := metric(m)
|
||||||
|
if err != nil {
|
||||||
|
// Do not include invalid data. Drop the metric, report the error.
|
||||||
|
errs.append(errMetric{m: o, err: err})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, o)
|
||||||
|
}
|
||||||
|
return out, errs.errOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||||
|
var err error
|
||||||
|
out := &mpb.Metric{
|
||||||
|
Name: m.Name,
|
||||||
|
Description: m.Description,
|
||||||
|
Unit: string(m.Unit),
|
||||||
|
}
|
||||||
|
switch a := m.Data.(type) {
|
||||||
|
case metricdata.Gauge[int64]:
|
||||||
|
out.Data = Gauge[int64](a)
|
||||||
|
case metricdata.Gauge[float64]:
|
||||||
|
out.Data = Gauge[float64](a)
|
||||||
|
case metricdata.Sum[int64]:
|
||||||
|
out.Data, err = Sum[int64](a)
|
||||||
|
case metricdata.Sum[float64]:
|
||||||
|
out.Data, err = Sum[float64](a)
|
||||||
|
case metricdata.Histogram[int64]:
|
||||||
|
out.Data, err = Histogram(a)
|
||||||
|
case metricdata.Histogram[float64]:
|
||||||
|
out.Data, err = Histogram(a)
|
||||||
|
case metricdata.ExponentialHistogram[int64]:
|
||||||
|
out.Data, err = ExponentialHistogram(a)
|
||||||
|
case metricdata.ExponentialHistogram[float64]:
|
||||||
|
out.Data, err = ExponentialHistogram(a)
|
||||||
|
default:
|
||||||
|
return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
|
||||||
|
}
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gauge returns an OTLP Metric_Gauge generated from g.
|
||||||
|
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
|
||||||
|
return &mpb.Metric_Gauge{
|
||||||
|
Gauge: &mpb.Gauge{
|
||||||
|
DataPoints: DataPoints(g.DataPoints),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns an OTLP Metric_Sum generated from s. An error is returned
|
||||||
|
// if the temporality of s is unknown.
|
||||||
|
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
|
||||||
|
t, err := Temporality(s.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_Sum{
|
||||||
|
Sum: &mpb.Sum{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
IsMonotonic: s.IsMonotonic,
|
||||||
|
DataPoints: DataPoints(s.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
|
||||||
|
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
|
||||||
|
out := make([]*mpb.NumberDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
ndp := &mpb.NumberDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
}
|
||||||
|
switch v := any(dPt.Value).(type) {
|
||||||
|
case int64:
|
||||||
|
ndp.Value = &mpb.NumberDataPoint_AsInt{
|
||||||
|
AsInt: v,
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
ndp.Value = &mpb.NumberDataPoint_AsDouble{
|
||||||
|
AsDouble: v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out = append(out, ndp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Histogram returns an OTLP Metric_Histogram generated from h. An error is
|
||||||
|
// returned if the temporality of h is unknown.
|
||||||
|
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
|
||||||
|
t, err := Temporality(h.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_Histogram{
|
||||||
|
Histogram: &mpb.Histogram{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
DataPoints: HistogramDataPoints(h.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
|
||||||
|
// from dPts.
|
||||||
|
func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
|
||||||
|
out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
sum := float64(dPt.Sum)
|
||||||
|
hdp := &mpb.HistogramDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
Count: dPt.Count,
|
||||||
|
Sum: &sum,
|
||||||
|
BucketCounts: dPt.BucketCounts,
|
||||||
|
ExplicitBounds: dPt.Bounds,
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Min.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
hdp.Min = &vF64
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Max.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
hdp.Max = &vF64
|
||||||
|
}
|
||||||
|
out = append(out, hdp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
|
||||||
|
// returned if the temporality of h is unknown.
|
||||||
|
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
|
||||||
|
t, err := Temporality(h.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_ExponentialHistogram{
|
||||||
|
ExponentialHistogram: &mpb.ExponentialHistogram{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
|
||||||
|
// from dPts.
|
||||||
|
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
|
||||||
|
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
sum := float64(dPt.Sum)
|
||||||
|
ehdp := &mpb.ExponentialHistogramDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
Count: dPt.Count,
|
||||||
|
Sum: &sum,
|
||||||
|
Scale: dPt.Scale,
|
||||||
|
ZeroCount: dPt.ZeroCount,
|
||||||
|
|
||||||
|
Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
|
||||||
|
Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Min.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
ehdp.Min = &vF64
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Max.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
ehdp.Max = &vF64
|
||||||
|
}
|
||||||
|
out = append(out, ehdp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
|
||||||
|
// from bucket.
|
||||||
|
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||||
|
return &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: bucket.Offset,
|
||||||
|
BucketCounts: bucket.Counts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporality returns an OTLP AggregationTemporality generated from t. If t
|
||||||
|
// is unknown, an error is returned along with the invalid
|
||||||
|
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
|
||||||
|
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||||
|
switch t {
|
||||||
|
case metricdata.DeltaTemporality:
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
|
||||||
|
case metricdata.CumulativeTemporality:
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
|
||||||
|
default:
|
||||||
|
err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
|
||||||
|
// since January 1, 1970 UTC as uint64.
|
||||||
|
// The result is undefined if the Unix time
|
||||||
|
// in nanoseconds cannot be represented by an int64
|
||||||
|
// (a date before the year 1678 or after 2262).
|
||||||
|
// timeUnixNano on the zero Time returns 0.
|
||||||
|
// The result does not depend on the location associated with t.
|
||||||
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
|
if t.IsZero() {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return uint64(t.UnixNano())
|
||||||
|
}
|
@ -0,0 +1,633 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// unknownAggT embeds metricdata.Aggregation to create an aggregation type
// unknown to the transform package; tests use it to drive the
// errUnknownAggregation code path.
type unknownAggT struct {
	metricdata.Aggregation
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Sat Jan 01 2000 00:00:00 GMT+0000.
|
||||||
|
start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
|
||||||
|
end = start.Add(30 * time.Second)
|
||||||
|
|
||||||
|
alice = attribute.NewSet(attribute.String("user", "alice"))
|
||||||
|
bob = attribute.NewSet(attribute.String("user", "bob"))
|
||||||
|
|
||||||
|
pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
|
||||||
|
}}
|
||||||
|
pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
|
||||||
|
}}
|
||||||
|
|
||||||
|
minA, maxA, sumA = 2.0, 4.0, 90.0
|
||||||
|
minB, maxB, sumB = 4.0, 150.0, 234.0
|
||||||
|
otelHDPInt64 = []metricdata.HistogramDataPoint[int64]{
|
||||||
|
{
|
||||||
|
Attributes: alice,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 30,
|
||||||
|
Bounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 30, 0},
|
||||||
|
Min: metricdata.NewExtrema(int64(minA)),
|
||||||
|
Max: metricdata.NewExtrema(int64(maxA)),
|
||||||
|
Sum: int64(sumA),
|
||||||
|
}, {
|
||||||
|
Attributes: bob,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 3,
|
||||||
|
Bounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 1, 2},
|
||||||
|
Min: metricdata.NewExtrema(int64(minB)),
|
||||||
|
Max: metricdata.NewExtrema(int64(maxB)),
|
||||||
|
Sum: int64(sumB),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{
|
||||||
|
{
|
||||||
|
Attributes: alice,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 30,
|
||||||
|
Bounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 30, 0},
|
||||||
|
Min: metricdata.NewExtrema(minA),
|
||||||
|
Max: metricdata.NewExtrema(maxA),
|
||||||
|
Sum: sumA,
|
||||||
|
}, {
|
||||||
|
Attributes: bob,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 3,
|
||||||
|
Bounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 1, 2},
|
||||||
|
Min: metricdata.NewExtrema(minB),
|
||||||
|
Max: metricdata.NewExtrema(maxB),
|
||||||
|
Sum: sumB,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelEBucketA = metricdata.ExponentialBucket{
|
||||||
|
Offset: 5,
|
||||||
|
Counts: []uint64{0, 5, 0, 5},
|
||||||
|
}
|
||||||
|
otelEBucketB = metricdata.ExponentialBucket{
|
||||||
|
Offset: 3,
|
||||||
|
Counts: []uint64{0, 5, 0, 5},
|
||||||
|
}
|
||||||
|
otelEBucketsC = metricdata.ExponentialBucket{
|
||||||
|
Offset: 5,
|
||||||
|
Counts: []uint64{0, 1},
|
||||||
|
}
|
||||||
|
otelEBucketsD = metricdata.ExponentialBucket{
|
||||||
|
Offset: 3,
|
||||||
|
Counts: []uint64{0, 1},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelEHDPInt64 = []metricdata.ExponentialHistogramDataPoint[int64]{
|
||||||
|
{
|
||||||
|
Attributes: alice,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 30,
|
||||||
|
Scale: 2,
|
||||||
|
ZeroCount: 10,
|
||||||
|
PositiveBucket: otelEBucketA,
|
||||||
|
NegativeBucket: otelEBucketB,
|
||||||
|
ZeroThreshold: .01,
|
||||||
|
Min: metricdata.NewExtrema(int64(minA)),
|
||||||
|
Max: metricdata.NewExtrema(int64(maxA)),
|
||||||
|
Sum: int64(sumA),
|
||||||
|
}, {
|
||||||
|
Attributes: bob,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 3,
|
||||||
|
Scale: 4,
|
||||||
|
ZeroCount: 1,
|
||||||
|
PositiveBucket: otelEBucketsC,
|
||||||
|
NegativeBucket: otelEBucketsD,
|
||||||
|
ZeroThreshold: .02,
|
||||||
|
Min: metricdata.NewExtrema(int64(minB)),
|
||||||
|
Max: metricdata.NewExtrema(int64(maxB)),
|
||||||
|
Sum: int64(sumB),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
otelEHDPFloat64 = []metricdata.ExponentialHistogramDataPoint[float64]{
|
||||||
|
{
|
||||||
|
Attributes: alice,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 30,
|
||||||
|
Scale: 2,
|
||||||
|
ZeroCount: 10,
|
||||||
|
PositiveBucket: otelEBucketA,
|
||||||
|
NegativeBucket: otelEBucketB,
|
||||||
|
ZeroThreshold: .01,
|
||||||
|
Min: metricdata.NewExtrema(minA),
|
||||||
|
Max: metricdata.NewExtrema(maxA),
|
||||||
|
Sum: sumA,
|
||||||
|
}, {
|
||||||
|
Attributes: bob,
|
||||||
|
StartTime: start,
|
||||||
|
Time: end,
|
||||||
|
Count: 3,
|
||||||
|
Scale: 4,
|
||||||
|
ZeroCount: 1,
|
||||||
|
PositiveBucket: otelEBucketsC,
|
||||||
|
NegativeBucket: otelEBucketsD,
|
||||||
|
ZeroThreshold: .02,
|
||||||
|
Min: metricdata.NewExtrema(minB),
|
||||||
|
Max: metricdata.NewExtrema(maxB),
|
||||||
|
Sum: sumB,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbHDP = []*mpb.HistogramDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Count: 30,
|
||||||
|
Sum: &sumA,
|
||||||
|
ExplicitBounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 30, 0},
|
||||||
|
Min: &minA,
|
||||||
|
Max: &maxA,
|
||||||
|
}, {
|
||||||
|
Attributes: []*cpb.KeyValue{pbBob},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Count: 3,
|
||||||
|
Sum: &sumB,
|
||||||
|
ExplicitBounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 1, 2},
|
||||||
|
Min: &minB,
|
||||||
|
Max: &maxB,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbEHDPBA = &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: 5,
|
||||||
|
BucketCounts: []uint64{0, 5, 0, 5},
|
||||||
|
}
|
||||||
|
pbEHDPBB = &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: 3,
|
||||||
|
BucketCounts: []uint64{0, 5, 0, 5},
|
||||||
|
}
|
||||||
|
pbEHDPBC = &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: 5,
|
||||||
|
BucketCounts: []uint64{0, 1},
|
||||||
|
}
|
||||||
|
pbEHDPBD = &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: 3,
|
||||||
|
BucketCounts: []uint64{0, 1},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbEHDP = []*mpb.ExponentialHistogramDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Count: 30,
|
||||||
|
Sum: &sumA,
|
||||||
|
Scale: 2,
|
||||||
|
ZeroCount: 10,
|
||||||
|
Positive: pbEHDPBA,
|
||||||
|
Negative: pbEHDPBB,
|
||||||
|
Min: &minA,
|
||||||
|
Max: &maxA,
|
||||||
|
}, {
|
||||||
|
Attributes: []*cpb.KeyValue{pbBob},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Count: 3,
|
||||||
|
Sum: &sumB,
|
||||||
|
Scale: 4,
|
||||||
|
ZeroCount: 1,
|
||||||
|
Positive: pbEHDPBC,
|
||||||
|
Negative: pbEHDPBD,
|
||||||
|
Min: &minB,
|
||||||
|
Max: &maxB,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelHistInt64 = metricdata.Histogram[int64]{
|
||||||
|
Temporality: metricdata.DeltaTemporality,
|
||||||
|
DataPoints: otelHDPInt64,
|
||||||
|
}
|
||||||
|
otelHistFloat64 = metricdata.Histogram[float64]{
|
||||||
|
Temporality: metricdata.DeltaTemporality,
|
||||||
|
DataPoints: otelHDPFloat64,
|
||||||
|
}
|
||||||
|
invalidTemporality metricdata.Temporality
|
||||||
|
otelHistInvalid = metricdata.Histogram[int64]{
|
||||||
|
Temporality: invalidTemporality,
|
||||||
|
DataPoints: otelHDPInt64,
|
||||||
|
}
|
||||||
|
|
||||||
|
otelExpoHistInt64 = metricdata.ExponentialHistogram[int64]{
|
||||||
|
Temporality: metricdata.DeltaTemporality,
|
||||||
|
DataPoints: otelEHDPInt64,
|
||||||
|
}
|
||||||
|
otelExpoHistFloat64 = metricdata.ExponentialHistogram[float64]{
|
||||||
|
Temporality: metricdata.DeltaTemporality,
|
||||||
|
DataPoints: otelEHDPFloat64,
|
||||||
|
}
|
||||||
|
otelExpoHistInvalid = metricdata.ExponentialHistogram[int64]{
|
||||||
|
Temporality: invalidTemporality,
|
||||||
|
DataPoints: otelEHDPInt64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pbHist = &mpb.Histogram{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||||
|
DataPoints: pbHDP,
|
||||||
|
}
|
||||||
|
|
||||||
|
pbExpoHist = &mpb.ExponentialHistogram{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||||
|
DataPoints: pbEHDP,
|
||||||
|
}
|
||||||
|
|
||||||
|
otelDPtsInt64 = []metricdata.DataPoint[int64]{
|
||||||
|
{Attributes: alice, StartTime: start, Time: end, Value: 1},
|
||||||
|
{Attributes: bob, StartTime: start, Time: end, Value: 2},
|
||||||
|
}
|
||||||
|
otelDPtsFloat64 = []metricdata.DataPoint[float64]{
|
||||||
|
{Attributes: alice, StartTime: start, Time: end, Value: 1.0},
|
||||||
|
{Attributes: bob, StartTime: start, Time: end, Value: 2.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbDPtsInt64 = []*mpb.NumberDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsInt{AsInt: 1},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbBob},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsInt{AsInt: 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
pbDPtsFloat64 = []*mpb.NumberDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbBob},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelSumInt64 = metricdata.Sum[int64]{
|
||||||
|
Temporality: metricdata.CumulativeTemporality,
|
||||||
|
IsMonotonic: true,
|
||||||
|
DataPoints: otelDPtsInt64,
|
||||||
|
}
|
||||||
|
otelSumFloat64 = metricdata.Sum[float64]{
|
||||||
|
Temporality: metricdata.DeltaTemporality,
|
||||||
|
IsMonotonic: false,
|
||||||
|
DataPoints: otelDPtsFloat64,
|
||||||
|
}
|
||||||
|
otelSumInvalid = metricdata.Sum[float64]{
|
||||||
|
Temporality: invalidTemporality,
|
||||||
|
IsMonotonic: false,
|
||||||
|
DataPoints: otelDPtsFloat64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pbSumInt64 = &mpb.Sum{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||||
|
IsMonotonic: true,
|
||||||
|
DataPoints: pbDPtsInt64,
|
||||||
|
}
|
||||||
|
pbSumFloat64 = &mpb.Sum{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||||
|
IsMonotonic: false,
|
||||||
|
DataPoints: pbDPtsFloat64,
|
||||||
|
}
|
||||||
|
|
||||||
|
otelGaugeInt64 = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64}
|
||||||
|
otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64}
|
||||||
|
otelGaugeZeroStartTime = metricdata.Gauge[int64]{
|
||||||
|
DataPoints: []metricdata.DataPoint[int64]{
|
||||||
|
{Attributes: alice, StartTime: time.Time{}, Time: end, Value: 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbGaugeInt64 = &mpb.Gauge{DataPoints: pbDPtsInt64}
|
||||||
|
pbGaugeFloat64 = &mpb.Gauge{DataPoints: pbDPtsFloat64}
|
||||||
|
pbGaugeZeroStartTime = &mpb.Gauge{DataPoints: []*mpb.NumberDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{pbAlice},
|
||||||
|
StartTimeUnixNano: 0,
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsInt{AsInt: 1},
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
|
||||||
|
unknownAgg unknownAggT
|
||||||
|
otelMetrics = []metricdata.Metrics{
|
||||||
|
{
|
||||||
|
Name: "int64-gauge",
|
||||||
|
Description: "Gauge with int64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelGaugeInt64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-gauge",
|
||||||
|
Description: "Gauge with float64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelGaugeFloat64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-sum",
|
||||||
|
Description: "Sum with int64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelSumInt64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-sum",
|
||||||
|
Description: "Sum with float64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelSumFloat64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "invalid-sum",
|
||||||
|
Description: "Sum with invalid temporality",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelSumInvalid,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-histogram",
|
||||||
|
Description: "Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelHistInt64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-histogram",
|
||||||
|
Description: "Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelHistFloat64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "invalid-histogram",
|
||||||
|
Description: "Invalid histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelHistInvalid,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "unknown",
|
||||||
|
Description: "Unknown aggregation",
|
||||||
|
Unit: "1",
|
||||||
|
Data: unknownAgg,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-ExponentialHistogram",
|
||||||
|
Description: "Exponential Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelExpoHistInt64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-ExponentialHistogram",
|
||||||
|
Description: "Exponential Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelExpoHistFloat64,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "invalid-ExponentialHistogram",
|
||||||
|
Description: "Invalid Exponential Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelExpoHistInvalid,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "zero-time",
|
||||||
|
Description: "Gauge with 0 StartTime",
|
||||||
|
Unit: "1",
|
||||||
|
Data: otelGaugeZeroStartTime,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbMetrics = []*mpb.Metric{
|
||||||
|
{
|
||||||
|
Name: "int64-gauge",
|
||||||
|
Description: "Gauge with int64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Gauge{Gauge: pbGaugeInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-gauge",
|
||||||
|
Description: "Gauge with float64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Gauge{Gauge: pbGaugeFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-sum",
|
||||||
|
Description: "Sum with int64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Sum{Sum: pbSumInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-sum",
|
||||||
|
Description: "Sum with float64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Sum{Sum: pbSumFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-histogram",
|
||||||
|
Description: "Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Histogram{Histogram: pbHist},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-histogram",
|
||||||
|
Description: "Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Histogram{Histogram: pbHist},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-ExponentialHistogram",
|
||||||
|
Description: "Exponential Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-ExponentialHistogram",
|
||||||
|
Description: "Exponential Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "zero-time",
|
||||||
|
Description: "Gauge with 0 StartTime",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Gauge{Gauge: pbGaugeZeroStartTime},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelScopeMetrics = []metricdata.ScopeMetrics{
|
||||||
|
{
|
||||||
|
Scope: instrumentation.Scope{
|
||||||
|
Name: "test/code/path",
|
||||||
|
Version: "v0.1.0",
|
||||||
|
SchemaURL: semconv.SchemaURL,
|
||||||
|
},
|
||||||
|
Metrics: otelMetrics,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
pbScopeMetrics = []*mpb.ScopeMetrics{
|
||||||
|
{
|
||||||
|
Scope: &cpb.InstrumentationScope{
|
||||||
|
Name: "test/code/path",
|
||||||
|
Version: "v0.1.0",
|
||||||
|
},
|
||||||
|
Metrics: pbMetrics,
|
||||||
|
SchemaUrl: semconv.SchemaURL,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelRes = resource.NewWithAttributes(
|
||||||
|
semconv.SchemaURL,
|
||||||
|
semconv.ServiceName("test server"),
|
||||||
|
semconv.ServiceVersion("v0.1.0"),
|
||||||
|
)
|
||||||
|
|
||||||
|
pbRes = &rpb.Resource{
|
||||||
|
Attributes: []*cpb.KeyValue{
|
||||||
|
{
|
||||||
|
Key: "service.name",
|
||||||
|
Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "service.version",
|
||||||
|
Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
otelResourceMetrics = &metricdata.ResourceMetrics{
|
||||||
|
Resource: otelRes,
|
||||||
|
ScopeMetrics: otelScopeMetrics,
|
||||||
|
}
|
||||||
|
|
||||||
|
pbResourceMetrics = &mpb.ResourceMetrics{
|
||||||
|
Resource: pbRes,
|
||||||
|
ScopeMetrics: pbScopeMetrics,
|
||||||
|
SchemaUrl: semconv.SchemaURL,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestTransformations verifies the OTel-to-protobuf transform functions by
// comparing the package-level otel* fixtures against their expected pb*
// protobuf counterparts.
func TestTransformations(t *testing.T) {
	// Run tests from the "bottom-up" of the metricdata data-types and halt
	// when a failure occurs to ensure the clearest failure message (as
	// opposed to the opposite of testing from the top-down which will obscure
	// errors deep inside the structs).

	// DataPoint types.
	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPInt64))
	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPFloat64))
	assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64))
	// require (not assert): later aggregation checks build on these data
	// points, so stop immediately if they are wrong.
	require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64))
	assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPInt64))
	assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPFloat64))
	assert.Equal(t, pbEHDPBA, ExponentialHistogramDataPointBuckets(otelEBucketA))

	// Aggregations.
	h, err := Histogram(otelHistInt64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
	h, err = Histogram(otelHistFloat64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
	// An invalid temporality must surface errUnknownTemporality and produce
	// no metric data.
	h, err = Histogram(otelHistInvalid)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.Nil(t, h)

	s, err := Sum[int64](otelSumInt64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s)
	s, err = Sum[float64](otelSumFloat64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s)
	s, err = Sum[float64](otelSumInvalid)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.Nil(t, s)

	assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64))
	require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64))

	e, err := ExponentialHistogram(otelExpoHistInt64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e)
	e, err = ExponentialHistogram(otelExpoHistFloat64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e)
	e, err = ExponentialHistogram(otelExpoHistInvalid)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.Nil(t, e)

	// Metrics.
	// The fixtures deliberately contain invalid entries, so both error
	// sentinels are expected alongside the successfully transformed metrics.
	m, err := Metrics(otelMetrics)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.ErrorIs(t, err, errUnknownAggregation)
	require.Equal(t, pbMetrics, m)

	// Scope Metrics.
	sm, err := ScopeMetrics(otelScopeMetrics)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.ErrorIs(t, err, errUnknownAggregation)
	require.Equal(t, pbScopeMetrics, sm)

	// Resource Metrics.
	rm, err := ResourceMetrics(otelResourceMetrics)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.ErrorIs(t, err, errUnknownAggregation)
	require.Equal(t, pbResourceMetrics, rm)
}
|
196
internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
Normal file
196
internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
Normal file
@ -0,0 +1,196 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"{{ .envconfigImportPath }}"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultEnvOptionsReader is the default environments reader.
// It reads from the process environment and the filesystem, and looks up
// variables under the standard "OTEL_EXPORTER_OTLP" namespace prefix.
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
	GetEnv:    os.Getenv,    // environment lookup
	ReadFile:  os.ReadFile,  // used for certificate/key files referenced by env vars
	Namespace: "OTEL_EXPORTER_OTLP",
}
|
||||||
|
|
||||||
|
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
|
||||||
|
func ApplyGRPCEnvConfigs(cfg Config) Config {
|
||||||
|
opts := getOptionsFromEnv()
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
|
||||||
|
func ApplyHTTPEnvConfigs(cfg Config) Config {
|
||||||
|
opts := getOptionsFromEnv()
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyHTTPOption(cfg)
|
||||||
|
}
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// getOptionsFromEnv reads the OTLP exporter environment variables (generic
// "OTEL_EXPORTER_OTLP_*" and metrics-specific "OTEL_EXPORTER_OTLP_METRICS_*")
// and returns the corresponding options. Signal-specific variables are
// registered after the generic ones, so they take precedence.
func getOptionsFromEnv() []GenericOption {
	opts := []GenericOption{}

	// tlsConf accumulates root CAs and client certificates from the env;
	// it is only turned into an option at the end (see withTLSConfig below)
	// if anything was actually loaded.
	tlsConf := &tls.Config{}
	DefaultEnvOptionsReader.Apply(
		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For OTLP/HTTP endpoint URLs without a per-signal
				// configuration, the passed endpoint is used as a base URL
				// and the signals are sent to these paths relative to that.
				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
			opts = append(opts, withEndpointScheme(u))
			opts = append(opts, newSplitOption(func(cfg Config) Config {
				cfg.Metrics.Endpoint = u.Host
				// For endpoint URLs for OTLP/HTTP per-signal variables, the
				// URL MUST be used as-is without any modification. The only
				// exception is that if an URL contains no path part, the root
				// path / MUST be used.
				path := u.Path
				if path == "" {
					path = "/"
				}
				cfg.Metrics.URLPath = path
				return cfg
			}, withEndpointForGRPC(u)))
		}),
		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
	)

	return opts
}
|
||||||
|
|
||||||
|
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
|
||||||
|
return func(cfg Config) Config {
|
||||||
|
// For OTLP/gRPC endpoints, this is the target to which the
|
||||||
|
// exporter is going to send telemetry.
|
||||||
|
cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
|
||||||
|
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if v, ok := e.GetEnvValue(n); ok {
|
||||||
|
cp := NoCompression
|
||||||
|
if v == "gzip" {
|
||||||
|
cp = GzipCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
fn(cp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func withEndpointScheme(u *url.URL) GenericOption {
|
||||||
|
switch strings.ToLower(u.Scheme) {
|
||||||
|
case "http", "unix":
|
||||||
|
return WithInsecure()
|
||||||
|
default:
|
||||||
|
return WithSecure()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// revive:disable-next-line:flag-parameter
|
||||||
|
func withInsecure(b bool) GenericOption {
|
||||||
|
if b {
|
||||||
|
return WithInsecure()
|
||||||
|
}
|
||||||
|
return WithSecure()
|
||||||
|
}
|
||||||
|
|
||||||
|
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if c.RootCAs != nil || len(c.Certificates) > 0 {
|
||||||
|
fn(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
|
||||||
|
return func(e *envconfig.EnvOptionsReader) {
|
||||||
|
if s, ok := e.GetEnvValue(n); ok {
|
||||||
|
switch strings.ToLower(s) {
|
||||||
|
case "cumulative":
|
||||||
|
fn(cumulativeTemporality)
|
||||||
|
case "delta":
|
||||||
|
fn(deltaTemporality)
|
||||||
|
case "lowmemory":
|
||||||
|
fn(lowMemory)
|
||||||
|
default:
|
||||||
|
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cumulativeTemporality returns cumulative temporality for every instrument
// kind. It implements the "cumulative" temporality preference.
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
	return metricdata.CumulativeTemporality
}
|
||||||
|
|
||||||
|
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
switch ik {
|
||||||
|
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
|
||||||
|
return metricdata.DeltaTemporality
|
||||||
|
default:
|
||||||
|
return metricdata.CumulativeTemporality
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
switch ik {
|
||||||
|
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
|
||||||
|
return metricdata.DeltaTemporality
|
||||||
|
default:
|
||||||
|
return metricdata.CumulativeTemporality
|
||||||
|
}
|
||||||
|
}
|
106
internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl
Normal file
106
internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWithEnvTemporalityPreference(t *testing.T) {
|
||||||
|
origReader := DefaultEnvOptionsReader.GetEnv
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
envValue string
|
||||||
|
want map[metric.InstrumentKind]metricdata.Temporality
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "default do not set the selector",
|
||||||
|
envValue: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "non-normative do not set the selector",
|
||||||
|
envValue: "non-normative",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cumulative",
|
||||||
|
envValue: "cumulative",
|
||||||
|
want: map[metric.InstrumentKind]metricdata.Temporality{
|
||||||
|
metric.InstrumentKindCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindHistogram: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "delta",
|
||||||
|
envValue: "delta",
|
||||||
|
want: map[metric.InstrumentKind]metricdata.Temporality{
|
||||||
|
metric.InstrumentKindCounter: metricdata.DeltaTemporality,
|
||||||
|
metric.InstrumentKindHistogram: metricdata.DeltaTemporality,
|
||||||
|
metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableCounter: metricdata.DeltaTemporality,
|
||||||
|
metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "lowmemory",
|
||||||
|
envValue: "lowmemory",
|
||||||
|
want: map[metric.InstrumentKind]metricdata.Temporality{
|
||||||
|
metric.InstrumentKindCounter: metricdata.DeltaTemporality,
|
||||||
|
metric.InstrumentKindHistogram: metricdata.DeltaTemporality,
|
||||||
|
metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
|
||||||
|
metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
DefaultEnvOptionsReader.GetEnv = func(key string) string {
|
||||||
|
if key == "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" {
|
||||||
|
return tt.envValue
|
||||||
|
}
|
||||||
|
return origReader(key)
|
||||||
|
}
|
||||||
|
cfg := Config{}
|
||||||
|
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||||
|
|
||||||
|
if tt.want == nil {
|
||||||
|
// There is no function set, the SDK's default is used.
|
||||||
|
assert.Nil(t, cfg.Metrics.TemporalitySelector)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NotNil(t, cfg.Metrics.TemporalitySelector)
|
||||||
|
for ik, want := range tt.want {
|
||||||
|
assert.Equal(t, want, cfg.Metrics.TemporalitySelector(ik))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
DefaultEnvOptionsReader.GetEnv = origReader
|
||||||
|
}
|
376
internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
Normal file
376
internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
Normal file
@ -0,0 +1,376 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/backoff"
|
||||||
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
|
"google.golang.org/grpc/encoding/gzip"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||||
|
"{{ .retryImportPath }}"
|
||||||
|
"go.opentelemetry.io/otel/internal/global"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Default values for the OTLP metrics exporter configuration.
const (
	// DefaultMaxAttempts describes how many times the driver
	// should retry the sending of the payload in case of a
	// retryable error.
	DefaultMaxAttempts int = 5
	// DefaultMetricsPath is a default URL path for endpoint that
	// receives metrics.
	DefaultMetricsPath string = "/v1/metrics"
	// DefaultBackoff is a default base backoff time used in the
	// exponential backoff strategy.
	DefaultBackoff time.Duration = 300 * time.Millisecond
	// DefaultTimeout is a default max waiting time for the backend to process
	// each span or metrics batch.
	DefaultTimeout time.Duration = 10 * time.Second
)
|
||||||
|
|
||||||
|
type (
	// SignalConfig holds the per-signal (here: metrics) exporter settings
	// shared by the HTTP and gRPC transports.
	SignalConfig struct {
		Endpoint    string            // host[:port] (and, for gRPC, optional path) of the collector
		Insecure    bool              // disable transport security when true
		TLSCfg      *tls.Config       // TLS settings for the HTTP transport
		Headers     map[string]string // extra headers sent with each export
		Compression Compression       // payload compression (none or gzip)
		Timeout     time.Duration     // max time for the backend to process a batch
		URLPath     string            // HTTP URL path the metrics are posted to

		// gRPC configurations
		GRPCCredentials credentials.TransportCredentials

		TemporalitySelector metric.TemporalitySelector
		AggregationSelector metric.AggregationSelector
	}

	// Config is the complete exporter configuration: the metrics signal
	// settings plus transport-wide retry and gRPC connection options.
	Config struct {
		// Signal specific configurations
		Metrics SignalConfig

		RetryConfig retry.Config

		// gRPC configurations
		ReconnectionPeriod time.Duration
		ServiceConfig      string
		DialOptions        []grpc.DialOption
		GRPCConn           *grpc.ClientConn
	}
)
|
||||||
|
|
||||||
|
// NewHTTPConfig returns a new Config with all settings applied from opts and
|
||||||
|
// any unset setting using the default HTTP config values.
|
||||||
|
func NewHTTPConfig(opts ...HTTPOption) Config {
|
||||||
|
cfg := Config{
|
||||||
|
Metrics: SignalConfig{
|
||||||
|
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
|
||||||
|
URLPath: DefaultMetricsPath,
|
||||||
|
Compression: NoCompression,
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
|
||||||
|
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||||
|
AggregationSelector: metric.DefaultAggregationSelector,
|
||||||
|
},
|
||||||
|
RetryConfig: retry.DefaultConfig,
|
||||||
|
}
|
||||||
|
cfg = ApplyHTTPEnvConfigs(cfg)
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyHTTPOption(cfg)
|
||||||
|
}
|
||||||
|
cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanPath returns a path with all spaces trimmed and all redundancies
// removed. If urlPath is empty or cleaning it results in an empty string,
// defaultPath is returned instead. Relative results are made absolute by
// prefixing a "/".
func cleanPath(urlPath string, defaultPath string) string {
	cleaned := path.Clean(strings.TrimSpace(urlPath))
	switch {
	case cleaned == ".":
		// path.Clean collapses empty/whitespace-only input to ".".
		return defaultPath
	case !path.IsAbs(cleaned):
		return "/" + cleaned
	default:
		return cleaned
	}
}
|
||||||
|
|
||||||
|
// NewGRPCConfig returns a new Config with all settings applied from opts and
|
||||||
|
// any unset setting using the default gRPC config values.
|
||||||
|
func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||||
|
userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
|
||||||
|
cfg := Config{
|
||||||
|
Metrics: SignalConfig{
|
||||||
|
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
|
||||||
|
URLPath: DefaultMetricsPath,
|
||||||
|
Compression: NoCompression,
|
||||||
|
Timeout: DefaultTimeout,
|
||||||
|
|
||||||
|
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||||
|
AggregationSelector: metric.DefaultAggregationSelector,
|
||||||
|
},
|
||||||
|
RetryConfig: retry.DefaultConfig,
|
||||||
|
DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
|
||||||
|
}
|
||||||
|
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||||
|
for _, opt := range opts {
|
||||||
|
cfg = opt.ApplyGRPCOption(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.ServiceConfig != "" {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||||
|
}
|
||||||
|
// Priroritize GRPCCredentials over Insecure (passing both is an error).
|
||||||
|
if cfg.Metrics.GRPCCredentials != nil {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||||
|
} else if cfg.Metrics.Insecure {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||||
|
} else {
|
||||||
|
// Default to using the host's root CA.
|
||||||
|
creds := credentials.NewTLS(nil)
|
||||||
|
cfg.Metrics.GRPCCredentials = creds
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
|
||||||
|
}
|
||||||
|
if cfg.Metrics.Compression == GzipCompression {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
|
||||||
|
}
|
||||||
|
if len(cfg.DialOptions) != 0 {
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
|
||||||
|
}
|
||||||
|
if cfg.ReconnectionPeriod != 0 {
|
||||||
|
p := grpc.ConnectParams{
|
||||||
|
Backoff: backoff.DefaultConfig,
|
||||||
|
MinConnectTimeout: cfg.ReconnectionPeriod,
|
||||||
|
}
|
||||||
|
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
	// GenericOption applies an option to the HTTP or gRPC driver.
	GenericOption interface {
		ApplyHTTPOption(Config) Config
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// HTTPOption applies an option to the HTTP driver.
	HTTPOption interface {
		ApplyHTTPOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}

	// GRPCOption applies an option to the gRPC driver.
	GRPCOption interface {
		ApplyGRPCOption(Config) Config

		// A private method to prevent users implementing the
		// interface and so future additions to it will not
		// violate compatibility.
		private()
	}
)
|
||||||
|
|
||||||
|
// genericOption is an option that applies the same logic
// for both gRPC and HTTP.
type genericOption struct {
	fn func(Config) Config
}

// ApplyGRPCOption applies the wrapped transform for the gRPC driver.
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
	return g.fn(cfg)
}

// ApplyHTTPOption applies the same wrapped transform for the HTTP driver.
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
	return g.fn(cfg)
}

func (genericOption) private() {}

// newGenericOption wraps fn as an option usable with either driver.
func newGenericOption(fn func(cfg Config) Config) GenericOption {
	return &genericOption{fn: fn}
}
|
||||||
|
|
||||||
|
// splitOption is an option that applies different logics
// for gRPC and HTTP.
type splitOption struct {
	httpFn func(Config) Config
	grpcFn func(Config) Config
}

// ApplyGRPCOption applies the gRPC-specific transform.
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
	return g.grpcFn(cfg)
}

// ApplyHTTPOption applies the HTTP-specific transform.
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
	return g.httpFn(cfg)
}

func (splitOption) private() {}

// newSplitOption pairs an HTTP transform with a gRPC transform in a single
// option.
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
}
|
||||||
|
|
||||||
|
// httpOption is an option that is only applied to the HTTP driver.
type httpOption struct {
	fn func(Config) Config
}

// ApplyHTTPOption applies the wrapped transform for the HTTP driver.
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
	return h.fn(cfg)
}

func (httpOption) private() {}

// NewHTTPOption wraps fn as an HTTP-only option.
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
	return &httpOption{fn: fn}
}
|
||||||
|
|
||||||
|
// grpcOption is an option that is only applied to the gRPC driver.
type grpcOption struct {
	fn func(Config) Config
}

// ApplyGRPCOption applies the wrapped transform for the gRPC driver.
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
	return h.fn(cfg)
}

func (grpcOption) private() {}

// NewGRPCOption wraps fn as a gRPC-only option.
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
	return &grpcOption{fn: fn}
}
|
||||||
|
|
||||||
|
// Generic Options
|
||||||
|
|
||||||
|
func WithEndpoint(endpoint string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Endpoint = endpoint
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithCompression(compression Compression) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Compression = compression
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithURLPath(urlPath string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.URLPath = urlPath
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithRetry(rc retry.Config) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.RetryConfig = rc
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
|
||||||
|
return newSplitOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.TLSCfg = tlsCfg.Clone()
|
||||||
|
return cfg
|
||||||
|
}, func(cfg Config) Config {
|
||||||
|
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithInsecure() GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Insecure = true
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithSecure() GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Insecure = false
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithHeaders(headers map[string]string) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Headers = headers
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTimeout(duration time.Duration) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.Timeout = duration
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
|
||||||
|
return newGenericOption(func(cfg Config) Config {
|
||||||
|
cfg.Metrics.TemporalitySelector = selector
|
||||||
|
return cfg
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAggregationSelector sets the AggregationSelector the exporter uses.
// The provided selector is wrapped so that each returned aggregation is
// deep-copied and validated before use; an invalid aggregation is replaced
// by the SDK default for that instrument kind and the error is logged.
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
	// Deep copy and validate before using.
	wrapped := func(ik metric.InstrumentKind) aggregation.Aggregation {
		a := selector(ik)
		// Copy so later mutation of the selector's value cannot affect us.
		cpA := a.Copy()
		if err := cpA.Err(); err != nil {
			// Fall back to the default rather than exporting with a broken
			// aggregation.
			cpA = metric.DefaultAggregationSelector(ik)
			global.Error(
				err, "using default aggregation instead",
				"aggregation", a,
				"replacement", cpA,
			)
		}
		return cpA
	}

	return newGenericOption(func(cfg Config) Config {
		cfg.Metrics.AggregationSelector = wrapped
		return cfg
	})
}
|
534
internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
Normal file
534
internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
Normal file
@ -0,0 +1,534 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
|
"{{ .envconfigImportPath }}"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
WeakCertificate = `
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
|
||||||
|
MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
|
||||||
|
MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
|
||||||
|
nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
|
||||||
|
sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
|
||||||
|
KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
|
||||||
|
AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
|
||||||
|
1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
|
||||||
|
Lhnm4N/QDk5rek0=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
|
`
|
||||||
|
WeakPrivateKey = `
|
||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
|
||||||
|
SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
|
||||||
|
kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
|
||||||
|
-----END PRIVATE KEY-----
|
||||||
|
`
|
||||||
|
)
|
||||||
|
|
||||||
|
type env map[string]string
|
||||||
|
|
||||||
|
func (e *env) getEnv(env string) string {
|
||||||
|
return (*e)[env]
|
||||||
|
}
|
||||||
|
|
||||||
|
type fileReader map[string][]byte
|
||||||
|
|
||||||
|
func (f *fileReader) readFile(filename string) ([]byte, error) {
|
||||||
|
if b, ok := (*f)[filename]; ok {
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
return nil, errors.New("file not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestConfigs(t *testing.T) {
|
||||||
|
tlsCert, err := CreateTLSConfig([]byte(WeakCertificate))
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
opts []GenericOption
|
||||||
|
env env
|
||||||
|
fileReader fileReader
|
||||||
|
asserts func(t *testing.T, c *Config, grpcOption bool)
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Test default configs",
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.Equal(t, "localhost:4317", c.Metrics.Endpoint)
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, "localhost:4318", c.Metrics.Endpoint)
|
||||||
|
}
|
||||||
|
assert.Equal(t, NoCompression, c.Metrics.Compression)
|
||||||
|
assert.Equal(t, map[string]string(nil), c.Metrics.Headers)
|
||||||
|
assert.Equal(t, 10*time.Second, c.Metrics.Timeout)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Endpoint Tests
|
||||||
|
{
|
||||||
|
name: "Test With Endpoint",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithEndpoint("someendpoint"),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "someendpoint", c.Metrics.Endpoint)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.False(t, c.Metrics.Insecure)
|
||||||
|
if grpcOption {
|
||||||
|
assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint)
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, "env.endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, "/prefix/v1/metrics", c.Metrics.URLPath)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Endpoint",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.True(t, c.Metrics.Insecure)
|
||||||
|
assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint)
|
||||||
|
if !grpcOption {
|
||||||
|
assert.Equal(t, "/", c.Metrics.URLPath)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Endpoint",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithEndpoint("metrics_endpoint"),
|
||||||
|
},
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint with HTTP scheme",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, true, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, true, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Endpoint with HTTPS scheme",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, false, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Endpoint with uppercase scheme",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
|
||||||
|
assert.Equal(t, true, c.Metrics.Insecure)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Certificate tests
|
||||||
|
{
|
||||||
|
name: "Test Default Certificate",
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
assert.Nil(t, c.Metrics.TLSCfg)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test With Certificate",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTLSClientConfig(tlsCert),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
//TODO: make sure gRPC's credentials actually works
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Certificate",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
|
||||||
|
},
|
||||||
|
fileReader: fileReader{
|
||||||
|
"cert_path": []byte(WeakCertificate),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Certificate",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path",
|
||||||
|
},
|
||||||
|
fileReader: fileReader{
|
||||||
|
"cert_path": []byte(WeakCertificate),
|
||||||
|
"invalid_cert": []byte("invalid certificate file."),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Certificate",
|
||||||
|
opts: []GenericOption{},
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
|
||||||
|
},
|
||||||
|
fileReader: fileReader{
|
||||||
|
"cert_path": []byte(WeakCertificate),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
if grpcOption {
|
||||||
|
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||||
|
} else {
|
||||||
|
// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
|
||||||
|
assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects()))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Headers tests
|
||||||
|
{
|
||||||
|
name: "Test With Headers",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithHeaders(map[string]string{"h1": "v1"}),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Headers",
|
||||||
|
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Headers",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Headers",
|
||||||
|
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithHeaders(map[string]string{"m1": "mv1"}),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Compression Tests
|
||||||
|
{
|
||||||
|
name: "Test With Compression",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithCompression(GzipCompression),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, GzipCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Compression",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_COMPRESSION": "gzip",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, GzipCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Compression",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, GzipCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Compression",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithCompression(NoCompression),
|
||||||
|
},
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, NoCompression, c.Metrics.Compression)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Timeout Tests
|
||||||
|
{
|
||||||
|
name: "Test With Timeout",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTimeout(time.Duration(5 * time.Second)),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, 5*time.Second, c.Metrics.Timeout)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Timeout",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, c.Metrics.Timeout, 15*time.Second)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Environment Signal Specific Timeout",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, c.Metrics.Timeout, 28*time.Second)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Test Mixed Environment and With Timeout",
|
||||||
|
env: map[string]string{
|
||||||
|
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||||
|
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
|
||||||
|
},
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTimeout(5 * time.Second),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
assert.Equal(t, c.Metrics.Timeout, 5*time.Second)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Temporality Selector Tests
|
||||||
|
{
|
||||||
|
name: "WithTemporalitySelector",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithTemporalitySelector(deltaSelector),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
// Function value comparisons are disallowed, test non-default
|
||||||
|
// behavior of a TemporalitySelector here to ensure our "catch
|
||||||
|
// all" was set.
|
||||||
|
var undefinedKind metric.InstrumentKind
|
||||||
|
got := c.Metrics.TemporalitySelector
|
||||||
|
assert.Equal(t, metricdata.DeltaTemporality, got(undefinedKind))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Aggregation Selector Tests
|
||||||
|
{
|
||||||
|
name: "WithAggregationSelector",
|
||||||
|
opts: []GenericOption{
|
||||||
|
WithAggregationSelector(dropSelector),
|
||||||
|
},
|
||||||
|
asserts: func(t *testing.T, c *Config, grpcOption bool) {
|
||||||
|
// Function value comparisons are disallowed, test non-default
|
||||||
|
// behavior of a AggregationSelector here to ensure our "catch
|
||||||
|
// all" was set.
|
||||||
|
var undefinedKind metric.InstrumentKind
|
||||||
|
got := c.Metrics.AggregationSelector
|
||||||
|
assert.Equal(t, aggregation.Drop{}, got(undefinedKind))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
origEOR := DefaultEnvOptionsReader
|
||||||
|
DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||||
|
GetEnv: tt.env.getEnv,
|
||||||
|
ReadFile: tt.fileReader.readFile,
|
||||||
|
Namespace: "OTEL_EXPORTER_OTLP",
|
||||||
|
}
|
||||||
|
t.Cleanup(func() { DefaultEnvOptionsReader = origEOR })
|
||||||
|
|
||||||
|
// Tests Generic options as HTTP Options
|
||||||
|
cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...)
|
||||||
|
tt.asserts(t, &cfg, false)
|
||||||
|
|
||||||
|
// Tests Generic options as gRPC Options
|
||||||
|
cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...)
|
||||||
|
tt.asserts(t, &cfg, true)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dropSelector(metric.InstrumentKind) aggregation.Aggregation {
|
||||||
|
return aggregation.Drop{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func deltaSelector(metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
return metricdata.DeltaTemporality
|
||||||
|
}
|
||||||
|
|
||||||
|
func asHTTPOptions(opts []GenericOption) []HTTPOption {
|
||||||
|
converted := make([]HTTPOption, len(opts))
|
||||||
|
for i, o := range opts {
|
||||||
|
converted[i] = NewHTTPOption(o.ApplyHTTPOption)
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func asGRPCOptions(opts []GenericOption) []GRPCOption {
|
||||||
|
converted := make([]GRPCOption, len(opts))
|
||||||
|
for i, o := range opts {
|
||||||
|
converted[i] = NewGRPCOption(o.ApplyGRPCOption)
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCleanPath(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
urlPath string
|
||||||
|
defaultPath string
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "clean empty path",
|
||||||
|
args: args{
|
||||||
|
urlPath: "",
|
||||||
|
defaultPath: "DefaultPath",
|
||||||
|
},
|
||||||
|
want: "DefaultPath",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "clean metrics path",
|
||||||
|
args: args{
|
||||||
|
urlPath: "/prefix/v1/metrics",
|
||||||
|
defaultPath: "DefaultMetricsPath",
|
||||||
|
},
|
||||||
|
want: "/prefix/v1/metrics",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "clean traces path",
|
||||||
|
args: args{
|
||||||
|
urlPath: "https://env_endpoint",
|
||||||
|
defaultPath: "DefaultTracesPath",
|
||||||
|
},
|
||||||
|
want: "/https:/env_endpoint",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "spaces trimmed",
|
||||||
|
args: args{
|
||||||
|
urlPath: " /dir",
|
||||||
|
},
|
||||||
|
want: "/dir",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "clean path empty",
|
||||||
|
args: args{
|
||||||
|
urlPath: "dir/..",
|
||||||
|
defaultPath: "DefaultTracesPath",
|
||||||
|
},
|
||||||
|
want: "DefaultTracesPath",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "make absolute",
|
||||||
|
args: args{
|
||||||
|
urlPath: "dir/a",
|
||||||
|
},
|
||||||
|
want: "/dir/a",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want {
|
||||||
|
t.Errorf("CleanPath() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
58
internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
Normal file
58
internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
|
||||||
|
DefaultCollectorGRPCPort uint16 = 4317
|
||||||
|
// DefaultCollectorHTTPPort is the default HTTP port of the collector.
|
||||||
|
DefaultCollectorHTTPPort uint16 = 4318
|
||||||
|
// DefaultCollectorHost is the host address the Exporter will attempt
|
||||||
|
// connect to if no collector address is provided.
|
||||||
|
DefaultCollectorHost string = "localhost"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Compression describes the compression used for payloads sent to the
|
||||||
|
// collector.
|
||||||
|
type Compression int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// NoCompression tells the driver to send payloads without
|
||||||
|
// compression.
|
||||||
|
NoCompression Compression = iota
|
||||||
|
// GzipCompression tells the driver to send payloads after
|
||||||
|
// compressing them with gzip.
|
||||||
|
GzipCompression
|
||||||
|
)
|
||||||
|
|
||||||
|
// RetrySettings defines configuration for retrying batches in case of export failure
|
||||||
|
// using an exponential backoff.
|
||||||
|
type RetrySettings struct {
|
||||||
|
// Enabled indicates whether to not retry sending batches in case of export failure.
|
||||||
|
Enabled bool
|
||||||
|
// InitialInterval the time to wait after the first failure before retrying.
|
||||||
|
InitialInterval time.Duration
|
||||||
|
// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
|
||||||
|
// consecutive retries will always be `MaxInterval`.
|
||||||
|
MaxInterval time.Duration
|
||||||
|
// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
|
||||||
|
// Once this value is reached, the data is discarded.
|
||||||
|
MaxElapsedTime time.Duration
|
||||||
|
}
|
49
internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
Normal file
49
internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
Normal file
@ -0,0 +1,49 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package oconf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||||
|
// a tls.Config that will use this certifate to verify a server certificate.
|
||||||
|
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||||
|
b, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return CreateTLSConfig(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
|
||||||
|
// to verify a server certificate.
|
||||||
|
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
|
||||||
|
cp := x509.NewCertPool()
|
||||||
|
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
|
||||||
|
return nil, errors.New("failed to append certificate to the cert pool")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &tls.Config{
|
||||||
|
RootCAs: cp,
|
||||||
|
}, nil
|
||||||
|
}
|
313
internal/shared/otlp/otlpmetric/otest/client.go.tmpl
Normal file
313
internal/shared/otlp/otlpmetric/otest/client.go.tmpl
Normal file
@ -0,0 +1,313 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/otest/client.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||||
|
collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Sat Jan 01 2000 00:00:00 GMT+0000.
|
||||||
|
start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
|
||||||
|
end = start.Add(30 * time.Second)
|
||||||
|
|
||||||
|
kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
|
||||||
|
}}
|
||||||
|
kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
|
||||||
|
}}
|
||||||
|
kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
|
||||||
|
}}
|
||||||
|
kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
|
||||||
|
}}
|
||||||
|
|
||||||
|
min, max, sum = 2.0, 4.0, 90.0
|
||||||
|
hdp = []*mpb.HistogramDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{kvAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Count: 30,
|
||||||
|
Sum: &sum,
|
||||||
|
ExplicitBounds: []float64{1, 5},
|
||||||
|
BucketCounts: []uint64{0, 30, 0},
|
||||||
|
Min: &min,
|
||||||
|
Max: &max,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
hist = &mpb.Histogram{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||||
|
DataPoints: hdp,
|
||||||
|
}
|
||||||
|
|
||||||
|
dPtsInt64 = []*mpb.NumberDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{kvAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsInt{AsInt: 1},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{kvBob},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsInt{AsInt: 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
dPtsFloat64 = []*mpb.NumberDataPoint{
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{kvAlice},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Attributes: []*cpb.KeyValue{kvBob},
|
||||||
|
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||||
|
TimeUnixNano: uint64(end.UnixNano()),
|
||||||
|
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sumInt64 = &mpb.Sum{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||||
|
IsMonotonic: true,
|
||||||
|
DataPoints: dPtsInt64,
|
||||||
|
}
|
||||||
|
sumFloat64 = &mpb.Sum{
|
||||||
|
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||||
|
IsMonotonic: false,
|
||||||
|
DataPoints: dPtsFloat64,
|
||||||
|
}
|
||||||
|
|
||||||
|
gaugeInt64 = &mpb.Gauge{DataPoints: dPtsInt64}
|
||||||
|
gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64}
|
||||||
|
|
||||||
|
metrics = []*mpb.Metric{
|
||||||
|
{
|
||||||
|
Name: "int64-gauge",
|
||||||
|
Description: "Gauge with int64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Gauge{Gauge: gaugeInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-gauge",
|
||||||
|
Description: "Gauge with float64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Gauge{Gauge: gaugeFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "int64-sum",
|
||||||
|
Description: "Sum with int64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Sum{Sum: sumInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "float64-sum",
|
||||||
|
Description: "Sum with float64 values",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Sum{Sum: sumFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "histogram",
|
||||||
|
Description: "Histogram",
|
||||||
|
Unit: "1",
|
||||||
|
Data: &mpb.Metric_Histogram{Histogram: hist},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
scope = &cpb.InstrumentationScope{
|
||||||
|
Name: "test/code/path",
|
||||||
|
Version: "v0.1.0",
|
||||||
|
}
|
||||||
|
scopeMetrics = []*mpb.ScopeMetrics{
|
||||||
|
{
|
||||||
|
Scope: scope,
|
||||||
|
Metrics: metrics,
|
||||||
|
SchemaUrl: semconv.SchemaURL,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
res = &rpb.Resource{
|
||||||
|
Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer},
|
||||||
|
}
|
||||||
|
resourceMetrics = &mpb.ResourceMetrics{
|
||||||
|
Resource: res,
|
||||||
|
ScopeMetrics: scopeMetrics,
|
||||||
|
SchemaUrl: semconv.SchemaURL,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type Client interface {
|
||||||
|
UploadMetrics(context.Context, *mpb.ResourceMetrics) error
|
||||||
|
ForceFlush(context.Context) error
|
||||||
|
Shutdown(context.Context) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientFactory is a function that when called returns a
|
||||||
|
// Client implementation that is connected to also returned
|
||||||
|
// Collector implementation. The Client is ready to upload metric data to the
|
||||||
|
// Collector which is ready to store that data.
|
||||||
|
//
|
||||||
|
// If resultCh is not nil, the returned Collector needs to use the responses
|
||||||
|
// from that channel to send back to the client for every export request.
|
||||||
|
type ClientFactory func(resultCh <-chan ExportResult) (Client, Collector)
|
||||||
|
|
||||||
|
// RunClientTests runs a suite of Client integration tests. For example:
|
||||||
|
//
|
||||||
|
// t.Run("Integration", RunClientTests(factory))
|
||||||
|
func RunClientTests(f ClientFactory) func(*testing.T) {
|
||||||
|
return func(t *testing.T) {
|
||||||
|
t.Run("ClientHonorsContextErrors", func(t *testing.T) {
|
||||||
|
t.Run("Shutdown", testCtxErrs(func() func(context.Context) error {
|
||||||
|
c, _ := f(nil)
|
||||||
|
return c.Shutdown
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error {
|
||||||
|
c, _ := f(nil)
|
||||||
|
return c.ForceFlush
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error {
|
||||||
|
c, _ := f(nil)
|
||||||
|
return func(ctx context.Context) error {
|
||||||
|
return c.UploadMetrics(ctx, nil)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ForceFlushFlushes", func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, collector := f(nil)
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
|
||||||
|
require.NoError(t, client.ForceFlush(ctx))
|
||||||
|
rm := collector.Collect().Dump()
|
||||||
|
// Data correctness is not important, just it was received.
|
||||||
|
require.Greater(t, len(rm), 0, "no data uploaded")
|
||||||
|
|
||||||
|
require.NoError(t, client.Shutdown(ctx))
|
||||||
|
rm = collector.Collect().Dump()
|
||||||
|
assert.Len(t, rm, 0, "client did not flush all data")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("UploadMetrics", func(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
client, coll := f(nil)
|
||||||
|
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.Shutdown(ctx))
|
||||||
|
got := coll.Collect().Dump()
|
||||||
|
require.Len(t, got, 1, "upload of one ResourceMetrics")
|
||||||
|
diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal))
|
||||||
|
if diff != "" {
|
||||||
|
t.Fatalf("unexpected ResourceMetrics:\n%s", diff)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("PartialSuccess", func(t *testing.T) {
|
||||||
|
const n, msg = 2, "bad data"
|
||||||
|
rCh := make(chan ExportResult, 3)
|
||||||
|
rCh <- ExportResult{
|
||||||
|
Response: &collpb.ExportMetricsServiceResponse{
|
||||||
|
PartialSuccess: &collpb.ExportMetricsPartialSuccess{
|
||||||
|
RejectedDataPoints: n,
|
||||||
|
ErrorMessage: msg,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rCh <- ExportResult{
|
||||||
|
Response: &collpb.ExportMetricsServiceResponse{
|
||||||
|
PartialSuccess: &collpb.ExportMetricsPartialSuccess{
|
||||||
|
// Should not be logged.
|
||||||
|
RejectedDataPoints: 0,
|
||||||
|
ErrorMessage: "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rCh <- ExportResult{
|
||||||
|
Response: &collpb.ExportMetricsServiceResponse{},
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
client, _ := f(rCh)
|
||||||
|
|
||||||
|
defer func(orig otel.ErrorHandler) {
|
||||||
|
otel.SetErrorHandler(orig)
|
||||||
|
}(otel.GetErrorHandler())
|
||||||
|
|
||||||
|
errs := []error{}
|
||||||
|
eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) })
|
||||||
|
otel.SetErrorHandler(eh)
|
||||||
|
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||||
|
require.NoError(t, client.Shutdown(ctx))
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(errs))
|
||||||
|
want := fmt.Sprintf("%s (%d metric data points rejected)", msg, n)
|
||||||
|
assert.ErrorContains(t, errs[0], want)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) {
|
||||||
|
return func(t *testing.T) {
|
||||||
|
t.Helper()
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
t.Cleanup(cancel)
|
||||||
|
|
||||||
|
t.Run("DeadlineExceeded", func(t *testing.T) {
|
||||||
|
innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond)
|
||||||
|
t.Cleanup(innerCancel)
|
||||||
|
<-innerCtx.Done()
|
||||||
|
|
||||||
|
f := factory()
|
||||||
|
assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Canceled", func(t *testing.T) {
|
||||||
|
innerCtx, innerCancel := context.WithCancel(ctx)
|
||||||
|
innerCancel()
|
||||||
|
|
||||||
|
f := factory()
|
||||||
|
assert.ErrorIs(t, f(innerCtx), context.Canceled)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
78
internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl
Normal file
78
internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"{{ .internalImportPath }}"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/aggregation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type client struct {
|
||||||
|
rCh <-chan ExportResult
|
||||||
|
storage *Storage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality {
|
||||||
|
return metric.DefaultTemporalitySelector(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *client) Aggregation(k metric.InstrumentKind) aggregation.Aggregation {
|
||||||
|
return metric.DefaultAggregationSelector(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *client) Collect() *Storage {
|
||||||
|
return c.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error {
|
||||||
|
c.storage.Add(&cpb.ExportMetricsServiceRequest{
|
||||||
|
ResourceMetrics: []*mpb.ResourceMetrics{rm},
|
||||||
|
})
|
||||||
|
if c.rCh != nil {
|
||||||
|
r := <-c.rCh
|
||||||
|
if r.Response != nil && r.Response.GetPartialSuccess() != nil {
|
||||||
|
msg := r.Response.GetPartialSuccess().GetErrorMessage()
|
||||||
|
n := r.Response.GetPartialSuccess().GetRejectedDataPoints()
|
||||||
|
if msg != "" || n > 0 {
|
||||||
|
otel.Handle(internal.MetricPartialSuccessError(n, msg))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return r.Err
|
||||||
|
}
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
|
||||||
|
func (c *client) Shutdown(ctx context.Context) error { return ctx.Err() }
|
||||||
|
|
||||||
|
func TestClientTests(t *testing.T) {
|
||||||
|
factory := func(rCh <-chan ExportResult) (Client, Collector) {
|
||||||
|
c := &client{rCh: rCh, storage: NewStorage()}
|
||||||
|
return c, c
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Integration", RunClientTests(factory))
|
||||||
|
}
|
438
internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
Normal file
438
internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
Normal file
@ -0,0 +1,438 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package otest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
|
"crypto/ecdsa"
|
||||||
|
"crypto/elliptic"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"crypto/x509/pkix" // nolint:depguard // This is for testing.
|
||||||
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"{{ .oconfImportPath }}"
|
||||||
|
collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Collector is the collection target a Client sends metric uploads to.
|
||||||
|
type Collector interface {
|
||||||
|
Collect() *Storage
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExportResult struct {
|
||||||
|
Response *collpb.ExportMetricsServiceResponse
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Storage stores uploaded OTLP metric data in their proto form.
|
||||||
|
type Storage struct {
|
||||||
|
dataMu sync.Mutex
|
||||||
|
data []*mpb.ResourceMetrics
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStorage returns a configure storage ready to store received requests.
|
||||||
|
func NewStorage() *Storage {
|
||||||
|
return &Storage{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds the request to the Storage.
|
||||||
|
func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) {
|
||||||
|
s.dataMu.Lock()
|
||||||
|
defer s.dataMu.Unlock()
|
||||||
|
s.data = append(s.data, request.ResourceMetrics...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dump returns all added ResourceMetrics and clears the storage.
|
||||||
|
func (s *Storage) Dump() []*mpb.ResourceMetrics {
|
||||||
|
s.dataMu.Lock()
|
||||||
|
defer s.dataMu.Unlock()
|
||||||
|
|
||||||
|
var data []*mpb.ResourceMetrics
|
||||||
|
data, s.data = s.data, []*mpb.ResourceMetrics{}
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// GRPCCollector is an OTLP gRPC server that collects all requests it receives.
|
||||||
|
type GRPCCollector struct {
|
||||||
|
collpb.UnimplementedMetricsServiceServer
|
||||||
|
|
||||||
|
headersMu sync.Mutex
|
||||||
|
headers metadata.MD
|
||||||
|
storage *Storage
|
||||||
|
|
||||||
|
resultCh <-chan ExportResult
|
||||||
|
listener net.Listener
|
||||||
|
srv *grpc.Server
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewGRPCCollector returns a *GRPCCollector that is listening at the provided
|
||||||
|
// endpoint.
|
||||||
|
//
|
||||||
|
// If endpoint is an empty string, the returned collector will be listening on
|
||||||
|
// the localhost interface at an OS chosen port.
|
||||||
|
//
|
||||||
|
// If errCh is not nil, the collector will respond to Export calls with errors
|
||||||
|
// sent on that channel. This means that if errCh is not nil Export calls will
|
||||||
|
// block until an error is received.
|
||||||
|
func NewGRPCCollector(endpoint string, resultCh <-chan ExportResult) (*GRPCCollector, error) {
|
||||||
|
if endpoint == "" {
|
||||||
|
endpoint = "localhost:0"
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &GRPCCollector{
|
||||||
|
storage: NewStorage(),
|
||||||
|
resultCh: resultCh,
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
c.listener, err = net.Listen("tcp", endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
c.srv = grpc.NewServer()
|
||||||
|
collpb.RegisterMetricsServiceServer(c.srv, c)
|
||||||
|
go func() { _ = c.srv.Serve(c.listener) }()
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the gRPC server closing all open connections and
|
||||||
|
// listeners immediately.
|
||||||
|
func (c *GRPCCollector) Shutdown() { c.srv.Stop() }
|
||||||
|
|
||||||
|
// Addr returns the net.Addr c is listening at.
|
||||||
|
func (c *GRPCCollector) Addr() net.Addr {
|
||||||
|
return c.listener.Addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect returns the Storage holding all collected requests.
|
||||||
|
func (c *GRPCCollector) Collect() *Storage {
|
||||||
|
return c.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers returns the headers received for all requests.
|
||||||
|
func (c *GRPCCollector) Headers() map[string][]string {
|
||||||
|
// Makes a copy.
|
||||||
|
c.headersMu.Lock()
|
||||||
|
defer c.headersMu.Unlock()
|
||||||
|
return metadata.Join(c.headers)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Export handles the export req.
|
||||||
|
func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) {
|
||||||
|
c.storage.Add(req)
|
||||||
|
|
||||||
|
if h, ok := metadata.FromIncomingContext(ctx); ok {
|
||||||
|
c.headersMu.Lock()
|
||||||
|
c.headers = metadata.Join(c.headers, h)
|
||||||
|
c.headersMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.resultCh != nil {
|
||||||
|
r := <-c.resultCh
|
||||||
|
if r.Response == nil {
|
||||||
|
return &collpb.ExportMetricsServiceResponse{}, r.Err
|
||||||
|
}
|
||||||
|
return r.Response, r.Err
|
||||||
|
}
|
||||||
|
return &collpb.ExportMetricsServiceResponse{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var emptyExportMetricsServiceResponse = func() []byte {
|
||||||
|
body := collpb.ExportMetricsServiceResponse{}
|
||||||
|
r, err := proto.Marshal(&body)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}()
|
||||||
|
|
||||||
|
type HTTPResponseError struct {
|
||||||
|
Err error
|
||||||
|
Status int
|
||||||
|
Header http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *HTTPResponseError) Error() string {
|
||||||
|
return fmt.Sprintf("%d: %s", e.Status, e.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *HTTPResponseError) Unwrap() error { return e.Err }
|
||||||
|
|
||||||
|
// HTTPCollector is an OTLP HTTP server that collects all requests it receives.
|
||||||
|
type HTTPCollector struct {
|
||||||
|
headersMu sync.Mutex
|
||||||
|
headers http.Header
|
||||||
|
storage *Storage
|
||||||
|
|
||||||
|
resultCh <-chan ExportResult
|
||||||
|
listener net.Listener
|
||||||
|
srv *http.Server
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHTTPCollector returns a *HTTPCollector that is listening at the provided
|
||||||
|
// endpoint.
|
||||||
|
//
|
||||||
|
// If endpoint is an empty string, the returned collector will be listening on
|
||||||
|
// the localhost interface at an OS chosen port, not use TLS, and listen at the
|
||||||
|
// default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains
|
||||||
|
// a prefix of "https" the server will generate weak self-signed TLS
|
||||||
|
// certificates and use them to server data. If the endpoint contains a path,
|
||||||
|
// that path will be used instead of the default OTLP metric endpoint path.
|
||||||
|
//
|
||||||
|
// If errCh is not nil, the collector will respond to HTTP requests with errors
|
||||||
|
// sent on that channel. This means that if errCh is not nil Export calls will
|
||||||
|
// block until an error is received.
|
||||||
|
func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPCollector, error) {
|
||||||
|
u, err := url.Parse(endpoint)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if u.Host == "" {
|
||||||
|
u.Host = "localhost:0"
|
||||||
|
}
|
||||||
|
if u.Path == "" {
|
||||||
|
u.Path = oconf.DefaultMetricsPath
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &HTTPCollector{
|
||||||
|
headers: http.Header{},
|
||||||
|
storage: NewStorage(),
|
||||||
|
resultCh: resultCh,
|
||||||
|
}
|
||||||
|
|
||||||
|
c.listener, err = net.Listen("tcp", u.Host)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
mux.Handle(u.Path, http.HandlerFunc(c.handler))
|
||||||
|
c.srv = &http.Server{Handler: mux}
|
||||||
|
if u.Scheme == "https" {
|
||||||
|
cert, err := weakCertificate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
c.srv.TLSConfig = &tls.Config{
|
||||||
|
Certificates: []tls.Certificate{cert},
|
||||||
|
}
|
||||||
|
go func() { _ = c.srv.ServeTLS(c.listener, "", "") }()
|
||||||
|
} else {
|
||||||
|
go func() { _ = c.srv.Serve(c.listener) }()
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown shuts down the HTTP server closing all open connections and
|
||||||
|
// listeners.
|
||||||
|
func (c *HTTPCollector) Shutdown(ctx context.Context) error {
|
||||||
|
return c.srv.Shutdown(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Addr returns the net.Addr c is listening at.
|
||||||
|
func (c *HTTPCollector) Addr() net.Addr {
|
||||||
|
return c.listener.Addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect returns the Storage holding all collected requests.
|
||||||
|
func (c *HTTPCollector) Collect() *Storage {
|
||||||
|
return c.storage
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers returns the headers received for all requests.
|
||||||
|
func (c *HTTPCollector) Headers() map[string][]string {
|
||||||
|
// Makes a copy.
|
||||||
|
c.headersMu.Lock()
|
||||||
|
defer c.headersMu.Unlock()
|
||||||
|
return c.headers.Clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
c.respond(w, c.record(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *HTTPCollector) record(r *http.Request) ExportResult {
|
||||||
|
// Currently only supports protobuf.
|
||||||
|
if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" {
|
||||||
|
err := fmt.Errorf("content-type not supported: %s", v)
|
||||||
|
return ExportResult{Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := c.readBody(r)
|
||||||
|
if err != nil {
|
||||||
|
return ExportResult{Err: err}
|
||||||
|
}
|
||||||
|
pbRequest := &collpb.ExportMetricsServiceRequest{}
|
||||||
|
err = proto.Unmarshal(body, pbRequest)
|
||||||
|
if err != nil {
|
||||||
|
return ExportResult{
|
||||||
|
Err: &HTTPResponseError{
|
||||||
|
Err: err,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.storage.Add(pbRequest)
|
||||||
|
|
||||||
|
c.headersMu.Lock()
|
||||||
|
for k, vals := range r.Header {
|
||||||
|
for _, v := range vals {
|
||||||
|
c.headers.Add(k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.headersMu.Unlock()
|
||||||
|
|
||||||
|
if c.resultCh != nil {
|
||||||
|
return <-c.resultCh
|
||||||
|
}
|
||||||
|
return ExportResult{Err: err}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) {
|
||||||
|
var reader io.ReadCloser
|
||||||
|
switch r.Header.Get("Content-Encoding") {
|
||||||
|
case "gzip":
|
||||||
|
reader, err = gzip.NewReader(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
_ = reader.Close()
|
||||||
|
return nil, &HTTPResponseError{
|
||||||
|
Err: err,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
reader = r.Body
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
cErr := reader.Close()
|
||||||
|
if err == nil && cErr != nil {
|
||||||
|
err = &HTTPResponseError{
|
||||||
|
Err: cErr,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
body, err = io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
err = &HTTPResponseError{
|
||||||
|
Err: err,
|
||||||
|
Status: http.StatusInternalServerError,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return body, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) {
|
||||||
|
if resp.Err != nil {
|
||||||
|
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||||
|
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||||
|
var e *HTTPResponseError
|
||||||
|
if errors.As(resp.Err, &e) {
|
||||||
|
for k, vals := range e.Header {
|
||||||
|
for _, v := range vals {
|
||||||
|
w.Header().Add(k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
w.WriteHeader(e.Status)
|
||||||
|
fmt.Fprintln(w, e.Error())
|
||||||
|
} else {
|
||||||
|
w.WriteHeader(http.StatusBadRequest)
|
||||||
|
fmt.Fprintln(w, resp.Err.Error())
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/x-protobuf")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
if resp.Response == nil {
|
||||||
|
_, _ = w.Write(emptyExportMetricsServiceResponse)
|
||||||
|
} else {
|
||||||
|
r, err := proto.Marshal(resp.Response)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
_, _ = w.Write(r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Based on https://golang.org/src/crypto/tls/generate_cert.go,
|
||||||
|
// simplified and weakened.
|
||||||
|
func weakCertificate() (tls.Certificate, error) {
|
||||||
|
priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return tls.Certificate{}, err
|
||||||
|
}
|
||||||
|
notBefore := time.Now()
|
||||||
|
notAfter := notBefore.Add(time.Hour)
|
||||||
|
max := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||||
|
sn, err := rand.Int(rand.Reader, max)
|
||||||
|
if err != nil {
|
||||||
|
return tls.Certificate{}, err
|
||||||
|
}
|
||||||
|
tmpl := x509.Certificate{
|
||||||
|
SerialNumber: sn,
|
||||||
|
Subject: pkix.Name{Organization: []string{"otel-go"}},
|
||||||
|
NotBefore: notBefore,
|
||||||
|
NotAfter: notAfter,
|
||||||
|
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||||
|
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||||
|
BasicConstraintsValid: true,
|
||||||
|
DNSNames: []string{"localhost"},
|
||||||
|
IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)},
|
||||||
|
}
|
||||||
|
derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
|
||||||
|
if err != nil {
|
||||||
|
return tls.Certificate{}, err
|
||||||
|
}
|
||||||
|
var certBuf bytes.Buffer
|
||||||
|
err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||||
|
if err != nil {
|
||||||
|
return tls.Certificate{}, err
|
||||||
|
}
|
||||||
|
privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||||
|
if err != nil {
|
||||||
|
return tls.Certificate{}, err
|
||||||
|
}
|
||||||
|
var privBuf bytes.Buffer
|
||||||
|
err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes})
|
||||||
|
if err != nil {
|
||||||
|
return tls.Certificate{}, err
|
||||||
|
}
|
||||||
|
return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes())
|
||||||
|
}
|
155
internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
Normal file
155
internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AttrIter transforms an attribute iterator into OTLP key-values.
|
||||||
|
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
|
||||||
|
l := iter.Len()
|
||||||
|
if l == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, l)
|
||||||
|
for iter.Next() {
|
||||||
|
out = append(out, KeyValue(iter.Attribute()))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||||
|
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
|
||||||
|
if len(attrs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||||
|
for _, kv := range attrs {
|
||||||
|
out = append(out, KeyValue(kv))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||||
|
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
|
||||||
|
return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||||
|
func Value(v attribute.Value) *cpb.AnyValue {
|
||||||
|
av := new(cpb.AnyValue)
|
||||||
|
switch v.Type() {
|
||||||
|
case attribute.BOOL:
|
||||||
|
av.Value = &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v.AsBool(),
|
||||||
|
}
|
||||||
|
case attribute.BOOLSLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: boolSliceValues(v.AsBoolSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.INT64:
|
||||||
|
av.Value = &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v.AsInt64(),
|
||||||
|
}
|
||||||
|
case attribute.INT64SLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: int64SliceValues(v.AsInt64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64:
|
||||||
|
av.Value = &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v.AsFloat64(),
|
||||||
|
}
|
||||||
|
case attribute.FLOAT64SLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
case attribute.STRING:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v.AsString(),
|
||||||
|
}
|
||||||
|
case attribute.STRINGSLICE:
|
||||||
|
av.Value = &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: stringSliceValues(v.AsStringSlice()),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
av.Value = &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: "INVALID",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return av
|
||||||
|
}
|
||||||
|
|
||||||
|
func boolSliceValues(vals []bool) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_BoolValue{
|
||||||
|
BoolValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func int64SliceValues(vals []int64) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_IntValue{
|
||||||
|
IntValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func float64SliceValues(vals []float64) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_DoubleValue{
|
||||||
|
DoubleValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
||||||
|
|
||||||
|
func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||||
|
converted := make([]*cpb.AnyValue, len(vals))
|
||||||
|
for i, v := range vals {
|
||||||
|
converted[i] = &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{
|
||||||
|
StringValue: v,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return converted
|
||||||
|
}
|
197
internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl
Normal file
197
internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl
Normal file
@ -0,0 +1,197 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
attrBool = attribute.Bool("bool", true)
|
||||||
|
attrBoolSlice = attribute.BoolSlice("bool slice", []bool{true, false})
|
||||||
|
attrInt = attribute.Int("int", 1)
|
||||||
|
attrIntSlice = attribute.IntSlice("int slice", []int{-1, 1})
|
||||||
|
attrInt64 = attribute.Int64("int64", 1)
|
||||||
|
attrInt64Slice = attribute.Int64Slice("int64 slice", []int64{-1, 1})
|
||||||
|
attrFloat64 = attribute.Float64("float64", 1)
|
||||||
|
attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1})
|
||||||
|
attrString = attribute.String("string", "o")
|
||||||
|
attrStringSlice = attribute.StringSlice("string slice", []string{"o", "n"})
|
||||||
|
attrInvalid = attribute.KeyValue{
|
||||||
|
Key: attribute.Key("invalid"),
|
||||||
|
Value: attribute.Value{},
|
||||||
|
}
|
||||||
|
|
||||||
|
valBoolTrue = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}}
|
||||||
|
valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}}
|
||||||
|
valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse},
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
valIntOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}}
|
||||||
|
valIntNOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}}
|
||||||
|
valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: []*cpb.AnyValue{valIntNOne, valIntOne},
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
valDblOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}}
|
||||||
|
valDblNOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}}
|
||||||
|
valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: []*cpb.AnyValue{valDblNOne, valDblOne},
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
valStrO = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}}
|
||||||
|
valStrN = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}}
|
||||||
|
valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||||
|
ArrayValue: &cpb.ArrayValue{
|
||||||
|
Values: []*cpb.AnyValue{valStrO, valStrN},
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
|
||||||
|
kvBool = &cpb.KeyValue{Key: "bool", Value: valBoolTrue}
|
||||||
|
kvBoolSlice = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice}
|
||||||
|
kvInt = &cpb.KeyValue{Key: "int", Value: valIntOne}
|
||||||
|
kvIntSlice = &cpb.KeyValue{Key: "int slice", Value: valIntSlice}
|
||||||
|
kvInt64 = &cpb.KeyValue{Key: "int64", Value: valIntOne}
|
||||||
|
kvInt64Slice = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice}
|
||||||
|
kvFloat64 = &cpb.KeyValue{Key: "float64", Value: valDblOne}
|
||||||
|
kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice}
|
||||||
|
kvString = &cpb.KeyValue{Key: "string", Value: valStrO}
|
||||||
|
kvStringSlice = &cpb.KeyValue{Key: "string slice", Value: valStrSlice}
|
||||||
|
kvInvalid = &cpb.KeyValue{
|
||||||
|
Key: "invalid",
|
||||||
|
Value: &cpb.AnyValue{
|
||||||
|
Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type attributeTest struct {
|
||||||
|
name string
|
||||||
|
in []attribute.KeyValue
|
||||||
|
want []*cpb.KeyValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAttributeTransforms(t *testing.T) {
|
||||||
|
for _, test := range []attributeTest{
|
||||||
|
{"nil", nil, nil},
|
||||||
|
{"empty", []attribute.KeyValue{}, nil},
|
||||||
|
{
|
||||||
|
"invalid",
|
||||||
|
[]attribute.KeyValue{attrInvalid},
|
||||||
|
[]*cpb.KeyValue{kvInvalid},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"bool",
|
||||||
|
[]attribute.KeyValue{attrBool},
|
||||||
|
[]*cpb.KeyValue{kvBool},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"bool slice",
|
||||||
|
[]attribute.KeyValue{attrBoolSlice},
|
||||||
|
[]*cpb.KeyValue{kvBoolSlice},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int",
|
||||||
|
[]attribute.KeyValue{attrInt},
|
||||||
|
[]*cpb.KeyValue{kvInt},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int slice",
|
||||||
|
[]attribute.KeyValue{attrIntSlice},
|
||||||
|
[]*cpb.KeyValue{kvIntSlice},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int64",
|
||||||
|
[]attribute.KeyValue{attrInt64},
|
||||||
|
[]*cpb.KeyValue{kvInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"int64 slice",
|
||||||
|
[]attribute.KeyValue{attrInt64Slice},
|
||||||
|
[]*cpb.KeyValue{kvInt64Slice},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float64",
|
||||||
|
[]attribute.KeyValue{attrFloat64},
|
||||||
|
[]*cpb.KeyValue{kvFloat64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"float64 slice",
|
||||||
|
[]attribute.KeyValue{attrFloat64Slice},
|
||||||
|
[]*cpb.KeyValue{kvFloat64Slice},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"string",
|
||||||
|
[]attribute.KeyValue{attrString},
|
||||||
|
[]*cpb.KeyValue{kvString},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"string slice",
|
||||||
|
[]attribute.KeyValue{attrStringSlice},
|
||||||
|
[]*cpb.KeyValue{kvStringSlice},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"all",
|
||||||
|
[]attribute.KeyValue{
|
||||||
|
attrBool,
|
||||||
|
attrBoolSlice,
|
||||||
|
attrInt,
|
||||||
|
attrIntSlice,
|
||||||
|
attrInt64,
|
||||||
|
attrInt64Slice,
|
||||||
|
attrFloat64,
|
||||||
|
attrFloat64Slice,
|
||||||
|
attrString,
|
||||||
|
attrStringSlice,
|
||||||
|
attrInvalid,
|
||||||
|
},
|
||||||
|
[]*cpb.KeyValue{
|
||||||
|
kvBool,
|
||||||
|
kvBoolSlice,
|
||||||
|
kvInt,
|
||||||
|
kvIntSlice,
|
||||||
|
kvInt64,
|
||||||
|
kvInt64Slice,
|
||||||
|
kvFloat64,
|
||||||
|
kvFloat64Slice,
|
||||||
|
kvString,
|
||||||
|
kvStringSlice,
|
||||||
|
kvInvalid,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
t.Run("KeyValues", func(t *testing.T) {
|
||||||
|
assert.ElementsMatch(t, test.want, KeyValues(test.in))
|
||||||
|
})
|
||||||
|
t.Run("AttrIter", func(t *testing.T) {
|
||||||
|
s := attribute.NewSet(test.in...)
|
||||||
|
assert.ElementsMatch(t, test.want, AttrIter(s.Iter()))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
114
internal/shared/otlp/otlpmetric/transform/error.go.tmpl
Normal file
114
internal/shared/otlp/otlpmetric/transform/error.go.tmpl
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errUnknownAggregation = errors.New("unknown aggregation")
|
||||||
|
errUnknownTemporality = errors.New("unknown temporality")
|
||||||
|
)
|
||||||
|
|
||||||
|
type errMetric struct {
|
||||||
|
m *mpb.Metric
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e errMetric) Unwrap() error {
|
||||||
|
return e.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e errMetric) Error() string {
|
||||||
|
format := "invalid metric (name: %q, description: %q, unit: %q): %s"
|
||||||
|
return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e errMetric) Is(target error) bool {
|
||||||
|
return errors.Is(e.err, target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// multiErr is used by the data-type transform functions to wrap multiple
|
||||||
|
// errors into a single return value. The error message will show all errors
|
||||||
|
// as a list and scope them by the datatype name that is returning them.
|
||||||
|
type multiErr struct {
|
||||||
|
datatype string
|
||||||
|
errs []error
|
||||||
|
}
|
||||||
|
|
||||||
|
// errOrNil returns nil if e contains no errors, otherwise it returns e.
|
||||||
|
func (e *multiErr) errOrNil() error {
|
||||||
|
if len(e.errs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// append adds err to e. If err is a multiErr, its errs are flattened into e.
|
||||||
|
func (e *multiErr) append(err error) {
|
||||||
|
// Do not use errors.As here, this should only be flattened one layer. If
|
||||||
|
// there is a *multiErr several steps down the chain, all the errors above
|
||||||
|
// it will be discarded if errors.As is used instead.
|
||||||
|
switch other := err.(type) {
|
||||||
|
case *multiErr:
|
||||||
|
// Flatten err errors into e.
|
||||||
|
e.errs = append(e.errs, other.errs...)
|
||||||
|
default:
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *multiErr) Error() string {
|
||||||
|
es := make([]string, len(e.errs))
|
||||||
|
for i, err := range e.errs {
|
||||||
|
es[i] = fmt.Sprintf("* %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
format := "%d errors occurred transforming %s:\n\t%s"
|
||||||
|
return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *multiErr) Unwrap() error {
|
||||||
|
switch len(e.errs) {
|
||||||
|
case 0:
|
||||||
|
return nil
|
||||||
|
case 1:
|
||||||
|
return e.errs[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a multiErr without the leading error.
|
||||||
|
cp := &multiErr{
|
||||||
|
datatype: e.datatype,
|
||||||
|
errs: make([]error, len(e.errs)-1),
|
||||||
|
}
|
||||||
|
copy(cp.errs, e.errs[1:])
|
||||||
|
return cp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *multiErr) Is(target error) bool {
|
||||||
|
if len(e.errs) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Check if the first error is target.
|
||||||
|
return errors.Is(e.errs[0], target)
|
||||||
|
}
|
91
internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl
Normal file
91
internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation}
|
||||||
|
e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality}
|
||||||
|
)
|
||||||
|
|
||||||
|
type testingErr struct{}
|
||||||
|
|
||||||
|
func (testingErr) Error() string { return "testing error" }
|
||||||
|
|
||||||
|
// errFunc is a non-comparable error type.
|
||||||
|
type errFunc func() string
|
||||||
|
|
||||||
|
func (e errFunc) Error() string {
|
||||||
|
return e()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMultiErr(t *testing.T) {
|
||||||
|
const name = "TestMultiErr"
|
||||||
|
me := &multiErr{datatype: name}
|
||||||
|
|
||||||
|
t.Run("ErrOrNil", func(t *testing.T) {
|
||||||
|
require.Nil(t, me.errOrNil())
|
||||||
|
me.errs = []error{e0}
|
||||||
|
assert.Error(t, me.errOrNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
var testErr testingErr
|
||||||
|
t.Run("AppendError", func(t *testing.T) {
|
||||||
|
me.append(testErr)
|
||||||
|
assert.Equal(t, testErr, me.errs[len(me.errs)-1])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("AppendFlattens", func(t *testing.T) {
|
||||||
|
other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}}
|
||||||
|
me.append(other)
|
||||||
|
assert.Equal(t, e1, me.errs[len(me.errs)-1])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ErrorMessage", func(t *testing.T) {
|
||||||
|
// Test the overall structure of the message, but not the exact
|
||||||
|
// language so this doesn't become a change-indicator.
|
||||||
|
msg := me.Error()
|
||||||
|
lines := strings.Split(msg, "\n")
|
||||||
|
assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg)
|
||||||
|
assert.Contains(t, msg, name)
|
||||||
|
assert.Contains(t, msg, e0.Error())
|
||||||
|
assert.Contains(t, msg, testErr.Error())
|
||||||
|
assert.Contains(t, msg, e1.Error())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ErrorIs", func(t *testing.T) {
|
||||||
|
assert.ErrorIs(t, me, errUnknownAggregation)
|
||||||
|
assert.ErrorIs(t, me, e0)
|
||||||
|
assert.ErrorIs(t, me, testErr)
|
||||||
|
assert.ErrorIs(t, me, errUnknownTemporality)
|
||||||
|
assert.ErrorIs(t, me, e1)
|
||||||
|
|
||||||
|
errUnknown := errFunc(func() string { return "unknown error" })
|
||||||
|
assert.NotErrorIs(t, me, errUnknown)
|
||||||
|
|
||||||
|
var empty multiErr
|
||||||
|
assert.NotErrorIs(t, &empty, errUnknownTemporality)
|
||||||
|
})
|
||||||
|
}
|
292
internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
Normal file
292
internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
Normal file
@ -0,0 +1,292 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package transform provides transformation functionality from the
|
||||||
|
// sdk/metric/metricdata data-types into OTLP data-types.
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
|
||||||
|
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
|
||||||
|
// ResourceMetrics that contains partial OTLP ScopeMetrics.
|
||||||
|
func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
|
||||||
|
sms, err := ScopeMetrics(rm.ScopeMetrics)
|
||||||
|
return &mpb.ResourceMetrics{
|
||||||
|
Resource: &rpb.Resource{
|
||||||
|
Attributes: AttrIter(rm.Resource.Iter()),
|
||||||
|
},
|
||||||
|
ScopeMetrics: sms,
|
||||||
|
SchemaUrl: rm.Resource.SchemaURL(),
|
||||||
|
}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
|
||||||
|
// sms contains invalid metric values, an error will be returned along with a
|
||||||
|
// slice that contains partial OTLP ScopeMetrics.
|
||||||
|
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||||
|
errs := &multiErr{datatype: "ScopeMetrics"}
|
||||||
|
out := make([]*mpb.ScopeMetrics, 0, len(sms))
|
||||||
|
for _, sm := range sms {
|
||||||
|
ms, err := Metrics(sm.Metrics)
|
||||||
|
if err != nil {
|
||||||
|
errs.append(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
out = append(out, &mpb.ScopeMetrics{
|
||||||
|
Scope: &cpb.InstrumentationScope{
|
||||||
|
Name: sm.Scope.Name,
|
||||||
|
Version: sm.Scope.Version,
|
||||||
|
},
|
||||||
|
Metrics: ms,
|
||||||
|
SchemaUrl: sm.Scope.SchemaURL,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return out, errs.errOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
|
||||||
|
// invalid metric values, an error will be returned along with a slice that
|
||||||
|
// contains partial OTLP Metrics.
|
||||||
|
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
|
||||||
|
errs := &multiErr{datatype: "Metrics"}
|
||||||
|
out := make([]*mpb.Metric, 0, len(ms))
|
||||||
|
for _, m := range ms {
|
||||||
|
o, err := metric(m)
|
||||||
|
if err != nil {
|
||||||
|
// Do not include invalid data. Drop the metric, report the error.
|
||||||
|
errs.append(errMetric{m: o, err: err})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
out = append(out, o)
|
||||||
|
}
|
||||||
|
return out, errs.errOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||||
|
var err error
|
||||||
|
out := &mpb.Metric{
|
||||||
|
Name: m.Name,
|
||||||
|
Description: m.Description,
|
||||||
|
Unit: string(m.Unit),
|
||||||
|
}
|
||||||
|
switch a := m.Data.(type) {
|
||||||
|
case metricdata.Gauge[int64]:
|
||||||
|
out.Data = Gauge[int64](a)
|
||||||
|
case metricdata.Gauge[float64]:
|
||||||
|
out.Data = Gauge[float64](a)
|
||||||
|
case metricdata.Sum[int64]:
|
||||||
|
out.Data, err = Sum[int64](a)
|
||||||
|
case metricdata.Sum[float64]:
|
||||||
|
out.Data, err = Sum[float64](a)
|
||||||
|
case metricdata.Histogram[int64]:
|
||||||
|
out.Data, err = Histogram(a)
|
||||||
|
case metricdata.Histogram[float64]:
|
||||||
|
out.Data, err = Histogram(a)
|
||||||
|
case metricdata.ExponentialHistogram[int64]:
|
||||||
|
out.Data, err = ExponentialHistogram(a)
|
||||||
|
case metricdata.ExponentialHistogram[float64]:
|
||||||
|
out.Data, err = ExponentialHistogram(a)
|
||||||
|
default:
|
||||||
|
return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
|
||||||
|
}
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gauge returns an OTLP Metric_Gauge generated from g.
|
||||||
|
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
|
||||||
|
return &mpb.Metric_Gauge{
|
||||||
|
Gauge: &mpb.Gauge{
|
||||||
|
DataPoints: DataPoints(g.DataPoints),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum returns an OTLP Metric_Sum generated from s. An error is returned
|
||||||
|
// if the temporality of s is unknown.
|
||||||
|
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
|
||||||
|
t, err := Temporality(s.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_Sum{
|
||||||
|
Sum: &mpb.Sum{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
IsMonotonic: s.IsMonotonic,
|
||||||
|
DataPoints: DataPoints(s.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
|
||||||
|
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
|
||||||
|
out := make([]*mpb.NumberDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
ndp := &mpb.NumberDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
}
|
||||||
|
switch v := any(dPt.Value).(type) {
|
||||||
|
case int64:
|
||||||
|
ndp.Value = &mpb.NumberDataPoint_AsInt{
|
||||||
|
AsInt: v,
|
||||||
|
}
|
||||||
|
case float64:
|
||||||
|
ndp.Value = &mpb.NumberDataPoint_AsDouble{
|
||||||
|
AsDouble: v,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out = append(out, ndp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// Histogram returns an OTLP Metric_Histogram generated from h. An error is
|
||||||
|
// returned if the temporality of h is unknown.
|
||||||
|
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
|
||||||
|
t, err := Temporality(h.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_Histogram{
|
||||||
|
Histogram: &mpb.Histogram{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
DataPoints: HistogramDataPoints(h.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
|
||||||
|
// from dPts.
|
||||||
|
func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
|
||||||
|
out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
sum := float64(dPt.Sum)
|
||||||
|
hdp := &mpb.HistogramDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
Count: dPt.Count,
|
||||||
|
Sum: &sum,
|
||||||
|
BucketCounts: dPt.BucketCounts,
|
||||||
|
ExplicitBounds: dPt.Bounds,
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Min.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
hdp.Min = &vF64
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Max.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
hdp.Max = &vF64
|
||||||
|
}
|
||||||
|
out = append(out, hdp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
|
||||||
|
// returned if the temporality of h is unknown.
|
||||||
|
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
|
||||||
|
t, err := Temporality(h.Temporality)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &mpb.Metric_ExponentialHistogram{
|
||||||
|
ExponentialHistogram: &mpb.ExponentialHistogram{
|
||||||
|
AggregationTemporality: t,
|
||||||
|
DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
|
||||||
|
// from dPts.
|
||||||
|
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
|
||||||
|
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
|
||||||
|
for _, dPt := range dPts {
|
||||||
|
sum := float64(dPt.Sum)
|
||||||
|
ehdp := &mpb.ExponentialHistogramDataPoint{
|
||||||
|
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||||
|
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||||
|
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||||
|
Count: dPt.Count,
|
||||||
|
Sum: &sum,
|
||||||
|
Scale: dPt.Scale,
|
||||||
|
ZeroCount: dPt.ZeroCount,
|
||||||
|
|
||||||
|
Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
|
||||||
|
Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Min.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
ehdp.Min = &vF64
|
||||||
|
}
|
||||||
|
if v, ok := dPt.Max.Value(); ok {
|
||||||
|
vF64 := float64(v)
|
||||||
|
ehdp.Max = &vF64
|
||||||
|
}
|
||||||
|
out = append(out, ehdp)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
|
||||||
|
// from bucket.
|
||||||
|
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||||
|
return &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||||
|
Offset: bucket.Offset,
|
||||||
|
BucketCounts: bucket.Counts,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporality returns an OTLP AggregationTemporality generated from t. If t
|
||||||
|
// is unknown, an error is returned along with the invalid
|
||||||
|
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
|
||||||
|
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||||
|
switch t {
|
||||||
|
case metricdata.DeltaTemporality:
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
|
||||||
|
case metricdata.CumulativeTemporality:
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
|
||||||
|
default:
|
||||||
|
err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
|
||||||
|
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
|
||||||
|
// since January 1, 1970 UTC as uint64.
|
||||||
|
// The result is undefined if the Unix time
|
||||||
|
// in nanoseconds cannot be represented by an int64
|
||||||
|
// (a date before the year 1678 or after 2262).
|
||||||
|
// timeUnixNano on the zero Time returns 0.
|
||||||
|
// The result does not depend on the location associated with t.
|
||||||
|
func timeUnixNano(t time.Time) uint64 {
|
||||||
|
if t.IsZero() {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return uint64(t.UnixNano())
|
||||||
|
}
|
@ -0,0 +1,633 @@
|
|||||||
|
// Code created by gotmpl. DO NOT MODIFY.
|
||||||
|
// source: internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl
|
||||||
|
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package transform
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||||
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||||
|
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||||
|
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||||
|
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type unknownAggT struct {
|
||||||
|
metricdata.Aggregation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test fixtures shared by TestTransformations. Each "otel*" value is an
// SDK metricdata input; the matching "pb*" value is the OTLP protobuf
// form the transform functions are expected to produce from it.
var (
	// Sat Jan 01 2000 00:00:00 GMT+0000.
	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
	end   = start.Add(30 * time.Second)

	// Attribute sets identifying the two synthetic users used by all
	// data points, and their protobuf equivalents.
	alice = attribute.NewSet(attribute.String("user", "alice"))
	bob   = attribute.NewSet(attribute.String("user", "bob"))

	pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
		Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
	}}
	pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
		Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
	}}

	// Min/max/sum statistics for the two histogram data points. Declared
	// as float64 so the pb side can take their addresses directly; the
	// int64 fixtures convert them.
	minA, maxA, sumA = 2.0, 4.0, 90.0
	minB, maxB, sumB = 4.0, 150.0, 234.0
	otelHDPInt64     = []metricdata.HistogramDataPoint[int64]{
		{
			Attributes:   alice,
			StartTime:    start,
			Time:         end,
			Count:        30,
			Bounds:       []float64{1, 5},
			BucketCounts: []uint64{0, 30, 0},
			Min:          metricdata.NewExtrema(int64(minA)),
			Max:          metricdata.NewExtrema(int64(maxA)),
			Sum:          int64(sumA),
		}, {
			Attributes:   bob,
			StartTime:    start,
			Time:         end,
			Count:        3,
			Bounds:       []float64{1, 5},
			BucketCounts: []uint64{0, 1, 2},
			Min:          metricdata.NewExtrema(int64(minB)),
			Max:          metricdata.NewExtrema(int64(maxB)),
			Sum:          int64(sumB),
		},
	}
	// Same data points with float64 values; both variants must transform
	// to the identical pbHDP below.
	otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{
		{
			Attributes:   alice,
			StartTime:    start,
			Time:         end,
			Count:        30,
			Bounds:       []float64{1, 5},
			BucketCounts: []uint64{0, 30, 0},
			Min:          metricdata.NewExtrema(minA),
			Max:          metricdata.NewExtrema(maxA),
			Sum:          sumA,
		}, {
			Attributes:   bob,
			StartTime:    start,
			Time:         end,
			Count:        3,
			Bounds:       []float64{1, 5},
			BucketCounts: []uint64{0, 1, 2},
			Min:          metricdata.NewExtrema(minB),
			Max:          metricdata.NewExtrema(maxB),
			Sum:          sumB,
		},
	}

	// Exponential-histogram buckets (A/B for the first data point,
	// C/D for the second).
	otelEBucketA = metricdata.ExponentialBucket{
		Offset: 5,
		Counts: []uint64{0, 5, 0, 5},
	}
	otelEBucketB = metricdata.ExponentialBucket{
		Offset: 3,
		Counts: []uint64{0, 5, 0, 5},
	}
	otelEBucketsC = metricdata.ExponentialBucket{
		Offset: 5,
		Counts: []uint64{0, 1},
	}
	otelEBucketsD = metricdata.ExponentialBucket{
		Offset: 3,
		Counts: []uint64{0, 1},
	}

	otelEHDPInt64 = []metricdata.ExponentialHistogramDataPoint[int64]{
		{
			Attributes:     alice,
			StartTime:      start,
			Time:           end,
			Count:          30,
			Scale:          2,
			ZeroCount:      10,
			PositiveBucket: otelEBucketA,
			NegativeBucket: otelEBucketB,
			ZeroThreshold:  .01,
			Min:            metricdata.NewExtrema(int64(minA)),
			Max:            metricdata.NewExtrema(int64(maxA)),
			Sum:            int64(sumA),
		}, {
			Attributes:     bob,
			StartTime:      start,
			Time:           end,
			Count:          3,
			Scale:          4,
			ZeroCount:      1,
			PositiveBucket: otelEBucketsC,
			NegativeBucket: otelEBucketsD,
			ZeroThreshold:  .02,
			Min:            metricdata.NewExtrema(int64(minB)),
			Max:            metricdata.NewExtrema(int64(maxB)),
			Sum:            int64(sumB),
		},
	}
	// Float64 variant; both variants must transform to the identical
	// pbEHDP below.
	otelEHDPFloat64 = []metricdata.ExponentialHistogramDataPoint[float64]{
		{
			Attributes:     alice,
			StartTime:      start,
			Time:           end,
			Count:          30,
			Scale:          2,
			ZeroCount:      10,
			PositiveBucket: otelEBucketA,
			NegativeBucket: otelEBucketB,
			ZeroThreshold:  .01,
			Min:            metricdata.NewExtrema(minA),
			Max:            metricdata.NewExtrema(maxA),
			Sum:            sumA,
		}, {
			Attributes:     bob,
			StartTime:      start,
			Time:           end,
			Count:          3,
			Scale:          4,
			ZeroCount:      1,
			PositiveBucket: otelEBucketsC,
			NegativeBucket: otelEBucketsD,
			ZeroThreshold:  .02,
			Min:            metricdata.NewExtrema(minB),
			Max:            metricdata.NewExtrema(maxB),
			Sum:            sumB,
		},
	}

	// Expected protobuf histogram data points for otelHDPInt64/Float64.
	pbHDP = []*mpb.HistogramDataPoint{
		{
			Attributes:        []*cpb.KeyValue{pbAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Count:             30,
			Sum:               &sumA,
			ExplicitBounds:    []float64{1, 5},
			BucketCounts:      []uint64{0, 30, 0},
			Min:               &minA,
			Max:               &maxA,
		}, {
			Attributes:        []*cpb.KeyValue{pbBob},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Count:             3,
			Sum:               &sumB,
			ExplicitBounds:    []float64{1, 5},
			BucketCounts:      []uint64{0, 1, 2},
			Min:               &minB,
			Max:               &maxB,
		},
	}

	// Expected protobuf forms of otelEBucketA/B and otelEBucketsC/D.
	pbEHDPBA = &mpb.ExponentialHistogramDataPoint_Buckets{
		Offset:       5,
		BucketCounts: []uint64{0, 5, 0, 5},
	}
	pbEHDPBB = &mpb.ExponentialHistogramDataPoint_Buckets{
		Offset:       3,
		BucketCounts: []uint64{0, 5, 0, 5},
	}
	pbEHDPBC = &mpb.ExponentialHistogramDataPoint_Buckets{
		Offset:       5,
		BucketCounts: []uint64{0, 1},
	}
	pbEHDPBD = &mpb.ExponentialHistogramDataPoint_Buckets{
		Offset:       3,
		BucketCounts: []uint64{0, 1},
	}

	// Expected protobuf exponential-histogram data points for
	// otelEHDPInt64/Float64.
	pbEHDP = []*mpb.ExponentialHistogramDataPoint{
		{
			Attributes:        []*cpb.KeyValue{pbAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Count:             30,
			Sum:               &sumA,
			Scale:             2,
			ZeroCount:         10,
			Positive:          pbEHDPBA,
			Negative:          pbEHDPBB,
			Min:               &minA,
			Max:               &maxA,
		}, {
			Attributes:        []*cpb.KeyValue{pbBob},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Count:             3,
			Sum:               &sumB,
			Scale:             4,
			ZeroCount:         1,
			Positive:          pbEHDPBC,
			Negative:          pbEHDPBD,
			Min:               &minB,
			Max:               &maxB,
		},
	}

	otelHistInt64 = metricdata.Histogram[int64]{
		Temporality: metricdata.DeltaTemporality,
		DataPoints:  otelHDPInt64,
	}
	otelHistFloat64 = metricdata.Histogram[float64]{
		Temporality: metricdata.DeltaTemporality,
		DataPoints:  otelHDPFloat64,
	}
	// invalidTemporality is the zero Temporality value, which the
	// transform functions reject with errUnknownTemporality.
	invalidTemporality metricdata.Temporality
	otelHistInvalid    = metricdata.Histogram[int64]{
		Temporality: invalidTemporality,
		DataPoints:  otelHDPInt64,
	}

	otelExpoHistInt64 = metricdata.ExponentialHistogram[int64]{
		Temporality: metricdata.DeltaTemporality,
		DataPoints:  otelEHDPInt64,
	}
	otelExpoHistFloat64 = metricdata.ExponentialHistogram[float64]{
		Temporality: metricdata.DeltaTemporality,
		DataPoints:  otelEHDPFloat64,
	}
	otelExpoHistInvalid = metricdata.ExponentialHistogram[int64]{
		Temporality: invalidTemporality,
		DataPoints:  otelEHDPInt64,
	}

	pbHist = &mpb.Histogram{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
		DataPoints:             pbHDP,
	}

	pbExpoHist = &mpb.ExponentialHistogram{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
		DataPoints:             pbEHDP,
	}

	// Number data points used by the gauge and sum fixtures.
	otelDPtsInt64 = []metricdata.DataPoint[int64]{
		{Attributes: alice, StartTime: start, Time: end, Value: 1},
		{Attributes: bob, StartTime: start, Time: end, Value: 2},
	}
	otelDPtsFloat64 = []metricdata.DataPoint[float64]{
		{Attributes: alice, StartTime: start, Time: end, Value: 1.0},
		{Attributes: bob, StartTime: start, Time: end, Value: 2.0},
	}

	pbDPtsInt64 = []*mpb.NumberDataPoint{
		{
			Attributes:        []*cpb.KeyValue{pbAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 1},
		},
		{
			Attributes:        []*cpb.KeyValue{pbBob},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 2},
		},
	}
	pbDPtsFloat64 = []*mpb.NumberDataPoint{
		{
			Attributes:        []*cpb.KeyValue{pbAlice},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
		},
		{
			Attributes:        []*cpb.KeyValue{pbBob},
			StartTimeUnixNano: uint64(start.UnixNano()),
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
		},
	}

	otelSumInt64 = metricdata.Sum[int64]{
		Temporality: metricdata.CumulativeTemporality,
		IsMonotonic: true,
		DataPoints:  otelDPtsInt64,
	}
	otelSumFloat64 = metricdata.Sum[float64]{
		Temporality: metricdata.DeltaTemporality,
		IsMonotonic: false,
		DataPoints:  otelDPtsFloat64,
	}
	otelSumInvalid = metricdata.Sum[float64]{
		Temporality: invalidTemporality,
		IsMonotonic: false,
		DataPoints:  otelDPtsFloat64,
	}

	pbSumInt64 = &mpb.Sum{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
		IsMonotonic:            true,
		DataPoints:             pbDPtsInt64,
	}
	pbSumFloat64 = &mpb.Sum{
		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
		IsMonotonic:            false,
		DataPoints:             pbDPtsFloat64,
	}

	otelGaugeInt64   = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64}
	otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64}
	// A gauge whose data point has the zero StartTime; its protobuf form
	// must carry StartTimeUnixNano == 0.
	otelGaugeZeroStartTime = metricdata.Gauge[int64]{
		DataPoints: []metricdata.DataPoint[int64]{
			{Attributes: alice, StartTime: time.Time{}, Time: end, Value: 1},
		},
	}

	pbGaugeInt64         = &mpb.Gauge{DataPoints: pbDPtsInt64}
	pbGaugeFloat64       = &mpb.Gauge{DataPoints: pbDPtsFloat64}
	pbGaugeZeroStartTime = &mpb.Gauge{DataPoints: []*mpb.NumberDataPoint{
		{
			Attributes:        []*cpb.KeyValue{pbAlice},
			StartTimeUnixNano: 0,
			TimeUnixNano:      uint64(end.UnixNano()),
			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 1},
		},
	}}

	// unknownAgg triggers the unknown-aggregation error path.
	unknownAgg  unknownAggT
	otelMetrics = []metricdata.Metrics{
		{
			Name:        "int64-gauge",
			Description: "Gauge with int64 values",
			Unit:        "1",
			Data:        otelGaugeInt64,
		},
		{
			Name:        "float64-gauge",
			Description: "Gauge with float64 values",
			Unit:        "1",
			Data:        otelGaugeFloat64,
		},
		{
			Name:        "int64-sum",
			Description: "Sum with int64 values",
			Unit:        "1",
			Data:        otelSumInt64,
		},
		{
			Name:        "float64-sum",
			Description: "Sum with float64 values",
			Unit:        "1",
			Data:        otelSumFloat64,
		},
		{
			Name:        "invalid-sum",
			Description: "Sum with invalid temporality",
			Unit:        "1",
			Data:        otelSumInvalid,
		},
		{
			Name:        "int64-histogram",
			Description: "Histogram",
			Unit:        "1",
			Data:        otelHistInt64,
		},
		{
			Name:        "float64-histogram",
			Description: "Histogram",
			Unit:        "1",
			Data:        otelHistFloat64,
		},
		{
			Name:        "invalid-histogram",
			Description: "Invalid histogram",
			Unit:        "1",
			Data:        otelHistInvalid,
		},
		{
			Name:        "unknown",
			Description: "Unknown aggregation",
			Unit:        "1",
			Data:        unknownAgg,
		},
		{
			Name:        "int64-ExponentialHistogram",
			Description: "Exponential Histogram",
			Unit:        "1",
			Data:        otelExpoHistInt64,
		},
		{
			Name:        "float64-ExponentialHistogram",
			Description: "Exponential Histogram",
			Unit:        "1",
			Data:        otelExpoHistFloat64,
		},
		{
			Name:        "invalid-ExponentialHistogram",
			Description: "Invalid Exponential Histogram",
			Unit:        "1",
			Data:        otelExpoHistInvalid,
		},
		{
			Name:        "zero-time",
			Description: "Gauge with 0 StartTime",
			Unit:        "1",
			Data:        otelGaugeZeroStartTime,
		},
	}

	// pbMetrics deliberately omits the "invalid-*" and "unknown" entries
	// of otelMetrics: those fail to transform and are dropped while the
	// corresponding errors are returned.
	pbMetrics = []*mpb.Metric{
		{
			Name:        "int64-gauge",
			Description: "Gauge with int64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Gauge{Gauge: pbGaugeInt64},
		},
		{
			Name:        "float64-gauge",
			Description: "Gauge with float64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Gauge{Gauge: pbGaugeFloat64},
		},
		{
			Name:        "int64-sum",
			Description: "Sum with int64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Sum{Sum: pbSumInt64},
		},
		{
			Name:        "float64-sum",
			Description: "Sum with float64 values",
			Unit:        "1",
			Data:        &mpb.Metric_Sum{Sum: pbSumFloat64},
		},
		{
			Name:        "int64-histogram",
			Description: "Histogram",
			Unit:        "1",
			Data:        &mpb.Metric_Histogram{Histogram: pbHist},
		},
		{
			Name:        "float64-histogram",
			Description: "Histogram",
			Unit:        "1",
			Data:        &mpb.Metric_Histogram{Histogram: pbHist},
		},
		{
			Name:        "int64-ExponentialHistogram",
			Description: "Exponential Histogram",
			Unit:        "1",
			Data:        &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist},
		},
		{
			Name:        "float64-ExponentialHistogram",
			Description: "Exponential Histogram",
			Unit:        "1",
			Data:        &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist},
		},
		{
			Name:        "zero-time",
			Description: "Gauge with 0 StartTime",
			Unit:        "1",
			Data:        &mpb.Metric_Gauge{Gauge: pbGaugeZeroStartTime},
		},
	}

	otelScopeMetrics = []metricdata.ScopeMetrics{
		{
			Scope: instrumentation.Scope{
				Name:      "test/code/path",
				Version:   "v0.1.0",
				SchemaURL: semconv.SchemaURL,
			},
			Metrics: otelMetrics,
		},
	}

	pbScopeMetrics = []*mpb.ScopeMetrics{
		{
			Scope: &cpb.InstrumentationScope{
				Name:    "test/code/path",
				Version: "v0.1.0",
			},
			Metrics:   pbMetrics,
			SchemaUrl: semconv.SchemaURL,
		},
	}

	otelRes = resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("test server"),
		semconv.ServiceVersion("v0.1.0"),
	)

	pbRes = &rpb.Resource{
		Attributes: []*cpb.KeyValue{
			{
				Key: "service.name",
				Value: &cpb.AnyValue{
					Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
				},
			},
			{
				Key: "service.version",
				Value: &cpb.AnyValue{
					Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
				},
			},
		},
	}

	otelResourceMetrics = &metricdata.ResourceMetrics{
		Resource:     otelRes,
		ScopeMetrics: otelScopeMetrics,
	}

	pbResourceMetrics = &mpb.ResourceMetrics{
		Resource:     pbRes,
		ScopeMetrics: pbScopeMetrics,
		SchemaUrl:    semconv.SchemaURL,
	}
)
|
||||||
|
|
||||||
|
// TestTransformations verifies each transform function maps the SDK
// metricdata fixtures above to their expected OTLP protobuf forms, and
// that invalid temporalities and unknown aggregations surface the
// corresponding errors while still returning the transformable data.
func TestTransformations(t *testing.T) {
	// Run tests from the "bottom-up" of the metricdata data-types and halt
	// when a failure occurs to ensure the clearest failure message (as
	// opposed to the opposite of testing from the top-down which will obscure
	// errors deep inside the structs).

	// DataPoint types. The int64 and float64 fixtures hold numerically
	// equal values, so both must produce identical protobuf output.
	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPInt64))
	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPFloat64))
	assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64))
	require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64))
	assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPInt64))
	assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPFloat64))
	assert.Equal(t, pbEHDPBA, ExponentialHistogramDataPointBuckets(otelEBucketA))

	// Aggregations.
	h, err := Histogram(otelHistInt64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
	h, err = Histogram(otelHistFloat64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
	// An unknown temporality must error and yield no result.
	h, err = Histogram(otelHistInvalid)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.Nil(t, h)

	s, err := Sum[int64](otelSumInt64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s)
	s, err = Sum[float64](otelSumFloat64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s)
	s, err = Sum[float64](otelSumInvalid)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.Nil(t, s)

	assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64))
	require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64))

	e, err := ExponentialHistogram(otelExpoHistInt64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e)
	e, err = ExponentialHistogram(otelExpoHistFloat64)
	assert.NoError(t, err)
	assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e)
	e, err = ExponentialHistogram(otelExpoHistInvalid)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.Nil(t, e)

	// Metrics. otelMetrics contains invalid and unknown entries, so both
	// error kinds must be reported while the valid metrics still convert.
	m, err := Metrics(otelMetrics)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.ErrorIs(t, err, errUnknownAggregation)
	require.Equal(t, pbMetrics, m)

	// Scope Metrics. Errors from the contained metrics must propagate.
	sm, err := ScopeMetrics(otelScopeMetrics)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.ErrorIs(t, err, errUnknownAggregation)
	require.Equal(t, pbScopeMetrics, sm)

	// Resource Metrics. Errors must propagate to the top level as well.
	rm, err := ResourceMetrics(otelResourceMetrics)
	assert.ErrorIs(t, err, errUnknownTemporality)
	assert.ErrorIs(t, err, errUnknownAggregation)
	require.Equal(t, pbResourceMetrics, rm)
}
|
Loading…
x
Reference in New Issue
Block a user