Mirror of https://github.com/open-telemetry/opentelemetry-go.git
synced 2025-01-26 03:52:03 +02:00
commit 5a728db2e9
* Move connection logic into a grpcConnection object. If we need to maintain more than one connection in the future, this split will come in handy.
* Make another channel a signal channel. There is another channel that serves as a one-time signal, where the channel's data type does not matter.
* Reorder and document connection members. This makes clear that the lock guards only the connection, since it can be changed by multiple goroutines, while the other members are either atomic or read-only.
* Move the stop signal into the connection. The stop channel was rather useless on the exporter side; its primary reason for existence is to stop a background reconnecting goroutine. Since that goroutine lives entirely within the grpcConnection object, move the stop channel there. Also expose a function that unifies the stop channel with context cancellation, so the exporter can use it without knowing anything about stop channels, and make the export functions a bit more consistent.
* Do not run the reconnection routine while being stopped. It is possible that both the disconnected channel and the stop channel are triggered around the same time, so the goroutine is as likely to start reconnecting as it is to return. Make sure we return if the stop channel is closed.
* Nil the clients on connection error. Setting the clients to nil on connection error means we do not try to send data over a bad connection, but return a "no client" error immediately.
* Do not call the new-connection handler within a critical section. It is rather risky to call a callback coming from outside within a critical section, so move it out.
* Add a context parameter to the connection routines. Connecting to the collector may also take its time, so it can be useful in some cases to pass a context with a deadline. Currently we just pass a background context, so this commit does not really change any behavior; the follow-up commits will make use of it.
* Add a context parameter to NewExporter and Start. This makes it possible to limit the time spent on connecting to the collector.
* Stop connecting on shutdown. Dialling the gRPC service ignored the closing of the stop channel, but this can be easily changed.
* Close the connection after the background goroutine is shut down. That way we can make sure there is no window between closing a connection and waiting for the background goroutine to return in which a new connection could be established.
* Remove an unnecessary nil check. This member is never nil unless the Exporter is created like &Exporter{}, which is not a thing we support anyway.
* Update the changelog.

Co-authored-by: Stefan Prisca <stefan.prisca@gmail.com>
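The commit message describes a few Go concurrency patterns worth spelling out: a mutex that guards only the swappable connection, a close-only "signal" channel whose element type does not matter, and a helper that folds the stop signal into context cancellation so callers never touch the channel directly. Below is a minimal sketch of those patterns; the names (grpcConn, stopCh, contextWithStop, conn) are illustrative assumptions, not the exporter's actual identifiers.

package connection

import (
    "context"
    "errors"
    "sync"

    "google.golang.org/grpc"
)

type grpcConn struct {
    // mu guards only cc: the reconnecting goroutine may swap the
    // connection while export calls read it. The other members would be
    // atomic or read-only and need no lock.
    mu sync.Mutex
    cc *grpc.ClientConn

    // stopCh is a one-time signal: it is only ever closed, never sent
    // on, so its element type does not matter.
    stopCh chan struct{}
}

// contextWithStop derives a context that is cancelled either when the
// parent context is cancelled or when the connection is stopped, hiding
// the stop channel from callers.
func (c *grpcConn) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
    ctx, cancel := context.WithCancel(ctx)
    go func() {
        select {
        case <-ctx.Done():
            // Parent cancelled or deadline hit; nothing more to do.
        case <-c.stopCh:
            cancel()
        }
    }()
    return ctx, cancel
}

// conn returns the current connection, or fails fast with a "no client"
// error when a connection error has nilled it out.
func (c *grpcConn) conn() (*grpc.ClientConn, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.cc == nil {
        return nil, errors.New("no client connection")
    }
    return c.cc, nil
}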
144 lines
4.2 KiB
Go
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Example using the OTLP exporter + collector + third-party backends. For
// information about using the exporter, see:
// https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp?tab=doc#example-package-Insecure
package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "google.golang.org/grpc"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp"
    "go.opentelemetry.io/otel/label"
    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/propagation"
    "go.opentelemetry.io/otel/sdk/metric/controller/push"
    "go.opentelemetry.io/otel/sdk/metric/processor/basic"
    "go.opentelemetry.io/otel/sdk/metric/selector/simple"
    "go.opentelemetry.io/otel/sdk/resource"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
    "go.opentelemetry.io/otel/semconv"
    "go.opentelemetry.io/otel/trace"
)

// initProvider initializes an OTLP exporter, and configures the corresponding
// trace and metric providers.
func initProvider() func() {
    ctx := context.Background()

    // If the OpenTelemetry Collector is running on a local cluster (minikube or
    // microk8s), it should be accessible through the NodePort service at the
    // `localhost:30080` address. Otherwise, replace `localhost` with the
    // address of your cluster. If you run the app inside k8s, then you can
    // probably connect directly to the service through DNS.
    exp, err := otlp.NewExporter(ctx,
        otlp.WithInsecure(),
        otlp.WithAddress("localhost:30080"),
        otlp.WithGRPCDialOption(grpc.WithBlock()), // useful for testing
    )
    handleErr(err, "failed to create exporter")

    res, err := resource.New(ctx,
        resource.WithAttributes(
            // the service name used to display traces in backends
            semconv.ServiceNameKey.String("test-service"),
        ),
    )
    handleErr(err, "failed to create resource")

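    // A batch span processor queues finished spans and hands them to the
    // exporter in batches, keeping export work off the request path.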
    bsp := sdktrace.NewBatchSpanProcessor(exp)
    tracerProvider := sdktrace.NewTracerProvider(
        sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
        sdktrace.WithResource(res),
        sdktrace.WithSpanProcessor(bsp),
    )

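    // The push controller collects and exports the accumulated metrics on a
    // fixed interval, here every 2 seconds.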
    pusher := push.New(
        basic.New(
            simple.NewWithExactDistribution(),
            exp,
        ),
        exp,
        push.WithPeriod(2*time.Second),
    )

    // Set the global propagator to tracecontext (the default is a no-op).
    otel.SetTextMapPropagator(propagation.TraceContext{})
    otel.SetTracerProvider(tracerProvider)
    otel.SetMeterProvider(pusher.MeterProvider())
    pusher.Start()

    return func() {
        handleErr(tracerProvider.Shutdown(ctx), "failed to shutdown provider")
        handleErr(exp.Shutdown(ctx), "failed to stop exporter")
        pusher.Stop() // pushes any last exports to the receiver
    }
}

func main() {
    log.Printf("Waiting for connection...")

    shutdown := initProvider()
    defer shutdown()

    tracer := otel.Tracer("test-tracer")
    meter := otel.Meter("test-meter")

    // labels represent additional key-value descriptors that can be bound to a
    // metric observer or recorder.
    commonLabels := []label.KeyValue{
        label.String("labelA", "chocolate"),
        label.String("labelB", "raspberry"),
        label.String("labelC", "vanilla"),
    }

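    // Binding the labels to the instrument up front means the label set is
    // resolved once, rather than on every Add call.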
    // Bound counter metric example (despite the variable name, this creates a
    // Float64Counter, not a value recorder).
    valuerecorder := metric.Must(meter).
        NewFloat64Counter(
            "an_important_metric",
            metric.WithDescription("Measures the cumulative epicness of the app"),
        ).Bind(commonLabels...)
    defer valuerecorder.Unbind()

    // work begins
    ctx, span := tracer.Start(
        context.Background(),
        "CollectorExporter-Example",
        trace.WithAttributes(commonLabels...))
    defer span.End()
    for i := 0; i < 10; i++ {
        _, iSpan := tracer.Start(ctx, fmt.Sprintf("Sample-%d", i))
        log.Printf("Doing really hard work (%d / 10)\n", i+1)
        valuerecorder.Add(ctx, 1.0)

        <-time.After(time.Second)
        iSpan.End()
    }

    log.Printf("Done!")
}

func handleErr(err error, message string) {
    if err != nil {
        log.Fatalf("%s: %v", message, err)
    }
}