Empty queued spans when ForceFlush called (#2335)
* Empty queued spans when ForceFlush called

  Update the implementation of ForceFlush() to first ensure that all spans
  which are queued are added to the batch before calling export spans.

  Create a small ReadOnlySpan implementation which can be used as a marker
  that ForceFlush has been invoked and used to notify when all spans are
  ready to be exported.

  Fixes #2080.

* Add a changelog entry.

* Update CHANGELOG.md

  Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>

* Update sdk/trace/batch_span_processor.go

  Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>

* Improve test case to verify multiple flushes.

* Refactor code to use enqueue.

* Be more defensive on waiting for queue.

  Update the handling of the force flush span so we only wait on the channel
  if we were able to enqueue the span to the queue.

* Fix linter.

Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com>
Co-authored-by: Anthony Mirabella <a9@aneurysm9.com>
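At its core the fix is a sentinel-value pattern: ForceFlush pushes a marker span onto the same channel that carries ordinary spans, so by the time the queue consumer reaches the marker, every span enqueued before the flush has already been moved into the batch. Below is a minimal, self-contained sketch of that pattern — not the SDK code itself; the names item, flushMarker, and processor are hypothetical:

	package main

	import "fmt"

	type item interface{}

	// flushMarker is the sentinel; its channel is closed once the
	// consumer has seen everything enqueued before it.
	type flushMarker struct {
		flushed chan struct{}
	}

	type processor struct {
		queue chan item
	}

	func (p *processor) run() {
		for it := range p.queue {
			if m, ok := it.(flushMarker); ok {
				close(m.flushed) // all earlier items have been handled
				continue
			}
			fmt.Println("processing", it) // stand-in for batching/exporting
		}
	}

	func (p *processor) forceFlush() {
		m := flushMarker{flushed: make(chan struct{})}
		p.queue <- m
		<-m.flushed // block until the consumer drains up to the marker
	}

	func main() {
		p := &processor{queue: make(chan item, 16)}
		go p.run()
		p.queue <- "span-a"
		p.queue <- "span-b"
		p.forceFlush() // returns only after span-a and span-b were processed
	}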
@@ -22,6 +22,7 @@ import (
 	"time"
 
 	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
 )
 
 // Defaults for BatchSpanProcessorOptions.
@@ -153,10 +154,29 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
 	return err
 }
 
+type forceFlushSpan struct {
+	ReadOnlySpan
+	flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+	return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
 // ForceFlush exports all ended spans that have not yet been exported.
 func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
 	var err error
 	if bsp.e != nil {
+		flushCh := make(chan struct{})
+		if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}, true) {
+			select {
+			case <-flushCh:
+				// Processed any items in queue prior to ForceFlush being called
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+
 		wait := make(chan error)
 		go func() {
 			wait <- bsp.exportSpans(ctx)
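With the marker in place, ForceFlush both drains the queue and honors cancellation: if the caller's context ends before the marker is consumed, it returns ctx.Err() rather than waiting forever. A hedged usage sketch — the stdouttrace exporter is just one convenient choice, any SpanExporter works:

	package main

	import (
		"context"
		"log"
		"time"

		"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
		sdktrace "go.opentelemetry.io/otel/sdk/trace"
	)

	func main() {
		exporter, err := stdouttrace.New()
		if err != nil {
			log.Fatal(err)
		}
		tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))

		// ... record spans ...

		// Bound the flush: if the queue cannot be drained and exported
		// in time, ForceFlush returns ctx.Err() instead of blocking.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := tp.ForceFlush(ctx); err != nil {
			log.Printf("force flush did not complete: %v", err)
		}
	}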
@@ -248,6 +268,10 @@ func (bsp *batchSpanProcessor) processQueue() {
 				otel.Handle(err)
 			}
 		case sd := <-bsp.queue:
+			if ffs, ok := sd.(forceFlushSpan); ok {
+				close(ffs.flushed)
+				continue
+			}
 			bsp.batchMutex.Lock()
 			bsp.batch = append(bsp.batch, sd)
 			shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
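Note that the consumer intercepts the marker with a type assertion and skips it with continue, so the marker itself is never appended to the batch and never reaches the exporter; only real spans queued ahead of it get flushed.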
@@ -296,8 +320,12 @@ func (bsp *batchSpanProcessor) drainQueue() {
 	}
 }
 
 func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+	bsp.enqueueBlockOnQueueFull(context.TODO(), sd, bsp.o.BlockOnQueueFull)
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan, block bool) bool {
 	if !sd.SpanContext().IsSampled() {
-		return
+		return false
 	}
 
 	// This ensures the bsp.queue<- below does not panic as the
@@ -317,18 +345,24 @@ func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
 
 	select {
 	case <-bsp.stopCh:
-		return
+		return false
 	default:
 	}
 
-	if bsp.o.BlockOnQueueFull {
-		bsp.queue <- sd
-		return
+	if block {
+		select {
+		case bsp.queue <- sd:
+			return true
+		case <-ctx.Done():
+			return false
+		}
 	}
 
 	select {
 	case bsp.queue <- sd:
+		return true
 	default:
 		atomic.AddUint32(&bsp.dropped, 1)
 	}
+	return false
 }
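The reworked enqueue path is itself a reusable Go idiom: a channel send wrapped in select, so a blocking producer can bail out on context cancellation while a non-blocking producer drops immediately when the buffer is full. A standalone sketch under hypothetical names (tryEnqueue, q):

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	// tryEnqueue reports whether v was enqueued. With block=true it waits
	// on the send but gives up when ctx is cancelled; with block=false it
	// drops v immediately if the buffer is full.
	func tryEnqueue(ctx context.Context, q chan<- int, v int, block bool) bool {
		if block {
			select {
			case q <- v:
				return true
			case <-ctx.Done():
				return false
			}
		}
		select {
		case q <- v:
			return true
		default: // queue full: drop rather than block
			return false
		}
	}

	func main() {
		q := make(chan int, 1)
		q <- 0 // fill the buffer so the next send would block

		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		defer cancel()
		fmt.Println(tryEnqueue(ctx, q, 1, true))  // false: context times out
		fmt.Println(tryEnqueue(ctx, q, 2, false)) // false: dropped immediately
	}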