1
0
mirror of https://github.com/open-telemetry/opentelemetry-go.git synced 2025-11-25 22:41:46 +02:00

sdk/log: Rename BatchingProcessor to BatchProcessor (#5229)

This commit is contained in:
Robert Pająk
2024-04-19 08:34:30 +02:00
committed by GitHub
parent e055c7d315
commit ed666f7713
5 changed files with 37 additions and 37 deletions

View File

@@ -55,10 +55,10 @@ The user can configure custom processors and decorate built-in processors.
The [Simple processor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#simple-processor)
is implemented as `SimpleProcessor` struct in [simple.go](simple.go).
### BatchingProcessor
### BatchProcessor
The [Batching processor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#batching-processor)
is implemented as `BatchingProcessor` struct in [batch.go](batch.go).
is implemented as `BatchProcessor` struct in [batch.go](batch.go).
The `Batcher` can also be configured using the `OTEL_BLRP_*` environment variables as
[defined by the specification](https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#batch-logrecord-processor).

View File

@@ -25,20 +25,20 @@ const (
envarExpMaxBatchSize = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
)
// Compile-time check BatchingProcessor implements Processor.
var _ Processor = (*BatchingProcessor)(nil)
// Compile-time check BatchProcessor implements Processor.
var _ Processor = (*BatchProcessor)(nil)
// BatchingProcessor is a processor that exports batches of log records.
// A BatchingProcessor must be created with [NewBatchingProcessor].
type BatchingProcessor struct {
// The BatchingProcessor is designed to provide the highest throughput of
// BatchProcessor is a processor that exports batches of log records.
// A BatchProcessor must be created with [NewBatchProcessor].
type BatchProcessor struct {
// The BatchProcessor is designed to provide the highest throughput of
// log records possible while being compatible with OpenTelemetry. The
// entry point of log records is the OnEmit method. This method is designed
// to receive records as fast as possible while still honoring shutdown
// commands. All records received are enqueued to queue.
//
// In order to block OnEmit as little as possible, a separate "poll"
// goroutine is spawned at the creation of a BatchingProcessor. This
// goroutine is spawned at the creation of a BatchProcessor. This
// goroutine is responsible for batching the queue at regular polled
// intervals, or when it is directly signaled to.
//
@@ -48,7 +48,7 @@ type BatchingProcessor struct {
// separate goroutine dedicated to the export. This asynchronous behavior
// allows the poll goroutine to maintain accurate interval polling.
//
// __BatchingProcessor__ __Poll Goroutine__ __Export Goroutine__
// __BatchProcessor__ __Poll Goroutine__ __Export Goroutine__
// || || || || || ||
// || ********** || || || || ********** ||
// || Records=>* OnEmit * || || | - ticker || || * export * ||
@@ -91,15 +91,15 @@ type BatchingProcessor struct {
// pollDone signals the poll goroutine has completed.
pollDone chan struct{}
// stopped holds the stopped state of the BatchingProcessor.
// stopped holds the stopped state of the BatchProcessor.
stopped atomic.Bool
}
// NewBatchingProcessor decorates the provided exporter
// NewBatchProcessor decorates the provided exporter
// so that the log records are batched before exporting.
//
// All of the exporter's methods are called synchronously.
func NewBatchingProcessor(exporter Exporter, opts ...BatchProcessorOption) *BatchingProcessor {
func NewBatchProcessor(exporter Exporter, opts ...BatchProcessorOption) *BatchProcessor {
cfg := newBatchingConfig(opts)
if exporter == nil {
// Do not panic on nil export.
@@ -113,7 +113,7 @@ func NewBatchingProcessor(exporter Exporter, opts ...BatchProcessorOption) *Batc
// appropriately on export.
exporter = newChunkExporter(exporter, cfg.expMaxBatchSize.Value)
b := &BatchingProcessor{
b := &BatchProcessor{
// TODO: explore making the size of this configurable.
exporter: newBufferExporter(exporter, 1),
@@ -128,7 +128,7 @@ func NewBatchingProcessor(exporter Exporter, opts ...BatchProcessorOption) *Batc
// poll spawns a goroutine to handle interval polling and batch exporting. The
// returned done chan is closed when the spawned goroutine completes.
func (b *BatchingProcessor) poll(interval time.Duration) (done chan struct{}) {
func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) {
done = make(chan struct{})
ticker := time.NewTicker(interval)
@@ -169,7 +169,7 @@ func (b *BatchingProcessor) poll(interval time.Duration) (done chan struct{}) {
}
// OnEmit batches provided log record.
func (b *BatchingProcessor) OnEmit(_ context.Context, r Record) error {
func (b *BatchProcessor) OnEmit(_ context.Context, r Record) error {
if b.stopped.Load() {
return nil
}
@@ -186,12 +186,12 @@ func (b *BatchingProcessor) OnEmit(_ context.Context, r Record) error {
}
// Enabled returns if b is enabled.
func (b *BatchingProcessor) Enabled(context.Context, Record) bool {
func (b *BatchProcessor) Enabled(context.Context, Record) bool {
return !b.stopped.Load()
}
// Shutdown flushes queued log records and shuts down the decorated exporter.
func (b *BatchingProcessor) Shutdown(ctx context.Context) error {
func (b *BatchProcessor) Shutdown(ctx context.Context) error {
if b.stopped.Swap(true) {
return nil
}
@@ -218,7 +218,7 @@ var ctxErr = func(ctx context.Context) error {
}
// ForceFlush flushes queued log records and flushes the decorated exporter.
func (b *BatchingProcessor) ForceFlush(ctx context.Context) error {
func (b *BatchProcessor) ForceFlush(ctx context.Context) error {
if b.stopped.Load() {
return nil
}
@@ -372,7 +372,7 @@ func newBatchingConfig(options []BatchProcessorOption) batchingConfig {
return c
}
// BatchProcessorOption applies a configuration to a [BatchingProcessor].
// BatchProcessorOption applies a configuration to a [BatchProcessor].
type BatchProcessorOption interface {
apply(batchingConfig) batchingConfig
}

View File

@@ -145,17 +145,17 @@ func TestNewBatchingConfig(t *testing.T) {
}
}
func TestBatchingProcessor(t *testing.T) {
func TestBatchProcessor(t *testing.T) {
ctx := context.Background()
t.Run("NilExporter", func(t *testing.T) {
assert.NotPanics(t, func() { NewBatchingProcessor(nil) })
assert.NotPanics(t, func() { NewBatchProcessor(nil) })
})
t.Run("Polling", func(t *testing.T) {
e := newTestExporter(nil)
const size = 15
b := NewBatchingProcessor(
b := NewBatchProcessor(
e,
WithMaxQueueSize(2*size),
WithExportMaxBatchSize(2*size),
@@ -178,7 +178,7 @@ func TestBatchingProcessor(t *testing.T) {
t.Run("OnEmit", func(t *testing.T) {
const batch = 10
e := newTestExporter(nil)
b := NewBatchingProcessor(
b := NewBatchProcessor(
e,
WithMaxQueueSize(10*batch),
WithExportMaxBatchSize(batch),
@@ -201,7 +201,7 @@ func TestBatchingProcessor(t *testing.T) {
e.ExportTrigger = make(chan struct{})
const batch = 10
b := NewBatchingProcessor(
b := NewBatchProcessor(
e,
WithMaxQueueSize(3*batch),
WithExportMaxBatchSize(batch),
@@ -236,7 +236,7 @@ func TestBatchingProcessor(t *testing.T) {
})
t.Run("Enabled", func(t *testing.T) {
b := NewBatchingProcessor(defaultNoopExporter)
b := NewBatchProcessor(defaultNoopExporter)
assert.True(t, b.Enabled(ctx, Record{}))
_ = b.Shutdown(ctx)
@@ -246,14 +246,14 @@ func TestBatchingProcessor(t *testing.T) {
t.Run("Shutdown", func(t *testing.T) {
t.Run("Error", func(t *testing.T) {
e := newTestExporter(assert.AnError)
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
assert.ErrorIs(t, b.Shutdown(ctx), assert.AnError, "exporter error not returned")
assert.NoError(t, b.Shutdown(ctx))
})
t.Run("Multiple", func(t *testing.T) {
e := newTestExporter(nil)
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
const shutdowns = 3
for i := 0; i < shutdowns; i++ {
@@ -264,7 +264,7 @@ func TestBatchingProcessor(t *testing.T) {
t.Run("OnEmit", func(t *testing.T) {
e := newTestExporter(nil)
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
assert.NoError(t, b.Shutdown(ctx))
want := e.ExportN()
@@ -274,7 +274,7 @@ func TestBatchingProcessor(t *testing.T) {
t.Run("ForceFlush", func(t *testing.T) {
e := newTestExporter(nil)
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
assert.NoError(t, b.OnEmit(ctx, Record{}))
assert.NoError(t, b.Shutdown(ctx))
@@ -287,7 +287,7 @@ func TestBatchingProcessor(t *testing.T) {
e := newTestExporter(nil)
e.ExportTrigger = make(chan struct{})
t.Cleanup(func() { close(e.ExportTrigger) })
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
ctx := context.Background()
c, cancel := context.WithCancel(ctx)
@@ -300,7 +300,7 @@ func TestBatchingProcessor(t *testing.T) {
t.Run("ForceFlush", func(t *testing.T) {
t.Run("Flush", func(t *testing.T) {
e := newTestExporter(assert.AnError)
b := NewBatchingProcessor(
b := NewBatchProcessor(
e,
WithMaxQueueSize(100),
WithExportMaxBatchSize(10),
@@ -336,7 +336,7 @@ func TestBatchingProcessor(t *testing.T) {
t.Cleanup(func() { ctxErr = orig })
const batch = 1
b := NewBatchingProcessor(
b := NewBatchProcessor(
e,
WithMaxQueueSize(10*batch),
WithExportMaxBatchSize(batch),
@@ -385,7 +385,7 @@ func TestBatchingProcessor(t *testing.T) {
t.Run("CanceledContext", func(t *testing.T) {
e := newTestExporter(nil)
e.ExportTrigger = make(chan struct{})
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
t.Cleanup(func() { _ = b.Shutdown(ctx) })
var r Record
@@ -404,7 +404,7 @@ func TestBatchingProcessor(t *testing.T) {
const goRoutines = 10
e := newTestExporter(nil)
b := NewBatchingProcessor(e)
b := NewBatchProcessor(e)
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup

View File

@@ -192,7 +192,7 @@ func WithResource(res *resource.Resource) LoggerProviderOption {
// Each WithProcessor creates a separate pipeline. Use custom decorators
// for advanced scenarios such as enriching with attributes.
//
// For production, use [NewBatchingProcessor] to batch log records before they are exported.
// For production, use [NewBatchProcessor] to batch log records before they are exported.
// For testing and debugging, use [NewSimpleProcessor] to synchronously export log records.
func WithProcessor(processor Processor) LoggerProviderOption {
return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {

View File

@@ -20,7 +20,7 @@ type SimpleProcessor struct {
// This Processor is not recommended for production use. The synchronous
// nature of this Processor makes it good for testing, debugging, or
// showing examples of other features, but it can be slow and have a high
// computation resource usage overhead. [NewBatchingProcessor] is recommended
// computation resource usage overhead. [NewBatchProcessor] is recommended
// for production use instead.
func NewSimpleProcessor(exporter Exporter, _ ...SimpleProcessorOption) *SimpleProcessor {
if exporter == nil {