sdk/log: Assign fltrProcessors on provider creation instead of lazy (#6239)

I think it is better to calculate fltrProcessors upfront instead of
doing it lazily. Reasons:

- No locks or synchronization on the hot path. Even though the
performance overhead is small (usually unnoticeable in practice; less
than a nanosecond in my benchmark runs), I think it is good to minimize
locking on the hot path. The sketch after the benchmark output below
contrasts the two approaches.
- Simpler code (subjective, but at least 9 lines shorter).

```
goos: linux
goarch: amd64
pkg: go.opentelemetry.io/otel/sdk/log
cpu: 13th Gen Intel(R) Core(TM) i7-13800H
                 │   old.txt   │               new.txt               │
                 │   sec/op    │   sec/op     vs base                │
LoggerEnabled-20   6.010n ± 1%   5.364n ± 1%  -10.73% (p=0.000 n=10)

                 │  old.txt   │            new.txt             │
                 │    B/op    │    B/op     vs base            │
LoggerEnabled-20   0.000 ± 0%   0.000 ± 0%  ~ (p=1.000 n=10) ¹
¹ all samples are equal

                 │  old.txt   │            new.txt             │
                 │ allocs/op  │ allocs/op   vs base            │
LoggerEnabled-20   0.000 ± 0%   0.000 ± 0%  ~ (p=1.000 n=10) ¹
¹ all samples are equal
```
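
For context, here is a minimal standalone sketch of the two approaches. The Processor and FilterProcessor types are illustrative stand-ins for the SDK's Processor and x.FilterProcessor, not the real API:

```go
package sketch

import "sync"

// Illustrative stand-ins for the SDK's Processor and x.FilterProcessor.
type Processor interface{ OnEmit(msg string) }

type FilterProcessor interface {
	Processor
	Enabled() bool
}

// lazyProvider is the old approach: the filter slice is built on first
// use, so every filterProcessors call goes through sync.Once (an atomic
// load on the fast path, mutex synchronization on first use).
type lazyProvider struct {
	processors []Processor

	fltrOnce sync.Once
	fltr     []FilterProcessor
}

func (p *lazyProvider) filterProcessors() []FilterProcessor {
	p.fltrOnce.Do(func() {
		for _, proc := range p.processors {
			if f, ok := proc.(FilterProcessor); ok {
				p.fltr = append(p.fltr, f)
			}
		}
	})
	return p.fltr
}

// eagerProvider is the new approach: the slice is built once during
// construction, so reads are plain field accesses with no
// synchronization at all.
type eagerProvider struct {
	processors []Processor
	fltr       []FilterProcessor
}

func newEagerProvider(procs ...Processor) *eagerProvider {
	p := &eagerProvider{processors: procs}
	for _, proc := range procs {
		if f, ok := proc.(FilterProcessor); ok {
			p.fltr = append(p.fltr, f)
		}
	}
	return p
}
```

The eager variant trades a little one-time work at construction for a hot path that is a plain field read.
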
pellared authored Feb 4, 2025
1 parent 2260929 commit 078a4a8
Showing 4 changed files with 31 additions and 40 deletions.
3 changes: 1 addition & 2 deletions sdk/log/logger.go

```diff
@@ -50,12 +50,11 @@ func (l *logger) Emit(ctx context.Context, r log.Record) {
 // processed, true will be returned by default. A value of false will only be
 // returned if it can be positively verified that no Processor will process.
 func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool {
-	fltrs := l.provider.filterProcessors()
 	// If there are more Processors than FilterProcessors we cannot be sure
 	// that all Processors will drop the record. Therefore, return true.
 	//
 	// If all Processors are FilterProcessors, check if any is enabled.
-	return len(l.provider.processors) > len(fltrs) || anyEnabled(ctx, param, fltrs)
+	return len(l.provider.processors) > len(l.provider.fltrProcessors) || anyEnabled(ctx, param, l.provider.fltrProcessors)
 }
 
 func anyEnabled(ctx context.Context, param log.EnabledParameters, fltrs []x.FilterProcessor) bool {
```
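
In concrete terms, the short-circuit above means a record can only be reported as disabled when every registered Processor is also a FilterProcessor and none of the filters wants the record. A toy version of just that predicate (not the SDK API):

```go
package sketch

// enabled mirrors the short-circuit in logger.Enabled: processors that
// give no Enabled hint (processors > filters) force the answer to true;
// otherwise the filters decide.
func enabled(processors, filters int, anyFilterEnabled bool) bool {
	return processors > filters || anyFilterEnabled
}

// enabled(2, 1, false) == true  // a plain Processor may still process
// enabled(2, 2, false) == false // all are filters and all decline
// enabled(2, 2, true)  == true  // at least one filter wants the record
```
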
20 changes: 20 additions & 0 deletions sdk/log/logger_bench_test.go

```diff
@@ -62,3 +62,23 @@ func BenchmarkLoggerNewRecord(b *testing.B) {
 		})
 	})
 }
+
+func BenchmarkLoggerEnabled(b *testing.B) {
+	provider := NewLoggerProvider(
+		WithProcessor(newFltrProcessor("0", false)),
+		WithProcessor(newFltrProcessor("1", true)),
+	)
+	logger := provider.Logger(b.Name())
+	ctx := context.Background()
+	param := log.EnabledParameters{Severity: log.SeverityDebug}
+	var enabled bool
+
+	b.ReportAllocs()
+	b.ResetTimer()
+
+	for n := 0; n < b.N; n++ {
+		enabled = logger.Enabled(ctx, param)
+	}
+
+	_ = enabled
+}
```
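
The `var enabled bool` sink and the trailing `_ = enabled` are the usual Go benchmarking idiom: storing each result in a variable that survives the loop keeps the compiler from eliminating the `Enabled` call as dead code.
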
20 changes: 0 additions & 20 deletions sdk/log/logger_test.go

```diff
@@ -279,23 +279,3 @@ func TestLoggerEnabled(t *testing.T) {
 		})
 	}
 }
-
-func BenchmarkLoggerEnabled(b *testing.B) {
-	provider := NewLoggerProvider(
-		WithProcessor(newFltrProcessor("0", false)),
-		WithProcessor(newFltrProcessor("1", true)),
-	)
-	logger := provider.Logger(b.Name())
-	ctx := context.Background()
-	param := log.EnabledParameters{Severity: log.SeverityDebug}
-	var enabled bool
-
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	for n := 0; n < b.N; n++ {
-		enabled = logger.Enabled(ctx, param)
-	}
-
-	_ = enabled
-}
```
28 changes: 10 additions & 18 deletions sdk/log/provider.go

```diff
@@ -28,10 +28,11 @@ const (
 )
 
 type providerConfig struct {
-	resource      *resource.Resource
-	processors    []Processor
-	attrCntLim    setting[int]
-	attrValLenLim setting[int]
+	resource       *resource.Resource
+	processors     []Processor
+	fltrProcessors []x.FilterProcessor
+	attrCntLim     setting[int]
+	attrValLenLim  setting[int]
 }
 
 func newProviderConfig(opts []LoggerProviderOption) providerConfig {
@@ -64,12 +65,10 @@ type LoggerProvider struct {
 
 	resource                  *resource.Resource
 	processors                []Processor
+	fltrProcessors            []x.FilterProcessor
 	attributeCountLimit       int
 	attributeValueLengthLimit int
 
-	fltrProcessorsOnce sync.Once
-	fltrProcessors     []x.FilterProcessor
-
 	loggersMu sync.Mutex
 	loggers   map[instrumentation.Scope]*logger
 
@@ -92,22 +91,12 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
 	return &LoggerProvider{
 		resource:                  cfg.resource,
 		processors:                cfg.processors,
+		fltrProcessors:            cfg.fltrProcessors,
 		attributeCountLimit:       cfg.attrCntLim.Value,
 		attributeValueLengthLimit: cfg.attrValLenLim.Value,
 	}
 }
 
-func (p *LoggerProvider) filterProcessors() []x.FilterProcessor {
-	p.fltrProcessorsOnce.Do(func() {
-		for _, proc := range p.processors {
-			if f, ok := proc.(x.FilterProcessor); ok {
-				p.fltrProcessors = append(p.fltrProcessors, f)
-			}
-		}
-	})
-	return p.fltrProcessors
-}
-
 // Logger returns a new [log.Logger] with the provided name and configuration.
 //
 // If p is shut down, a [noop.Logger] instance is returned.
@@ -220,6 +209,9 @@ func WithResource(res *resource.Resource) LoggerProviderOption {
 func WithProcessor(processor Processor) LoggerProviderOption {
 	return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig {
 		cfg.processors = append(cfg.processors, processor)
+		if f, ok := processor.(x.FilterProcessor); ok {
+			cfg.fltrProcessors = append(cfg.fltrProcessors, f)
+		}
 		return cfg
 	})
 }
```
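
The filter detection now happens while options are applied, in the same functional-options style the SDK already uses for providerConfig. A standalone sketch of that part, again with stand-in types rather than the SDK's:

```go
package sketch

// Stand-ins for the SDK's Processor and x.FilterProcessor.
type Processor interface{ OnEmit(msg string) }

type FilterProcessor interface {
	Processor
	Enabled() bool
}

type config struct {
	processors []Processor
	filters    []FilterProcessor
}

type Option func(config) config

// WithProcessor records the processor and, if it also implements
// FilterProcessor, adds it to the filter slice at configuration time,
// so the provider never needs to re-scan processors later.
func WithProcessor(p Processor) Option {
	return func(cfg config) config {
		cfg.processors = append(cfg.processors, p)
		if f, ok := p.(FilterProcessor); ok {
			cfg.filters = append(cfg.filters, f)
		}
		return cfg
	}
}
```
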
