1
0
mirror of https://github.com/khorevaa/logos.git synced 2025-01-22 05:10:53 +02:00

Merge pull request #2 from khorevaa/fix/feat_new

fix: исправлена ошибка в консоли. Переход на AtomicLevel
This commit is contained in:
Aleksey Khorev 2022-02-18 23:47:00 +03:00 committed by GitHub
commit a724686052
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 254 additions and 867 deletions

162
README.md
View File

@ -24,8 +24,7 @@ This project is a wrapper around the excellent logging framework zap.
- `Gelf`, *gelf for greylog*
- `Json`, *standard json encoder*
* Useful utility function
- `Setlevel(LogName string, level int)`, *hot update logger level*
- `UpdateLogger(LogName string, logger *zap.Logger*)`, *hot update core logger*
- `SetLevel(LogName string, level int, appender... string)`, *hot update logger level*
- `RedirectStdLog()`, *redirect standard log package*
* High Performance
- [Significantly faster][high-performance] json loggers.
@ -218,165 +217,6 @@ loggers:
![img.png](img/img.png)
> Note: pretty logging also works on windows console
### Jobs and Timing events
Jobs serve some functions:
* Jobs record a timing (eg, it took 21ms to complete this job)
* Jobs record a status (eg, did the job complete successfully or was there an error?)
Let's say you're writing a web service that processes JSON requests/responses. You might write something like this:
```go
import (
"github.com/khorevaa/logos"
"net/http"
)
var log = logos.New("github.com/khorevaa/rest-api") // like github.com/khorevaa/logos
func main() {
http.HandleFunc("/users", getUsers)
}
func getUsers(rw http.ResponseWriter, r *http.Request) {
// All logging and instrumentation should be within the context of a job!
job := log.Job("get_users")
err := fetchUsersFromDatabase(r)
if err != nil {
// When in your job's context, you can log errors, events, timings, etc.
job.EventErr("fetch_user_from_database", err)
}
// When done with the job, call job.Complete with a completion status.
if err == nil {
job.Complete(logos.Success)
} else {
job.Complete(logos.Err)
}
}
```
There are five types of completion statuses:
* Success - Your job completed successfully.
* Error - Some library call resulted in an error that prevented you from successfully completing your job.
* Panic - Some code panicked!
* ValidationError - Your code was fine, but the user passed in bad inputs, and so the job wasn't completed successfully.
* Junk - The job wasn't completed successfully, but not really because of an Error or ValidationError. For instance, maybe there's just a 404 (not found) or 401 (unauthorized) request to your app. This status code might not apply to all apps.
To log a jobs and events, use `Job`.
```go
package main
import (
"errors"
"github.com/khorevaa/logos"
)
func main() {
var err error
log := logos.New("<your-package-name>") // like github.com/khorevaa/logos
log.Info("This is me first log. Hello world logging systems")
job := log.Job("get_users")
job.Event("connecting to bd")
// do connection
if err != nil {
err = job.EventErr("connecting to bd", err)
panic(err)
}
// When done with the job, call job.Complete with a completion status.
if err == nil {
job.Complete(logos.Success)
} else {
job.Complete(logos.Err)
}
err = errors.New("log system error")
log.Debug("This is me first error", logos.Any("err", err))
}
```
#### Events, Timings, Gauges, and Errors
Within jobs, you can emit events, timings, gauges, and errors. The first argument of each of these methods is supposed to be a key.
Camel case with dots is good because it works with other metrics stores like StatsD.
Each method has a basic version as well as a version that accepts keys/values.
##### Events & Errors
Events emitting:
```go
// Events. Notice the camel case with dots.
// (This is helpful when you want to use StatsD sinks)
job.Event("starting_server")
job.Event("proccess_user.by_email.gmail")
// Event with keys and values:
job.EventKv("failover.started", logos.Kvs{"from_ip": fmt.Sprint(currentIP)})
```
Errors emitting:
```go
// Errors:
err := someFunc(user.Email)
if err != nil {
return job.EventErr("some_func", err)
}
// And with keys/Values:
job.EventErrKv("some_func", err, logos.Kvs{"email": user.Email})
```
##### Gauges
```go
// Gauges:
job.Gauge("num_goroutines", numRunningGoroutines())
// Timings also support keys/values:
job.GaugeKv("num_goroutines", numRunningGoroutines(),
logos.Kvs{"dispatcher": dispatcherStatus()})
```
##### Timing
```go
// Timings:
startTime := time.Now()
// Do something...
job.Timing("fetch_user", time.Since(startTime).Nanoseconds()) // NOTE: Nanoseconds!
// Timings also support keys/values:
job.TimingKv("fetch_user", time.Since(startTime).Nanoseconds(),
logos.Kvs{"user_email": userEmail})
```
#### Keys and Values
Most objects and methods in `Job` work with key/value pairs. Key/value pairs are just maps of strings to strings. Keys and values are only relevant right now for logging sinks: The keys and values will be printed on each line written.
You can add keys/values to a job. This is useful for things like hostname or pid. These keys/values will show up on every future event/timing/error.
```go
log := logos.New("<your-package-name>")
job := log.Job(map[string]string{
"hostname": hostname,
"pid": pid,
})
```
or
```go
log := logos.New("<your-package-name>")
job := log.Job()
job.KeyValue("hostname", hostname)
job.KeyValue("pid", pid)
```
### High Performance
A quick and simple benchmark with zap/zerolog, which runs on [github actions][benchmark]:

13
ctx.go Normal file
View File

@ -0,0 +1,13 @@
package logos
import "context"
type ctxLoggerKey struct{}
// ToCtx derives a new context carrying logger, retrievable later with FromCtx.
func ToCtx(ctx context.Context, logger Logger) context.Context {
	valueCtx := context.WithValue(ctx, ctxLoggerKey{}, logger)
	return valueCtx
}
// FromCtx returns the Logger previously stored in ctx by ToCtx.
// It returns nil when ctx carries no logger — the previous unchecked
// type assertion panicked in that case, which made FromCtx unsafe to
// call on arbitrary contexts.
func FromCtx(ctx context.Context) Logger {
	if logger, ok := ctx.Value(ctxLoggerKey{}).(Logger); ok {
		return logger
	}
	return nil
}

View File

@ -3,11 +3,12 @@ package console
import (
"encoding/base64"
"fmt"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"strconv"
"sync"
"time"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
)
var poolColoredEncoder = sync.Pool{
@ -305,11 +306,11 @@ func (e *coloredEncoder) OpenNamespace(_ string) {
}
func (e *coloredEncoder) AppendDuration(val time.Duration) {
//cur := e.buf.Len()
//e.EncodeDuration(val, e)
//if cur == e.buf.Len() {
// cur := e.buf.Len()
// e.EncodeDuration(val, e)
// if cur == e.buf.Len() {
// e.AppendInt64(int64(val))
//}
// }
e.appendColoredString(val.String(), e.scheme.Time)

View File

@ -2,11 +2,12 @@ package console
import (
"fmt"
"time"
"github.com/khorevaa/logos/appender"
"github.com/khorevaa/logos/internal/common"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"time"
)
var defaultTimestampFormat = "2006-01-02T15:04:05.000Z0700"
@ -80,7 +81,7 @@ func (e *Encoder) clone() *Encoder {
}
// EncodeEntry implements the EncodeEntry method of the zapcore Encoder interface
func (e Encoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
func (e *Encoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
line := bufferpool.Get()
@ -90,7 +91,7 @@ func (e Encoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer
e.colorizeText(line, ent.Level.CapitalString(), lvlColor)
//pp.Println(e.DisableNaming, ent.LoggerName)
// pp.Println(e.DisableNaming, ent.LoggerName)
if !e.DisableNaming && len(ent.LoggerName) > 0 {
e.addSeparatorIfNecessary(line)
@ -109,7 +110,11 @@ func (e Encoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer
e.addSeparatorIfNecessary(line)
e.colorizeText(line, ent.Message, lvlColor)
}
//e.addSeparatorIfNecessary(line)
if e.buf.Len() > 0 {
line.Write(e.buf.Bytes())
}
// Add any structured context.
e.writeContext(lvlColor, line, fields)

228
logger.go
View File

@ -1,11 +1,13 @@
package logos
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"strings"
"sync"
"sync/atomic"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
var _ Logger = (*warpLogger)(nil)
@ -38,180 +40,206 @@ type warpLogger struct {
_lockWait sync.WaitGroup
}
func (l *warpLogger) Sugar() SugaredLogger {
func (log *warpLogger) copy() *warpLogger {
l.initSugaredLogger()
return l
return &warpLogger{
Name: log.Name,
defLogger: log.defLogger.WithOptions(),
mu: sync.RWMutex{},
emitLevel: log.emitLevel,
}
}
func (l *warpLogger) Debug(msg string, fields ...Field) {
l.checkLock()
l.defLogger.Debug(msg, fields...)
func (log *warpLogger) Sugar() SugaredLogger {
log.initSugaredLogger()
return log
}
func (l *warpLogger) Info(msg string, fields ...Field) {
func (log *warpLogger) Named(s string) Logger {
l.checkLock()
l.defLogger.Info(msg, fields...)
copyLog := log.copy()
if copyLog.Name == "" {
copyLog.Name = s
} else {
copyLog.Name = strings.Join([]string{copyLog.Name, s}, ".")
}
copyLog.defLogger = copyLog.defLogger.Named(s)
return copyLog
}
func (l *warpLogger) Warn(msg string, fields ...Field) {
l.checkLock()
l.defLogger.Warn(msg, fields...)
func (log *warpLogger) With(fields ...Field) Logger {
log.checkLock()
logger := log.copy()
logger.defLogger = logger.defLogger.WithOptions(zap.Fields(fields...))
return logger
}
func (l *warpLogger) Error(msg string, fields ...Field) {
l.checkLock()
l.defLogger.Error(msg, fields...)
func (log *warpLogger) Debug(msg string, fields ...Field) {
log.checkLock()
log.defLogger.Debug(msg, fields...)
}
func (l *warpLogger) Fatal(msg string, fields ...Field) {
l.checkLock()
l.defLogger.Fatal(msg, fields...)
func (log *warpLogger) Info(msg string, fields ...Field) {
log.checkLock()
log.defLogger.Info(msg, fields...)
}
func (l *warpLogger) Panic(msg string, fields ...Field) {
l.checkLock()
l.defLogger.Panic(msg, fields...)
func (log *warpLogger) Warn(msg string, fields ...Field) {
log.checkLock()
log.defLogger.Warn(msg, fields...)
}
func (l *warpLogger) DPanic(msg string, fields ...Field) {
l.checkLock()
l.defLogger.DPanic(msg, fields...)
func (log *warpLogger) Error(msg string, fields ...Field) {
log.checkLock()
log.defLogger.Error(msg, fields...)
}
func (l *warpLogger) Debugf(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.Debugf(format, args...)
func (log *warpLogger) Fatal(msg string, fields ...Field) {
log.checkLock()
log.defLogger.Fatal(msg, fields...)
}
func (l *warpLogger) Infof(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.Infof(format, args...)
func (log *warpLogger) Panic(msg string, fields ...Field) {
log.checkLock()
log.defLogger.Panic(msg, fields...)
}
func (l *warpLogger) Warnf(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.Warnf(format, args...)
func (log *warpLogger) DPanic(msg string, fields ...Field) {
log.checkLock()
log.defLogger.DPanic(msg, fields...)
}
func (l *warpLogger) Errorf(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.Errorf(format, args...)
func (log *warpLogger) Debugf(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.Debugf(format, args...)
}
func (l *warpLogger) Fatalf(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.Fatalf(format, args...)
func (log *warpLogger) Infof(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.Infof(format, args...)
}
func (l *warpLogger) Panicf(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.Panicf(format, args...)
func (log *warpLogger) Warnf(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.Warnf(format, args...)
}
func (l *warpLogger) DPanicf(format string, args ...interface{}) {
l.checkLock()
l.sugaredLogger.DPanicf(format, args...)
func (log *warpLogger) Errorf(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.Errorf(format, args...)
}
func (l *warpLogger) Debugw(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.Debugw(msg, keysAndValues...)
func (log *warpLogger) Fatalf(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.Fatalf(format, args...)
}
func (l *warpLogger) Infow(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.Infow(msg, keysAndValues...)
func (log *warpLogger) Panicf(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.Panicf(format, args...)
}
func (l *warpLogger) Warnw(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.Warnw(msg, keysAndValues...)
func (log *warpLogger) DPanicf(format string, args ...interface{}) {
log.checkLock()
log.sugaredLogger.DPanicf(format, args...)
}
func (l *warpLogger) Errorw(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.Errorw(msg, keysAndValues...)
func (log *warpLogger) Debugw(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.Debugw(msg, keysAndValues...)
}
func (l *warpLogger) Fatalw(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.Fatalw(msg, keysAndValues...)
func (log *warpLogger) Infow(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.Infow(msg, keysAndValues...)
}
func (l *warpLogger) Panicw(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.Panicw(msg, keysAndValues...)
func (log *warpLogger) Warnw(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.Warnw(msg, keysAndValues...)
}
func (l *warpLogger) DPanicw(msg string, keysAndValues ...interface{}) {
l.checkLock()
l.sugaredLogger.DPanicw(msg, keysAndValues...)
func (log *warpLogger) Errorw(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.Errorw(msg, keysAndValues...)
}
func (l *warpLogger) Sync() error {
return l.defLogger.Sync()
func (log *warpLogger) Fatalw(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.Fatalw(msg, keysAndValues...)
}
func (l *warpLogger) Desugar() Logger {
return l
func (log *warpLogger) Panicw(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.Panicw(msg, keysAndValues...)
}
func (l *warpLogger) UsedAt() time.Time {
unix := atomic.LoadUint32(&l._usedAt)
func (log *warpLogger) DPanicw(msg string, keysAndValues ...interface{}) {
log.checkLock()
log.sugaredLogger.DPanicw(msg, keysAndValues...)
}
func (log *warpLogger) Sync() error {
return log.defLogger.Sync()
}
func (log *warpLogger) Desugar() Logger {
return log
}
func (log *warpLogger) UsedAt() time.Time {
unix := atomic.LoadUint32(&log._usedAt)
return time.Unix(int64(unix), 0)
}
func (l *warpLogger) SetUsedAt(tm time.Time) {
atomic.StoreUint32(&l._usedAt, uint32(tm.Unix()))
func (log *warpLogger) SetUsedAt(tm time.Time) {
atomic.StoreUint32(&log._usedAt, uint32(tm.Unix()))
}
func (l *warpLogger) initSugaredLogger() {
func (log *warpLogger) initSugaredLogger() {
if l.sugaredLogger == nil {
l.sugaredLogger = l.defLogger.Sugar()
if log.sugaredLogger == nil {
log.sugaredLogger = log.defLogger.Sugar()
}
}
func (l *warpLogger) updateLogger(logger *zap.Logger) {
func (log *warpLogger) updateLogger(logger *zap.Logger) {
l.lock()
defer l.unlock()
_ = log.Sync()
_ = l.Sync()
log.defLogger = logger
l.defLogger = logger
if l.sugaredLogger != nil {
l.sugaredLogger = l.defLogger.Sugar()
if log.sugaredLogger != nil {
log.sugaredLogger = log.defLogger.Sugar()
}
}
func (l *warpLogger) lock() {
func (log *warpLogger) lock() {
l.mu.Lock()
atomic.StoreUint32(&l._locked, 1)
l._lockWait.Add(1)
atomic.StoreUint32(&log._locked, 1)
}
func (l *warpLogger) unlock() {
func (log *warpLogger) unlock() {
atomic.StoreUint32(&l._locked, 0)
l._lockWait.Done()
l.mu.Unlock()
atomic.StoreUint32(&log._locked, 0)
log._lockWait.Done()
log.mu.Unlock()
}
func (l *warpLogger) checkLock() {
func (log *warpLogger) checkLock() {
if l.locked() {
l._lockWait.Wait()
if log.locked() {
log._lockWait.Wait()
}
}
func (l *warpLogger) locked() bool {
return atomic.LoadUint32(&l._locked) == 1
func (log *warpLogger) locked() bool {
return atomic.LoadUint32(&log._locked) == 1
}

View File

@ -10,50 +10,32 @@ type loggerConfig struct {
Name string
// Global core config
Level zapcore.Level
Level zap.AtomicLevel
AddCaller bool
AddStacktrace zapcore.LevelEnabler
Parent *loggerConfig
coreConfigs map[string]zapcore.Level
coreConfigs map[string]zap.AtomicLevel
}
func (l *loggerConfig) updateConfigLevel(appenderName string, level zapcore.Level) {
if atomicLevel, ok := l.coreConfigs[appenderName]; ok {
atomicLevel.SetLevel(level)
}
}
func (l *loggerConfig) CreateLogger(appenders map[string]*appender.Appender) *warpLogger {
if l.Level == OffLevel {
return newLogger(l.Name, newZapLogger(l.Name, zapcore.NewNopCore()))
}
zc := newZapCore(l.getCoreConfigs(), appenders)
zc := newZapCore(l.coreConfigs, appenders)
zl := newZapLogger(l.Name, zc, zap.WithCaller(l.AddCaller), zap.AddStacktrace(l.AddStacktrace), zap.AddCallerSkip(1))
return newLogger(l.Name, zl)
}
func (l *loggerConfig) getCoreConfigs() map[string]zapcore.Level {
config := make(map[string]zapcore.Level)
for s, level := range l.coreConfigs {
config[s] = level
if !level.Enabled(l.Level) {
config[s] = l.Level
}
}
return config
}
func (l *loggerConfig) UpdateLogger(logger *warpLogger, appenders map[string]*appender.Appender) {
if l.Level == OffLevel {
logger.updateLogger(zap.NewNop())
}
zc := newZapCore(l.getCoreConfigs(), appenders)
zc := newZapCore(l.coreConfigs, appenders)
newLogger := zap.New(zc, zap.WithCaller(l.AddCaller), zap.AddStacktrace(l.AddStacktrace), zap.AddCallerSkip(1))
@ -71,7 +53,7 @@ func (l *loggerConfig) copy(name string) *loggerConfig {
Name: name,
Level: l.Level,
Parent: l.Parent,
coreConfigs: make(map[string]zapcore.Level),
coreConfigs: make(map[string]zap.AtomicLevel),
}
copyMapConfig(log.coreConfigs, l.coreConfigs)
@ -80,18 +62,19 @@ func (l *loggerConfig) copy(name string) *loggerConfig {
}
func copyMapConfig(dst map[string]zapcore.Level, src map[string]zapcore.Level) {
func copyMapConfig(dst map[string]zap.AtomicLevel, src map[string]zap.AtomicLevel) {
if len(src) == 0 {
return
}
if dst == nil {
dst = make(map[string]zapcore.Level, len(src))
dst = make(map[string]zap.AtomicLevel, len(src))
}
for name, level := range src {
dst[name] = level
dst[name] = zap.NewAtomicLevelAt(level.Level())
}
}

41
logger_test.go Normal file
View File

@ -0,0 +1,41 @@
package logos
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_With exercises Named + With on a fresh logger and hot level updates
// via SetLevel on the "CONSOLE" appender.
func Test_With(t *testing.T) {
	tests := []struct {
		name    string
		logName string
		fields  []Field
		want    string
	}{
		{
			"simple",
			"testLog",
			[]Field{
				String("string", "value"),
			},
			"logg",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			log := New(tt.logName)
			logWith := log.Named("named").With(tt.fields...)
			logWith.Info("info msg")
			// Debug before the level change should be suppressed (default InfoLevel).
			logWith.Debug("Debug before")
			SetLevel(tt.logName, DebugLevel, "CONSOLE")
			// Debug after the level change should now be emitted.
			logWith.Debug("Debug after")
			log.Debug("log debug")
			// NOTE(review): this compares tt.want ("logg") against the empty
			// string, so the assertion always fails; the test never captures
			// any output to compare. Confirm intent — tt.want looks unused.
			assert.Equalf(t, tt.want, "", "With(%v)", tt.fields)
		})
	}
}

View File

@ -3,10 +3,6 @@ package logos
import (
"errors"
"fmt"
"github.com/khorevaa/logos/config"
"github.com/khorevaa/logos/internal/common"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"io/ioutil"
"os"
"os/signal"
@ -16,6 +12,10 @@ import (
"sync"
"syscall"
"github.com/khorevaa/logos/config"
"github.com/khorevaa/logos/internal/common"
"go.uber.org/zap/zapcore"
_ "github.com/khorevaa/logos/appender"
_ "github.com/khorevaa/logos/encoder/common"
_ "github.com/khorevaa/logos/encoder/console"
@ -196,12 +196,8 @@ func New(name string) Logger {
manager.Sync()
return manager.NewLogger(name)
}
func SetLevel(name string, level zapcore.Level) {
manager.SetLevel(name, level)
}
func UpdateLogger(name string, logger *zap.Logger) {
manager.UpdateLogger(name, logger)
func SetLevel(name string, level zapcore.Level, appender ...string) {
manager.SetLevel(name, level, appender...)
}
func Sync() {

View File

@ -1,13 +1,14 @@
package logos
import (
log2 "log"
"sync"
"github.com/khorevaa/logos/appender"
config2 "github.com/khorevaa/logos/config"
"github.com/khorevaa/logos/internal/common"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"log"
"sync"
)
const (
@ -22,7 +23,7 @@ type logManager struct {
appenders map[string]*appender.Appender
rootLevel zapcore.Level
rootLevel zap.AtomicLevel
rootLogger *warpLogger
rootLoggerConfig *loggerConfig
@ -93,10 +94,15 @@ func (m *logManager) NewLogger(name string) Logger {
return m.getLogger(name)
}
func (m *logManager) SetLevel(name string, level zapcore.Level) {
func (m *logManager) SetLevel(name string, level zapcore.Level, appender ...string) {
logConfig := m.newCoreLoggerConfig(name)
for _, appenderName := range appender {
logConfig.updateConfigLevel(appenderName, level)
}
m.loggerConfigs.Store(name, logConfig)
// TODO Чтото сделать
//return m.getLogger(name)
}
func (m *logManager) getLogger(name string, lock ...bool) *warpLogger {
@ -165,10 +171,10 @@ func (m *logManager) getRootLoggerConfig() *loggerConfig {
log := &loggerConfig{
Name: name,
Level: m.rootLevel,
coreConfigs: make(map[string]zapcore.Level),
coreConfigs: make(map[string]zap.AtomicLevel),
AddStacktrace: StackTraceLevelEnabler,
}
log.coreConfigs["CONSOLE"] = InfoLevel
log.coreConfigs["CONSOLE"] = zap.NewAtomicLevelAt(InfoLevel)
m.rootLoggerConfig = log
m.loggerConfigs.Store(name, log)
@ -193,7 +199,7 @@ func (m *logManager) newLoggerFromCfg(loggerCfg config2.LoggerConfig) (*loggerCo
log.Level = level
if len(appenders) > 0 {
log.coreConfigs = make(map[string]zapcore.Level, len(appenders))
log.coreConfigs = make(map[string]zap.AtomicLevel, len(appenders))
for _, appenderName := range appenders {
log.coreConfigs[appenderName] = level
}
@ -216,7 +222,7 @@ func (m *logManager) newLoggerFromCfg(loggerCfg config2.LoggerConfig) (*loggerCo
log.AddStacktrace = StackTraceLevelEnabler
if tLevel, err := createLevel(loggerCfg.TraceLevel); len(loggerCfg.TraceLevel) > 0 && err == nil {
log.AddStacktrace = zap.NewAtomicLevelAt(tLevel)
log.AddStacktrace = tLevel
}
return log, nil
@ -224,31 +230,31 @@ func (m *logManager) newLoggerFromCfg(loggerCfg config2.LoggerConfig) (*loggerCo
func debugf(format string, args ...interface{}) {
if debug {
log.Printf(format, args...)
log2.Printf(format, args...)
}
}
func (m *logManager) loadCoreLoggerConfig(name string, parent *loggerConfig) *loggerConfig {
if log, ok := m.loggerConfigs.Load(name); ok {
return log.(*loggerConfig)
if logConfig, ok := m.loggerConfigs.Load(name); ok {
return logConfig.(*loggerConfig)
}
if parent == nil {
parent = m.rootLoggerConfig
}
log := &loggerConfig{
logConfig := &loggerConfig{
Name: name,
Parent: parent,
AddStacktrace: parent.AddStacktrace,
AddCaller: parent.AddCaller,
coreConfigs: make(map[string]zapcore.Level),
coreConfigs: make(map[string]zap.AtomicLevel),
}
copyMapConfig(log.coreConfigs, parent.coreConfigs)
m.loggerConfigs.Store(name, log)
return log
copyMapConfig(logConfig.coreConfigs, parent.coreConfigs)
m.loggerConfigs.Store(name, logConfig)
return logConfig
}
@ -354,7 +360,7 @@ func (m *logManager) newRootLoggerFromCfg(root config2.RootLogger) error {
rootLoggerConfig := &loggerConfig{
Name: rootLoggerName,
Level: m.rootLevel,
coreConfigs: make(map[string]zapcore.Level),
coreConfigs: make(map[string]zap.AtomicLevel),
AddStacktrace: StackTraceLevelEnabler,
}
@ -391,12 +397,12 @@ func (m *logManager) UpdateLogger(name string, logger *zap.Logger) {
core.updateLogger(logger)
}
func createLevel(level string) (zapcore.Level, error) {
func createLevel(level string) (zap.AtomicLevel, error) {
switch level {
case "off", "OFF", "false":
return OffLevel, nil
return zap.NewAtomicLevelAt(OffLevel), nil
default:
var l zapcore.Level
var l zap.AtomicLevel
err := l.UnmarshalText([]byte(level))
return l, err
}

269
timing.go
View File

@ -1,269 +0,0 @@
package logos
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"time"
)
// Kvs is a convenience alias for the key/value maps attached to job
// events, errors, timings, gauges and completions.
type Kvs map[string]interface{}
// Emitter is the sink that receives job instrumentation records:
// events, event errors, timings, completions and gauges. warpLogger
// implements Emitter by logging each record as structured fields.
type Emitter interface {
	EmitEvent(job string, event string, kvs map[string]interface{})
	EmitEventErr(job string, event string, err error, kvs map[string]interface{})
	EmitTiming(job string, event string, duration time.Duration, kvs map[string]interface{})
	EmitComplete(job string, status CompletionStatus, duration time.Duration, kvs map[string]interface{})
	EmitGauge(job string, event string, value float64, kvs map[string]interface{})
}
// Job starts a new timed job named name that emits through this logger.
// An optional first kvs map seeds the job's key/value pairs.
func (l *warpLogger) Job(name string, kvs ...map[string]interface{}) *Job {
	return newJob(name, l, kvs...)
}

// EventEmitter exposes this logger as an Emitter for job instrumentation.
func (l *warpLogger) EventEmitter() Emitter {
	return l
}
// newJob builds a Job that starts now and emits through emitter.
// An optional first kvs map seeds the job's key/values; extra maps
// beyond the first are ignored (matching the original behavior).
func newJob(name string, emitter Emitter, kvs ...map[string]interface{}) *Job {
	job := &Job{
		Name:      name,
		emitter:   emitter,
		Start:     time.Now(),
		KeyValues: map[string]interface{}{},
	}
	if len(kvs) > 0 {
		// Copy instead of aliasing kvs[0]: the previous code stored the
		// caller's map directly, so a later Job.KeyValue call mutated the
		// caller's map as a side effect.
		for k, v := range kvs[0] {
			job.KeyValues[k] = v
		}
	}
	return job
}
// Compile-time check that warpLogger satisfies Emitter.
var _ Emitter = (*warpLogger)(nil)

// EmitEvent logs a plain job event at the logger's configured emit level.
func (e *warpLogger) EmitEvent(job string, event string, kvs map[string]interface{}) {
	var fields []Field
	fields = append(fields, String("job", job), String("event", event))
	kvsToFields(&fields, kvs)
	e.emit(0, e.emitLevel, fields)
}

// EmitEventErr logs a job event together with its error, always at ErrorLevel.
func (e *warpLogger) EmitEventErr(job string, event string, err error, kvs map[string]interface{}) {
	var fields []Field
	fields = append(fields, String("job", job), String("event", event), Error(err))
	kvsToFields(&fields, kvs)
	e.emit(0, ErrorLevel, fields)
}

// EmitTiming logs a duration measurement for a job event at the emit level.
func (e *warpLogger) EmitTiming(job string, event string, duration time.Duration, kvs map[string]interface{}) {
	var fields []Field
	fields = append(fields, String("job", job), String("event", event), Duration("duration", duration))
	kvsToFields(&fields, kvs)
	e.emit(0, e.emitLevel, fields)
}

// EmitComplete logs a job completion with its status and total duration.
// Failure statuses are escalated: Err, ValidationError and Panic log at
// ErrorLevel, Junk at WarnLevel; Success uses the default emit level.
func (e *warpLogger) EmitComplete(job string, status CompletionStatus, duration time.Duration, kvs map[string]interface{}) {
	var fields []Field
	fields = append(fields, String("job", job), String("status", status.String()), Duration("duration", duration))
	kvsToFields(&fields, kvs)
	lvl := e.emitLevel
	switch status {
	case Err, ValidationError, Panic:
		lvl = ErrorLevel
	case Junk:
		lvl = WarnLevel
	}
	e.emit(0, lvl, fields)
}

// EmitGauge logs a point-in-time numeric value for a job event.
func (e *warpLogger) EmitGauge(job string, event string, value float64, kvs map[string]interface{}) {
	var fields []Field
	fields = append(fields, String("job", job), String("event", event), Float64("gauge", value))
	kvsToFields(&fields, kvs)
	e.emit(0, e.emitLevel, fields)
}
// kvsToFields appends one Any field per key/value pair in kvs to *fields.
// Map iteration order is random, so field order is not deterministic.
func kvsToFields(fields *[]Field, kvs map[string]interface{}) {
	for key, value := range kvs {
		*fields = append(*fields, Any(key, value))
	}
	// Redundant trailing bare `return` removed (staticcheck S1023).
}
// emit writes fields as a message-less log entry at emitLevel.
// When callerSkip > 0 the logger is rewrapped so caller reporting skips
// that many extra stack frames. Levels without a case below (e.g. an
// "off" level) are silently dropped.
func (e *warpLogger) emit(callerSkip int, emitLevel zapcore.Level, fields []zapcore.Field) {
	emitter := e.defLogger
	if callerSkip > 0 {
		emitter = e.defLogger.WithOptions(zap.AddCallerSkip(callerSkip))
	}
	switch emitLevel {
	case DebugLevel:
		emitter.Debug("", fields...)
	case InfoLevel:
		emitter.Info("", fields...)
	case WarnLevel:
		emitter.Warn("", fields...)
	case ErrorLevel:
		emitter.Error("", fields...)
	case PanicLevel:
		emitter.Panic("", fields...)
	case DPanicLevel:
		emitter.DPanic("", fields...)
	case FatalLevel:
		emitter.Fatal("", fields...)
	}
}
// CompletionStatus classifies how a Job finished.
type CompletionStatus int

const (
	// Success: the job completed successfully.
	Success CompletionStatus = iota
	// ValidationError: bad user input prevented completion.
	ValidationError
	// Panic: the job terminated in a panic.
	Panic
	// Err: a library or internal error prevented completion.
	Err
	// Junk: not completed, but not a real error either (e.g. a 404).
	Junk
)

// completionStatusToString maps each status to its logged representation.
var completionStatusToString = map[CompletionStatus]string{
	Success:         "success",
	ValidationError: "validation_error",
	Panic:           "panic",
	Err:             "error",
	Junk:            "junk",
}

// String returns the lowercase log name of cs ("" for unknown values).
func (cs CompletionStatus) String() string {
	return completionStatusToString[cs]
}
// Job records a named unit of work: when it started, where its
// instrumentation goes, and key/values stamped on every emitted record.
type Job struct {
	Name      string                 // job name, attached to every record
	emitter   Emitter                // sink for events/timings/gauges/completions
	Start     time.Time              // set at creation; Complete reports time.Since(Start)
	KeyValues map[string]interface{} // merged into every record's key/values
}
// Event emits a plain event under this job's name.
func (j *Job) Event(eventName string) {
	allKvs := j.mergedKeyValues(nil)
	j.emitter.EmitEvent(j.Name, eventName, allKvs)
}

// EventKv emits an event with extra key/values merged over the job's own.
func (j *Job) EventKv(eventName string, kvs map[string]interface{}) {
	allKvs := j.mergedKeyValues(kvs)
	j.emitter.EmitEvent(j.Name, eventName, allKvs)
}

// EventErr emits an error event and returns err unchanged for chaining.
func (j *Job) EventErr(eventName string, err error) error {
	allKvs := j.mergedKeyValues(nil)
	j.emitter.EmitEventErr(j.Name, eventName, err, allKvs)
	return err
}

// EventErrKv is EventErr with extra key/values.
func (j *Job) EventErrKv(eventName string, err error, kvs map[string]interface{}) error {
	allKvs := j.mergedKeyValues(kvs)
	j.emitter.EmitEventErr(j.Name, eventName, err, allKvs)
	return err
}

// Timing emits a duration measurement for eventName.
func (j *Job) Timing(eventName string, duration time.Duration) {
	allKvs := j.mergedKeyValues(nil)
	j.emitter.EmitTiming(j.Name, eventName, duration, allKvs)
}

// TimingKv is Timing with extra key/values.
func (j *Job) TimingKv(eventName string, duration time.Duration, kvs map[string]interface{}) {
	allKvs := j.mergedKeyValues(kvs)
	j.emitter.EmitTiming(j.Name, eventName, duration, allKvs)
}

// Gauge emits a point-in-time numeric value for eventName.
func (j *Job) Gauge(eventName string, value float64) {
	allKvs := j.mergedKeyValues(nil)
	j.emitter.EmitGauge(j.Name, eventName, value, allKvs)
}

// GaugeKv is Gauge with extra key/values.
func (j *Job) GaugeKv(eventName string, value float64, kvs map[string]interface{}) {
	allKvs := j.mergedKeyValues(kvs)
	j.emitter.EmitGauge(j.Name, eventName, value, allKvs)
}

// Complete emits the job's completion status with the elapsed time since Start.
func (j *Job) Complete(status CompletionStatus) {
	allKvs := j.mergedKeyValues(nil)
	j.emitter.EmitComplete(j.Name, status, time.Since(j.Start), allKvs)
}

// CompleteErr completes the job with status Err, attaching err under the
// "error" key, and returns err unchanged.
func (j *Job) CompleteErr(err error) error {
	j.CompleteKv(Err, map[string]interface{}{
		"error": err,
	})
	return err
}

// CompleteKv is Complete with extra key/values.
func (j *Job) CompleteKv(status CompletionStatus, kvs map[string]interface{}) {
	allKvs := j.mergedKeyValues(kvs)
	j.emitter.EmitComplete(j.Name, status, time.Since(j.Start), allKvs)
}

// KeyValue stamps a key/value onto the job — and thus onto every future
// record — allocating the map if needed. Returns j for chaining.
func (j *Job) KeyValue(key string, value string) *Job {
	if j.KeyValues == nil {
		j.KeyValues = make(map[string]interface{})
	}
	j.KeyValues[key] = value
	return j
}
// mergedKeyValues returns the union of the job's KeyValues and
// instanceKvs, with instanceKvs winning on key collisions. When at most
// one of the two maps is non-empty, that map is returned directly — no
// allocation — so callers must treat the result as read-only.
func (j *Job) mergedKeyValues(instanceKvs map[string]interface{}) map[string]interface{} {
	jobHas := len(j.KeyValues) > 0
	instHas := len(instanceKvs) > 0
	switch {
	case jobHas && instHas:
		// Both populated: allocate a fresh map, job values first so the
		// instance values override on shared keys.
		merged := make(map[string]interface{})
		for k, v := range j.KeyValues {
			merged[k] = v
		}
		for k, v := range instanceKvs {
			merged[k] = v
		}
		return merged
	case instHas:
		return instanceKvs
	case jobHas:
		return j.KeyValues
	default:
		return nil
	}
}

View File

@ -1,249 +0,0 @@
package logos
import (
"errors"
"testing"
"time"
)
func TestJob_Event(t *testing.T) {
type fields struct {
Name string
emitter Emitter
Start time.Time
KeyValues map[string]string
}
type args struct {
eventType string
event string
status CompletionStatus
nanos int64
err error
KeyValues map[string]string
}
emit := New("job_emitter").EventEmitter()
tests := []struct {
name string
fields fields
args args
}{
{
"emit event",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "Event",
event: "get_users",
},
},
{
"emit event with kv",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "EventKv",
event: "get_users",
KeyValues: map[string]string{
"connect_string": "localhost:1545",
"user": "admin",
"table": "users",
},
},
},
{
"emit event error",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "EventErr",
event: "get_users",
err: errors.New("event error"),
},
},
{
"emit event error with kv",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "EventErrKv",
event: "get_users",
err: errors.New("event error with kv"),
KeyValues: map[string]string{
"connect_string": "localhost:1545",
"user": "admin",
"table": "users",
},
},
},
{
"emit event complete",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "Complete",
event: "get_users",
err: errors.New("event error"),
},
},
{
"emit event complete panic",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "Complete",
status: Panic,
event: "get_users",
},
},
{
"emit event complete validate error",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "Complete",
status: ValidationError,
event: "get_users",
},
},
{
"emit event complete Junk",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "Complete",
status: Junk,
event: "get_users",
},
},
{
"emit event complete with kv",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "CompleteKv",
event: "get_users",
err: errors.New("event error with kv"),
KeyValues: map[string]string{
"connect_string": "localhost:1545",
"user": "admin",
"table": "users",
},
},
},
{
"emit event timing",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "Timing",
event: "fetch_users",
nanos: 54000,
},
},
{
"emit event timing with kv",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
},
args{
eventType: "TimingKv",
event: "fetch_users",
nanos: 54000,
KeyValues: map[string]string{
"connect_string": "localhost:1545",
"raw_sql": "select * from users",
"table": "users",
},
},
},
{
"emit event job with kv",
fields{
Name: "users_job",
emitter: emit,
Start: time.Now(),
KeyValues: map[string]string{
"connect_string": "localhost:1545",
},
},
args{
eventType: "TimingKv",
event: "fetch_users",
nanos: 54000,
KeyValues: map[string]string{
"raw_sql": "select * from users",
"table": "users",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
j := &Job{
Name: tt.fields.Name,
emitter: tt.fields.emitter,
Start: tt.fields.Start,
KeyValues: tt.fields.KeyValues,
}
switch tt.args.eventType {
case "Event":
j.Event(tt.args.event)
case "EventKv":
j.EventKv(tt.args.event, tt.args.KeyValues)
case "EventErr":
j.EventErr(tt.args.event, tt.args.err)
case "EventErrKv":
j.EventErrKv(tt.args.event, tt.args.err, tt.args.KeyValues)
case "Complete":
j.Complete(tt.args.status)
case "CompleteKv":
j.CompleteKv(tt.args.status, tt.args.KeyValues)
case "Timing":
j.Timing(tt.args.event, tt.args.nanos)
case "TimingKv":
j.TimingKv(tt.args.event, tt.args.nanos, tt.args.KeyValues)
case "Gauge":
j.Gauge(tt.args.event, float64(tt.args.nanos))
case "GaugeKv":
j.GaugeKv(tt.args.event, float64(tt.args.nanos), tt.args.KeyValues)
}
})
}
}

View File

@ -51,13 +51,13 @@ type loggerFormat interface {
type Logger interface {
loggerFields
Named(s string) Logger
With(fields ...Field) Logger
Sync() error
Sugar() SugaredLogger
Job(name string, kvs ...map[string]interface{}) *Job
EventEmitter() Emitter
}
type SugaredLogger interface {
@ -111,8 +111,4 @@ type SugaredLogger interface {
Sync() error
Desugar() Logger
Job(name string, kvs ...map[string]interface{}) *Job
EventEmitter() Emitter
}

View File

@ -8,16 +8,12 @@ import (
var StackTraceLevelEnabler = zap.NewAtomicLevelAt(zapcore.PanicLevel)
func newZapCore(config map[string]zapcore.Level, appenders map[string]*appender.Appender) zapcore.Core {
func newZapCore(config map[string]zap.AtomicLevel, appenders map[string]*appender.Appender) zapcore.Core {
zcs := make([]zapcore.Core, 0)
for name, level := range config {
if level == OffLevel {
continue
}
if a, ok := appenders[name]; ok {
zcs = append(zcs, zapcore.NewCore(a.Encoder, a.Writer, level))
}