diff --git a/cmd/bisync/bilib/output.go b/cmd/bisync/bilib/output.go index c35abde86..00ac8d78e 100644 --- a/cmd/bisync/bilib/output.go +++ b/cmd/bisync/bilib/output.go @@ -3,20 +3,22 @@ package bilib import ( "bytes" - "log" + "log/slog" - "github.com/sirupsen/logrus" + "github.com/rclone/rclone/fs/log" ) -// CaptureOutput runs a function capturing its output. +// CaptureOutput runs a function capturing its output at log level INFO. func CaptureOutput(fun func()) []byte { - logSave := log.Writer() - logrusSave := logrus.StandardLogger().Out buf := &bytes.Buffer{} - log.SetOutput(buf) - logrus.SetOutput(buf) + oldLevel := log.Handler.SetLevel(slog.LevelInfo) + log.Handler.SetOutput(func(level slog.Level, text string) { + buf.WriteString(text) + }) + defer func() { + log.Handler.ResetOutput() + log.Handler.SetLevel(oldLevel) + }() fun() - log.SetOutput(logSave) - logrus.SetOutput(logrusSave) return buf.Bytes() } diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go index dfe994a63..41dbcd75a 100644 --- a/cmd/ncdu/ncdu.go +++ b/cmd/ncdu/ncdu.go @@ -6,6 +6,8 @@ package ncdu import ( "context" "fmt" + "log/slog" + "os" "path" "reflect" "sort" @@ -925,23 +927,19 @@ func (u *UI) Run() error { return fmt.Errorf("screen init: %w", err) } - // Hijack fs.LogOutput so that it doesn't corrupt the screen. - if logOutput := fs.LogOutput; !log.Redirected() { - type log struct { - text string - level fs.LogLevel - } - var logs []log - fs.LogOutput = func(level fs.LogLevel, text string) { + // Hijack log output so that it doesn't corrupt the screen. + if !log.Redirected() { + var logs []string + log.Handler.SetOutput(func(level slog.Level, text string) { if len(logs) > 100 { logs = logs[len(logs)-100:] } - logs = append(logs, log{level: level, text: text}) - } + logs = append(logs, text) + }) defer func() { - fs.LogOutput = logOutput - for i := range logs { - logOutput(logs[i].level, logs[i].text) + log.Handler.ResetOutput() + for _, text := range logs { + _, _ = os.Stderr.WriteString(text) } }() } diff --git a/cmd/progress.go b/cmd/progress.go index 2963ad396..539f0b801 100644 --- a/cmd/progress.go +++ b/cmd/progress.go @@ -5,11 +5,11 @@ package cmd import ( "bytes" "fmt" + "log/slog" "strings" "sync" "time" - "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" @@ -19,8 +19,6 @@ import ( const ( // interval between progress prints defaultProgressInterval = 500 * time.Millisecond - // time format for logging - logTimeFormat = "2006/01/02 15:04:05" ) // startProgress starts the progress bar printing @@ -28,15 +26,13 @@ const ( // It returns a func which should be called to stop the stats. 
func startProgress() func() { stopStats := make(chan struct{}) - oldLogOutput := fs.LogOutput oldSyncPrint := operations.SyncPrintf if !log.Redirected() { // Intercept the log calls if not logging to file or syslog - fs.LogOutput = func(level fs.LogLevel, text string) { - printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text)) - - } + log.Handler.SetOutput(func(level slog.Level, text string) { + printProgress(text) + }) } // Intercept output from functions such as HashLister to stdout @@ -60,7 +56,10 @@ func startProgress() func() { case <-stopStats: ticker.Stop() printProgress("") - fs.LogOutput = oldLogOutput + if !log.Redirected() { + // Reset intercept of the log calls + log.Handler.ResetOutput() + } operations.SyncPrintf = oldSyncPrint fmt.Println("") return diff --git a/cmdtest/environment_test.go b/cmdtest/environment_test.go index 47410c687..86ab0b4de 100644 --- a/cmdtest/environment_test.go +++ b/cmdtest/environment_test.go @@ -48,7 +48,7 @@ func TestEnvironmentVariables(t *testing.T) { env = "RCLONE_LOG_LEVEL=DEBUG" out, err = rcloneEnv(env, "version", "--quiet") if assert.Error(t, err) { - assert.Contains(t, out, " DEBUG : ") + assert.Contains(t, out, " DEBUG ") assert.Contains(t, out, "Can't set -q and --log-level") assert.Contains(t, "exit status 1", err.Error()) } @@ -329,7 +329,7 @@ func TestEnvironmentVariables(t *testing.T) { jsonLogOK := func() { t.Helper() if assert.NoError(t, err) { - assert.Contains(t, out, `{"level":"debug",`) + assert.Contains(t, out, `"level":"debug"`) assert.Contains(t, out, `"msg":"Version `) assert.Contains(t, out, `"}`) } diff --git a/docs/content/docs.md b/docs/content/docs.md index 7bbbbd1fe..95ca98c65 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -1483,12 +1483,21 @@ have a signal to rotate logs. ### --log-format LIST ### -Comma separated list of log format options. Accepted options are `date`, -`time`, `microseconds`, `pid`, `longfile`, `shortfile`, `UTC`. Any other -keywords will be silently ignored. `pid` will tag log messages with process -identifier which useful with `rclone mount --daemon`. Other accepted -options are explained in the [go documentation](https://pkg.go.dev/log#pkg-constants). -The default log format is "`date`,`time`". +Comma separated list of log format options. The accepted options are: + +- `date` - Add a date in the format YYYY/MM/DD to the log. +- `time` - Add a time to the log in format HH:MM:SS. +- `microseconds` - Add microseconds to the time in format HH:MM:SS.SSSSSS. +- `UTC` - Make the logs in UTC not localtime. +- `longfile` - Adds the source file and line number of the log statement. +- `shortfile` - Adds the source file and line number of the log statement. +- `pid` - Add the process ID to the log - useful with `rclone mount --daemon`. +- `nolevel` - Don't add the level to the log. +- `json` - Equivalent to adding `--use-json-log`. + +They are added to the log line in the order above. + +The default log format is `"date,time"`. ### --log-level LEVEL ### @@ -1508,8 +1517,61 @@ warnings and significant events. ### --use-json-log ### -This switches the log format to JSON for rclone. The fields of json log -are level, msg, source, time. +This switches the log format to JSON for rclone. The fields of JSON +log are `level`, `msg`, `source`, `time`. The JSON logs will be +printed on a single line, but are shown expanded here for clarity.
+ +```json +{ + "time": "2025-05-13T17:30:51.036237518+01:00", + "level": "debug", + "msg": "4 go routines active\n", + "source": "cmd/cmd.go:298" +} +``` + +Completed data transfer logs will have extra `size` information. Logs +which are about a particular object will have `object` and +`objectType` fields also. + +```json +{ + "time": "2025-05-13T17:38:05.540846352+01:00", + "level": "info", + "msg": "Copied (new) to: file2.txt", + "size": 6, + "object": "file.txt", + "objectType": "*local.Object", + "source": "operations/copy.go:368" +} +``` + +Stats logs will contain a `stats` field which is the same as +returned from the rc call [core/stats](/rc/#core-stats). + +```json +{ + "time": "2025-05-13T17:38:05.540912847+01:00", + "level": "info", + "msg": "...text version of the stats...", + "stats": { + "bytes": 6, + "checks": 0, + "deletedDirs": 0, + "deletes": 0, + "elapsedTime": 0.000904825, + ...truncated for clarity... + "totalBytes": 6, + "totalChecks": 0, + "totalTransfers": 1, + "transferTime": 0.000882794, + "transfers": 1 + }, + "source": "accounting/stats.go:569" +} +``` + + ### --low-level-retries NUMBER ### diff --git a/fs/config.go b/fs/config.go index e8f0dee8c..0d3f9ddc5 100644 --- a/fs/config.go +++ b/fs/config.go @@ -671,9 +671,13 @@ func init() { RegisterGlobalOptions(OptionsInfo{Name: "main", Opt: globalConfig, Options: ConfigOptionsInfo, Reload: globalConfig.Reload}) // initial guess at log level from the flags - globalConfig.LogLevel = initialLogLevel() + globalConfig.LogLevel = InitialLogLevel() } +// LogReload is written by fs/log to set variables which should really +// be there but we can't move due to them being visible here in the rc. +var LogReload = func(*ConfigInfo) error { return nil } + // Reload assumes the config has been edited and does what is necessary to make it live func (ci *ConfigInfo) Reload(ctx context.Context) error { // Set -vv if --dump is in use @@ -687,11 +691,6 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error { ci.StatsLogLevel = LogLevelNotice } - // If --use-json-log then start the JSON logger - if ci.UseJSONLog { - InstallJSONLogger(ci.LogLevel) - } - // Check --compare-dest and --copy-dest if len(ci.CompareDest) > 0 && len(ci.CopyDest) > 0 { return fmt.Errorf("can't use --compare-dest with --copy-dest") @@ -731,13 +730,12 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error { nonZero(&ci.Transfers) nonZero(&ci.Checkers) - return nil + return LogReload(ci) } -// Initial logging level -// -// Perform a simple check for debug flags to enable debug logging during the flag initialization -func initialLogLevel() LogLevel { +// InitialLogLevel performs a simple check for debug flags to enable +// debug logging during the flag initialization. +func InitialLogLevel() LogLevel { logLevel := LogLevelNotice for argIndex, arg := range os.Args { if strings.HasPrefix(arg, "-vv") && strings.TrimRight(arg, "v") == "-" { diff --git a/fs/log.go b/fs/log.go index d7efc5b61..98350b021 100644 --- a/fs/log.go +++ b/fs/log.go @@ -4,10 +4,9 @@ import ( "context" "encoding/json" "fmt" - "log" + "log/slog" "os" - - "github.com/sirupsen/logrus" + "slices" ) // LogLevel describes rclone's logs. These are a subset of the syslog log levels. @@ -54,19 +53,31 @@ func (logLevelChoices) Type() string { return "LogLevel" } -// LogPrintPid enables process pid in log -var LogPrintPid = false +// slogLevel definitions defined as slog.Level constants. +// The integer values determine severity for filtering. 
+// Lower values are less severe (e.g., Debug), higher values are more severe (e.g., Emergency). +// We fit our extra values into slog's scale. +const ( + // slog.LevelDebug slog.Level = -4 + // slog.LevelInfo slog.Level = 0 + SlogLevelNotice = slog.Level(2) // Between Info (0) and Warn (4) + // slog.LevelWarn slog.Level = 4 + // slog.LevelError slog.Level = 8 + SlogLevelCritical = slog.Level(12) // More severe than Error + SlogLevelAlert = slog.Level(16) // More severe than Critical + SlogLevelEmergency = slog.Level(20) // Most severe +) -// InstallJSONLogger is a hook that --use-json-log calls -var InstallJSONLogger = func(logLevel LogLevel) {} - -// LogOutput sends the text to the logger of level -var LogOutput = func(level LogLevel, text string) { - text = fmt.Sprintf("%-6s: %s", level, text) - if LogPrintPid { - text = fmt.Sprintf("[%d] %s", os.Getpid(), text) - } - _ = log.Output(4, text) +// Map our level numbers to slog level numbers +var levelToSlog = []slog.Level{ + LogLevelEmergency: SlogLevelEmergency, + LogLevelAlert: SlogLevelAlert, + LogLevelCritical: SlogLevelCritical, + LogLevelError: slog.LevelError, + LogLevelWarning: slog.LevelWarn, + LogLevelNotice: SlogLevelNotice, + LogLevelInfo: slog.LevelInfo, + LogLevelDebug: slog.LevelDebug, } // LogValueItem describes keyed item for a JSON log entry @@ -108,76 +119,45 @@ func (j LogValueItem) String() string { return fmt.Sprint(j.value) } -func logLogrus(level LogLevel, text string, fields logrus.Fields) { - switch level { - case LogLevelDebug: - logrus.WithFields(fields).Debug(text) - case LogLevelInfo: - logrus.WithFields(fields).Info(text) - case LogLevelNotice, LogLevelWarning: - logrus.WithFields(fields).Warn(text) - case LogLevelError: - logrus.WithFields(fields).Error(text) - case LogLevelCritical: - logrus.WithFields(fields).Fatal(text) - case LogLevelEmergency, LogLevelAlert: - logrus.WithFields(fields).Panic(text) +// LogLevelToSlog converts an rclone log level to log/slog log level. +func LogLevelToSlog(level LogLevel) slog.Level { + slogLevel := slog.LevelError + // NB level is unsigned so we don't check < 0 here + if int(level) < len(levelToSlog) { + slogLevel = levelToSlog[level] } + return slogLevel } -func logLogrusWithObject(level LogLevel, o any, text string, fields logrus.Fields) { +func logSlog(level LogLevel, text string, attrs []any) { + slog.Log(context.Background(), LogLevelToSlog(level), text, attrs...) +} + +func logSlogWithObject(level LogLevel, o any, text string, attrs []any) { if o != nil { - if fields == nil { - fields = logrus.Fields{} - } - fields["object"] = fmt.Sprintf("%+v", o) - fields["objectType"] = fmt.Sprintf("%T", o) + attrs = slices.Concat(attrs, []any{ + "object", fmt.Sprintf("%+v", o), + "objectType", fmt.Sprintf("%T", o), + }) } - logLogrus(level, text, fields) -} - -func logJSON(level LogLevel, o any, text string) { - logLogrusWithObject(level, o, text, nil) -} - -func logJSONf(level LogLevel, o any, text string, args ...any) { - text = fmt.Sprintf(text, args...) 
- fields := logrus.Fields{} - for _, arg := range args { - if item, ok := arg.(LogValueItem); ok { - fields[item.key] = item.value - } - } - logLogrusWithObject(level, o, text, fields) -} - -func logPlain(level LogLevel, o any, text string) { - if o != nil { - text = fmt.Sprintf("%v: %s", o, text) - } - LogOutput(level, text) -} - -func logPlainf(level LogLevel, o any, text string, args ...any) { - logPlain(level, o, fmt.Sprintf(text, args...)) + logSlog(level, text, attrs) } // LogPrint produces a log string from the arguments passed in func LogPrint(level LogLevel, o any, text string) { - if GetConfig(context.TODO()).UseJSONLog { - logJSON(level, o, text) - } else { - logPlain(level, o, text) - } + logSlogWithObject(level, o, text, nil) } // LogPrintf produces a log string from the arguments passed in func LogPrintf(level LogLevel, o any, text string, args ...any) { - if GetConfig(context.TODO()).UseJSONLog { - logJSONf(level, o, text, args...) - } else { - logPlainf(level, o, text, args...) + text = fmt.Sprintf(text, args...) + var fields []any + for _, arg := range args { + if item, ok := arg.(LogValueItem); ok { + fields = append(fields, item.key, item.value) + } } + logSlogWithObject(level, o, text, fields) } // LogLevelPrint writes logs at the given level diff --git a/fs/log/caller_hook.go b/fs/log/caller_hook.go deleted file mode 100644 index dd2c5c470..000000000 --- a/fs/log/caller_hook.go +++ /dev/null @@ -1,106 +0,0 @@ -package log - -import ( - "fmt" - "runtime" - "strings" - - "github.com/rclone/rclone/fs" - "github.com/sirupsen/logrus" -) - -var loggerInstalled = false - -// InstallJSONLogger installs the JSON logger at the specified log level -func InstallJSONLogger(logLevel fs.LogLevel) { - if !loggerInstalled { - logrus.AddHook(NewCallerHook()) - loggerInstalled = true - } - logrus.SetFormatter(&logrus.JSONFormatter{ - TimestampFormat: "2006-01-02T15:04:05.999999-07:00", - }) - logrus.SetLevel(logrus.DebugLevel) - switch logLevel { - case fs.LogLevelEmergency, fs.LogLevelAlert: - logrus.SetLevel(logrus.PanicLevel) - case fs.LogLevelCritical: - logrus.SetLevel(logrus.FatalLevel) - case fs.LogLevelError: - logrus.SetLevel(logrus.ErrorLevel) - case fs.LogLevelWarning, fs.LogLevelNotice: - logrus.SetLevel(logrus.WarnLevel) - case fs.LogLevelInfo: - logrus.SetLevel(logrus.InfoLevel) - case fs.LogLevelDebug: - logrus.SetLevel(logrus.DebugLevel) - } -} - -// install hook in fs to call to avoid circular dependency -func init() { - fs.InstallJSONLogger = InstallJSONLogger -} - -// CallerHook for log the calling file and line of the fine -type CallerHook struct { - Field string - Skip int - levels []logrus.Level -} - -// NewCallerHook use to make a hook -func NewCallerHook(levels ...logrus.Level) logrus.Hook { - hook := CallerHook{ - Field: "source", - Skip: 7, - levels: levels, - } - if len(hook.levels) == 0 { - hook.levels = logrus.AllLevels - } - return &hook -} - -// Levels implement applied hook to which levels -func (h *CallerHook) Levels() []logrus.Level { - return logrus.AllLevels -} - -// Fire logs the information of context (filename and line) -func (h *CallerHook) Fire(entry *logrus.Entry) error { - entry.Data[h.Field] = findCaller(h.Skip) - return nil -} - -// findCaller ignores the caller relevant to logrus or fslog then find out the exact caller -func findCaller(skip int) string { - file := "" - line := 0 - for i := range 10 { - file, line = getCaller(skip + i) - if !strings.HasPrefix(file, "logrus") && !strings.Contains(file, "log.go") { - break - } - } - return 
fmt.Sprintf("%s:%d", file, line) -} - -func getCaller(skip int) (string, int) { - _, file, line, ok := runtime.Caller(skip) - // fmt.Println(file,":",line) - if !ok { - return "", 0 - } - n := 0 - for i := len(file) - 1; i > 0; i-- { - if file[i] == '/' { - n++ - if n >= 2 { - file = file[i+1:] - break - } - } - } - return file, line -} diff --git a/fs/log/log.go b/fs/log/log.go index 2e4df791b..b94f4d621 100644 --- a/fs/log/log.go +++ b/fs/log/log.go @@ -4,14 +4,12 @@ package log import ( "context" "io" - "log" "os" "reflect" "runtime" "strings" "github.com/rclone/rclone/fs" - "github.com/sirupsen/logrus" ) // OptionsInfo descripts the Options in use @@ -22,7 +20,7 @@ var OptionsInfo = fs.Options{{ Groups: "Logging", }, { Name: "log_format", - Default: "date,time", + Default: logFormatDate | logFormatTime, Help: "Comma separated list of log format options", Groups: "Logging", }, { @@ -44,11 +42,11 @@ var OptionsInfo = fs.Options{{ // Options contains options for controlling the logging type Options struct { - File string `config:"log_file"` // Log everything to this file - Format string `config:"log_format"` // Comma separated list of log format options - UseSyslog bool `config:"syslog"` // Use Syslog for logging - SyslogFacility string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,... - LogSystemdSupport bool `config:"log_systemd"` // set if using systemd logging + File string `config:"log_file"` // Log everything to this file + Format logFormat `config:"log_format"` // Comma separated list of log format options + UseSyslog bool `config:"syslog"` // Use Syslog for logging + SyslogFacility string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,... + LogSystemdSupport bool `config:"log_systemd"` // set if using systemd logging } func init() { @@ -58,6 +56,37 @@ func init() { // Opt is the options for the logger var Opt Options +// enum for the log format +type logFormat = fs.Bits[logFormatChoices] + +const ( + logFormatDate logFormat = 1 << iota + logFormatTime + logFormatMicroseconds + logFormatUTC + logFormatLongFile + logFormatShortFile + logFormatPid + logFormatNoLevel + logFormatJSON +) + +type logFormatChoices struct{} + +func (logFormatChoices) Choices() []fs.BitsChoicesInfo { + return []fs.BitsChoicesInfo{ + {Bit: uint64(logFormatDate), Name: "date"}, + {Bit: uint64(logFormatTime), Name: "time"}, + {Bit: uint64(logFormatMicroseconds), Name: "microseconds"}, + {Bit: uint64(logFormatUTC), Name: "UTC"}, + {Bit: uint64(logFormatLongFile), Name: "longfile"}, + {Bit: uint64(logFormatShortFile), Name: "shortfile"}, + {Bit: uint64(logFormatPid), Name: "pid"}, + {Bit: uint64(logFormatNoLevel), Name: "nolevel"}, + {Bit: uint64(logFormatJSON), Name: "json"}, + } +} + // fnName returns the name of the calling +2 function func fnName() string { pc, _, _, ok := runtime.Caller(2) @@ -114,31 +143,24 @@ func Stack(o any, info string) { fs.LogPrintf(fs.LogLevelDebug, o, "%s\nStack trace:\n%s", info, buf) } +// This is called from fs when the config is reloaded +// +// The config should really be here but we can't move it as it is +// externally visible in the rc. 
+func logReload(ci *fs.ConfigInfo) error { + Handler.SetLevel(fs.LogLevelToSlog(ci.LogLevel)) + return nil +} + +func init() { + fs.LogReload = logReload +} + // InitLogging start the logging as per the command line flags func InitLogging() { - flagsStr := "," + Opt.Format + "," - var flags int - if strings.Contains(flagsStr, ",date,") { - flags |= log.Ldate - } - if strings.Contains(flagsStr, ",time,") { - flags |= log.Ltime - } - if strings.Contains(flagsStr, ",microseconds,") { - flags |= log.Lmicroseconds - } - if strings.Contains(flagsStr, ",UTC,") { - flags |= log.LUTC - } - if strings.Contains(flagsStr, ",longfile,") { - flags |= log.Llongfile - } - if strings.Contains(flagsStr, ",shortfile,") { - flags |= log.Lshortfile - } - log.SetFlags(flags) - - fs.LogPrintPid = strings.Contains(flagsStr, ",pid,") + // Note that ci only has the defaults in at this point + // We set real values in logReload + ci := fs.GetConfig(context.Background()) // Log file output if Opt.File != "" { @@ -150,17 +172,27 @@ func InitLogging() { if err != nil { fs.Errorf(nil, "Failed to seek log file to end: %v", err) } - log.SetOutput(f) - logrus.SetOutput(f) redirectStderr(f) + Handler.setWriter(f) } + // --use-json-log implies JSON formatting + if ci.UseJSONLog { + Opt.Format |= logFormatJSON + } + + // Set slog level to initial log level + Handler.SetLevel(fs.LogLevelToSlog(fs.InitialLogLevel())) + + // Set the format to the configured format + Handler.setFormat(Opt.Format) + // Syslog output if Opt.UseSyslog { if Opt.File != "" { fs.Fatalf(nil, "Can't use --syslog and --log-file together") } - startSysLog() + startSysLog(Handler) } // Activate systemd logger support if systemd invocation ID is @@ -173,7 +205,7 @@ func InitLogging() { // Systemd logging output if Opt.LogSystemdSupport { - startSystemdLog() + startSystemdLog(Handler) } } diff --git a/fs/log/slog.go b/fs/log/slog.go new file mode 100644 index 000000000..10b64a610 --- /dev/null +++ b/fs/log/slog.go @@ -0,0 +1,391 @@ +// Interfaces for the slog package + +package log + +import ( + "bytes" + "context" + "fmt" + "io" + "log/slog" + "os" + "runtime" + "strings" + "sync" + "time" + + "github.com/rclone/rclone/fs" +) + +// Handler is the standard handler for the logging. +var Handler = defaultHandler() + +// Create the default OutputHandler +// +// This logs to stderr with standard go logger format at level INFO. +// +// This will be adjusted by InitLogging to be the configured levels +// but it is important we have a logger running regardless of whether +// InitLogging has been called yet or not. 
+func defaultHandler() *OutputHandler { + // Default options for default handler + var opts = &slog.HandlerOptions{ + Level: fs.LogLevelToSlog(fs.InitialLogLevel()), + } + + // Create our handler + h := NewOutputHandler(os.Stderr, opts, logFormatDate|logFormatTime) + + // Set the slog default handler + slog.SetDefault(slog.New(h)) + + // Make log.Printf logs at level Notice + slog.SetLogLoggerLevel(fs.SlogLevelNotice) + + return h +} + +// Map slog level names to string +var slogNames = map[slog.Level]string{ + slog.LevelDebug: "DEBUG", + slog.LevelInfo: "INFO", + fs.SlogLevelNotice: "NOTICE", + slog.LevelWarn: "WARNING", + slog.LevelError: "ERROR", + fs.SlogLevelCritical: "CRITICAL", + fs.SlogLevelAlert: "ALERT", + fs.SlogLevelEmergency: "EMERGENCY", +} + +// Convert a slog level to string using rclone's extra levels +func slogLevelToString(level slog.Level) string { + levelStr := slogNames[level] + if levelStr == "" { + levelStr = level.String() + } + return levelStr +} + +// ReplaceAttr function to customize the level key's string value in logs +func mapLogLevelNames(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.LevelKey { + level, ok := a.Value.Any().(slog.Level) + if !ok { + return a + } + levelStr := strings.ToLower(slogLevelToString(level)) + a.Value = slog.StringValue(levelStr) + } + return a +} + +// get the file and line number of the caller skipping skip levels +func getCaller(skip int) string { + var pc [64]uintptr + n := runtime.Callers(skip, pc[:]) + if n == 0 { + return "" + } + frames := runtime.CallersFrames(pc[:n]) + var more = true + var frame runtime.Frame + for more { + frame, more = frames.Next() + + file := frame.File + if strings.Contains(file, "/log/") || strings.HasSuffix(file, "log.go") { + continue + } + line := frame.Line + + // shorten file name + n := 0 + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + n++ + if n >= 2 { + file = file[i+1:] + break + } + } + } + return fmt.Sprintf("%s:%d", file, line) + } + return "" +} + +// OutputHandler is a slog.Handler that writes log records in a format +// identical to the standard library's `log` package (e.g., "YYYY/MM/DD HH:MM:SS message"). +// +// It can also write logs in JSON format identical to logrus. +type OutputHandler struct { + opts slog.HandlerOptions + levelVar slog.LevelVar + writer io.Writer + mu sync.Mutex + output []outputFn // log to writer if empty or the last item + outputExtra []outputExtra // log to all these additional places + format logFormat + jsonBuf bytes.Buffer + jsonHandler *slog.JSONHandler +} + +// Records the type and function pointer for extra logging output. +type outputExtra struct { + json bool + output outputFn +} + +// Define the type of the override logger +type outputFn func(level slog.Level, text string) + +// NewOutputHandler creates a new OutputHandler with the specified flags. +// +// This is designed to use log/slog but produce output which is +// backwards compatible with previous rclone versions. +// +// If opts is nil, default options are used, with Level set to +// slog.LevelInfo. 
+func NewOutputHandler(out io.Writer, opts *slog.HandlerOptions, format logFormat) *OutputHandler { + h := &OutputHandler{ + writer: out, + format: format, + } + if opts != nil { + h.opts = *opts + } + if h.opts.Level == nil { + h.opts.Level = slog.LevelInfo + } + // Set the level var with the configured level + h.levelVar.Set(h.opts.Level.Level()) + // And use it from now on + h.opts.Level = &h.levelVar + + // Create the JSON logger in case we need it + jsonOpts := slog.HandlerOptions{ + Level: h.opts.Level, + ReplaceAttr: mapLogLevelNames, + } + h.jsonHandler = slog.NewJSONHandler(&h.jsonBuf, &jsonOpts) + return h +} + +// SetOutput sets a new output handler for the log output. +// +// This is for temporarily overriding the output. +func (h *OutputHandler) SetOutput(fn outputFn) { + h.output = append(h.output, fn) +} + +// ResetOutput resets the log output to what it was. +func (h *OutputHandler) ResetOutput() { + if len(h.output) > 0 { + h.output = h.output[:len(h.output)-1] + } +} + +// AddOutput adds an additional logging destination of the type specified. +func (h *OutputHandler) AddOutput(json bool, fn outputFn) { + h.outputExtra = append(h.outputExtra, outputExtra{ + json: json, + output: fn, + }) +} + +// SetLevel sets a new log level, returning the old one. +func (h *OutputHandler) SetLevel(level slog.Level) slog.Level { + oldLevel := h.levelVar.Level() + h.levelVar.Set(level) + return oldLevel +} + +// Set the writer for the log to that passed. +func (h *OutputHandler) setWriter(writer io.Writer) { + h.writer = writer +} + +// Set the format flags to that passed in. +func (h *OutputHandler) setFormat(format logFormat) { + h.format = format +} + +// clear format flags that this output type doesn't want +func (h *OutputHandler) clearFormatFlags(bitMask logFormat) { + h.format &^= bitMask +} + +// set format flags that this output type requires +func (h *OutputHandler) setFormatFlags(bitMask logFormat) { + h.format |= bitMask +} + +// Enabled returns whether this logger is enabled for this level. +func (h *OutputHandler) Enabled(_ context.Context, level slog.Level) bool { + minLevel := slog.LevelInfo + if h.opts.Level != nil { + minLevel = h.opts.Level.Level() + } + return level >= minLevel +} + +// Create a log header in Go standard log format.
+func (h *OutputHandler) formatStdLogHeader(buf *bytes.Buffer, level slog.Level, t time.Time, object string, lineInfo string) { + // Add time in Go standard format if requested + if h.format&(logFormatDate|logFormatTime|logFormatMicroseconds) != 0 { + if h.format&logFormatUTC != 0 { + t = t.UTC() + } + if h.format&logFormatDate != 0 { + year, month, day := t.Date() + fmt.Fprintf(buf, "%04d/%02d/%02d ", year, month, day) + } + if h.format&(logFormatTime|logFormatMicroseconds) != 0 { + hour, min, sec := t.Clock() + fmt.Fprintf(buf, "%02d:%02d:%02d", hour, min, sec) + if h.format&logFormatMicroseconds != 0 { + fmt.Fprintf(buf, ".%06d", t.Nanosecond()/1e3) + } + buf.WriteByte(' ') + } + } + // Add source code filename:line if requested + if h.format&(logFormatShortFile|logFormatLongFile) != 0 && lineInfo != "" { + buf.WriteString(lineInfo) + buf.WriteByte(':') + buf.WriteByte(' ') + } + // Add PID if requested + if h.format&logFormatPid != 0 { + fmt.Fprintf(buf, "[%d] ", os.Getpid()) + } + // Add log level if required + if h.format&logFormatNoLevel == 0 { + levelStr := slogLevelToString(level) + fmt.Fprintf(buf, "%-6s: ", levelStr) + } + // Add object if passed + if object != "" { + buf.WriteString(object) + buf.WriteByte(':') + buf.WriteByte(' ') + } +} + +// Create a log in standard Go log format into buf. +func (h *OutputHandler) textLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) error { + var lineInfo string + if h.format&(logFormatShortFile|logFormatLongFile) != 0 { + lineInfo = getCaller(2) + } + + var object string + r.Attrs(func(attr slog.Attr) bool { + if attr.Key == "object" { + object = attr.Value.String() + return false + } + return true + }) + + h.formatStdLogHeader(buf, r.Level, r.Time, object, lineInfo) + buf.WriteString(r.Message) + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { // Ensure newline + buf.WriteByte('\n') + } + return nil +} + +// Create a log in JSON format into buf. 
+func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) (err error) { + // Call the JSON handler to create the JSON in buf + r.AddAttrs( + slog.String("source", getCaller(2)), + ) + h.mu.Lock() + err = h.jsonHandler.Handle(ctx, r) + if err == nil { + _, err = h.jsonBuf.WriteTo(buf) + } + h.mu.Unlock() + return err +} + +// Handle outputs a log in the current format +func (h *OutputHandler) Handle(ctx context.Context, r slog.Record) (err error) { + var ( + bufJSON *bytes.Buffer + bufText *bytes.Buffer + buf *bytes.Buffer + ) + + // Check whether we need to build Text or JSON logs or both + needJSON := h.format&logFormatJSON != 0 + needText := !needJSON + for _, out := range h.outputExtra { + if out.json { + needJSON = true + } else { + needText = true + } + } + + if needJSON { + var bufJSONBack [256]byte + bufJSON = bytes.NewBuffer(bufJSONBack[:0]) + err = h.jsonLog(ctx, bufJSON, r) + if err != nil { + return err + } + } + + if needText { + var bufTextBack [256]byte + bufText = bytes.NewBuffer(bufTextBack[:0]) + err = h.textLog(ctx, bufText, r) + if err != nil { + return err + } + } + + h.mu.Lock() + defer h.mu.Unlock() + + // Do the log, either to the default destination or to the alternate logging system + if h.format&logFormatJSON != 0 { + buf = bufJSON + } else { + buf = bufText + } + if len(h.output) > 0 { + h.output[len(h.output)-1](r.Level, buf.String()) + err = nil + } else { + _, err = h.writer.Write(buf.Bytes()) + } + + // Log to any additional destinations required + for _, out := range h.outputExtra { + if out.json { + out.output(r.Level, bufJSON.String()) + } else { + out.output(r.Level, bufText.String()) + } + } + return err +} + +// WithAttrs creates a new handler with the same writer, options, and flags. +// Attributes are ignored for the output format of this specific handler. +func (h *OutputHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return NewOutputHandler(h.writer, &h.opts, h.format) +} + +// WithGroup creates a new handler with the same writer, options, and flags. +// Groups are ignored for the output format of this specific handler. +func (h *OutputHandler) WithGroup(name string) slog.Handler { + return NewOutputHandler(h.writer, &h.opts, h.format) +} + +// Check interface +var _ slog.Handler = (*OutputHandler)(nil) diff --git a/fs/log/slog_test.go b/fs/log/slog_test.go new file mode 100644 index 000000000..5cfa52131 --- /dev/null +++ b/fs/log/slog_test.go @@ -0,0 +1,264 @@ +package log + +import ( + "bytes" + "context" + "fmt" + "os" + "regexp" + "strings" + "testing" + "time" + + "log/slog" + + "github.com/rclone/rclone/fs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + utcPlusOne = time.FixedZone("UTC+1", 1*60*60) + t0 = time.Date(2020, 1, 2, 3, 4, 5, 123456000, utcPlusOne) +) + +// Test slogLevelToString covers all mapped levels and an unknown level. 
+func TestSlogLevelToString(t *testing.T) { + tests := []struct { + level slog.Level + want string + }{ + {slog.LevelDebug, "DEBUG"}, + {slog.LevelInfo, "INFO"}, + {fs.SlogLevelNotice, "NOTICE"}, + {slog.LevelWarn, "WARNING"}, + {slog.LevelError, "ERROR"}, + {fs.SlogLevelCritical, "CRITICAL"}, + {fs.SlogLevelAlert, "ALERT"}, + {fs.SlogLevelEmergency, "EMERGENCY"}, + // Unknown level should fall back to .String() + {slog.Level(1234), slog.Level(1234).String()}, + } + for _, tc := range tests { + got := slogLevelToString(tc.level) + assert.Equal(t, tc.want, got) + } +} + +// Test mapLogLevelNames replaces only the LevelKey attr and lowercases it. +func TestMapLogLevelNames(t *testing.T) { + a := slog.Any(slog.LevelKey, slog.LevelWarn) + mapped := mapLogLevelNames(nil, a) + val, ok := mapped.Value.Any().(string) + if !ok || val != "warning" { + t.Errorf("mapLogLevelNames did not lowercase level: got %v", mapped.Value.Any()) + } + // non-level attr should remain unchanged + other := slog.String("foo", "bar") + out := mapLogLevelNames(nil, other) + assert.Equal(t, out.Value, other.Value, "mapLogLevelNames changed a non-level attr") +} + +// Test getCaller returns a file:line string of the correct form. +func TestGetCaller(t *testing.T) { + out := getCaller(0) + assert.NotEqual(t, "", out) + match := regexp.MustCompile(`^([^:]+):(\d+)$`).FindStringSubmatch(out) + assert.NotNil(t, match) + // Can't test this as it skips the /log/ directory! + // assert.Equal(t, "slog_test.go", match[1]) +} + +// Test formatStdLogHeader for various flag combinations. +func TestFormatStdLogHeader(t *testing.T) { + cases := []struct { + name string + format logFormat + lineInfo string + object string + wantPrefix string + }{ + {"dateTime", logFormatDate | logFormatTime, "", "", "2020/01/02 03:04:05 "}, + {"time", logFormatTime, "", "", "03:04:05 "}, + {"date", logFormatDate, "", "", "2020/01/02 "}, + {"dateTimeUTC", logFormatDate | logFormatTime | logFormatUTC, "", "", "2020/01/02 02:04:05 "}, + {"dateTimeMicro", logFormatDate | logFormatTime | logFormatMicroseconds, "", "", "2020/01/02 03:04:05.123456 "}, + {"micro", logFormatMicroseconds, "", "", "03:04:05.123456 "}, + {"shortFile", logFormatShortFile, "foo.go:10", "03:04:05 ", "foo.go:10: "}, + {"longFile", logFormatLongFile, "foo.go:10", "03:04:05 ", "foo.go:10: "}, + {"timePID", logFormatPid, "", "", fmt.Sprintf("[%d] ", os.Getpid())}, + {"levelObject", 0, "", "myobj", "INFO : myobj: "}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + h := &OutputHandler{format: tc.format} + buf := &bytes.Buffer{} + h.formatStdLogHeader(buf, slog.LevelInfo, t0, tc.object, tc.lineInfo) + if !strings.HasPrefix(buf.String(), tc.wantPrefix) { + t.Errorf("%s: got %q; want prefix %q", tc.name, buf.String(), tc.wantPrefix) + } + }) + } +} + +// Test Enabled honors the HandlerOptions.Level. +func TestEnabled(t *testing.T) { + h := NewOutputHandler(&bytes.Buffer{}, nil, 0) + assert.True(t, h.Enabled(context.Background(), slog.LevelInfo)) + assert.False(t, h.Enabled(context.Background(), slog.LevelDebug)) + + opts := &slog.HandlerOptions{Level: slog.LevelDebug} + h2 := NewOutputHandler(&bytes.Buffer{}, opts, 0) + assert.True(t, h2.Enabled(context.Background(), slog.LevelDebug)) +} + +// Test clearFormatFlags and setFormatFlags bitwise ops. 
+func TestClearSetFormatFlags(t *testing.T) { + h := &OutputHandler{format: logFormatDate | logFormatTime} + + h.clearFormatFlags(logFormatTime) + assert.True(t, h.format&logFormatTime == 0) + + h.setFormatFlags(logFormatMicroseconds) + assert.True(t, h.format&logFormatMicroseconds != 0) +} + +// Test SetOutput and ResetOutput override the default writer. +func TestSetResetOutput(t *testing.T) { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, 0) + var gotOverride string + out := func(_ slog.Level, txt string) { + gotOverride = txt + } + + h.SetOutput(out) + r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0) + require.NoError(t, h.Handle(context.Background(), r)) + assert.NotEqual(t, "", gotOverride) + require.Equal(t, "", buf.String()) + + h.ResetOutput() + require.NoError(t, h.Handle(context.Background(), r)) + require.NotEqual(t, "", buf.String()) +} + +// Test AddOutput sends to extra destinations. +func TestAddOutput(t *testing.T) { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime) + var extraText string + out := func(_ slog.Level, txt string) { + extraText = txt + } + + h.AddOutput(false, out) + + r := slog.NewRecord(t0, slog.LevelInfo, "world", 0) + require.NoError(t, h.Handle(context.Background(), r)) + assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", buf.String()) + assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText) +} + +// Test AddOutputJSON sends JSON to extra destinations. +func TestAddOutputJSON(t *testing.T) { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime) + var extraText string + out := func(_ slog.Level, txt string) { + extraText = txt + } + + h.AddOutput(true, out) + + r := slog.NewRecord(t0, slog.LevelInfo, "world", 0) + require.NoError(t, h.Handle(context.Background(), r)) + assert.NotEqual(t, "", extraText) + assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", buf.String()) + assert.True(t, strings.HasPrefix(extraText, `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`)) + assert.True(t, strings.HasSuffix(extraText, "\"}\n")) +} + +// Test AddOutputUseJSONLog sends text to extra destinations. +func TestAddOutputUseJSONLog(t *testing.T) { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime|logFormatJSON) + var extraText string + out := func(_ slog.Level, txt string) { + extraText = txt + } + + h.AddOutput(false, out) + + r := slog.NewRecord(t0, slog.LevelInfo, "world", 0) + require.NoError(t, h.Handle(context.Background(), r)) + assert.NotEqual(t, "", extraText) + assert.True(t, strings.HasPrefix(buf.String(), `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`)) + assert.True(t, strings.HasSuffix(buf.String(), "\"}\n")) + assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText) +} + +// Test WithAttrs and WithGroup return new handlers with same settings. +func TestWithAttrsAndGroup(t *testing.T) { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, logFormatDate) + h2 := h.WithAttrs([]slog.Attr{slog.String("k", "v")}) + if _, ok := h2.(*OutputHandler); !ok { + t.Error("WithAttrs returned wrong type") + } + h3 := h.WithGroup("grp") + if _, ok := h3.(*OutputHandler); !ok { + t.Error("WithGroup returned wrong type") + } +} + +// Test textLog and jsonLog directly for basic correctness. 
+func TestTextLogAndJsonLog(t *testing.T) { + h := NewOutputHandler(&bytes.Buffer{}, nil, logFormatDate|logFormatTime) + r := slog.NewRecord(t0, slog.LevelWarn, "msg!", 0) + r.AddAttrs(slog.String("object", "obj")) + + // textLog + bufText := &bytes.Buffer{} + require.NoError(t, h.textLog(context.Background(), bufText, r)) + out := bufText.String() + if !strings.Contains(out, "WARNING") || !strings.Contains(out, "obj:") || !strings.HasSuffix(out, "\n") { + t.Errorf("textLog output = %q", out) + } + + // jsonLog + bufJSON := &bytes.Buffer{} + require.NoError(t, h.jsonLog(context.Background(), bufJSON, r)) + j := bufJSON.String() + if !strings.Contains(j, `"level":"warning"`) || !strings.Contains(j, `"msg":"msg!"`) { + t.Errorf("jsonLog output = %q", j) + } +} + +// Table-driven test for JSON vs text Handle behavior. +func TestHandleFormatFlags(t *testing.T) { + r := slog.NewRecord(t0, slog.LevelInfo, "hi", 0) + cases := []struct { + name string + format logFormat + wantJSON bool + }{ + {"textMode", 0, false}, + {"jsonMode", logFormatJSON, true}, + } + for _, tc := range cases { + buf := &bytes.Buffer{} + h := NewOutputHandler(buf, nil, tc.format) + require.NoError(t, h.Handle(context.Background(), r)) + out := buf.String() + if tc.wantJSON { + if !strings.HasPrefix(out, "{") || !strings.Contains(out, `"level":"info"`) { + t.Errorf("%s: got %q; want JSON", tc.name, out) + } + } else { + if !strings.Contains(out, "INFO") { + t.Errorf("%s: got %q; want text INFO", tc.name, out) + } + } + } +} diff --git a/fs/log/syslog.go b/fs/log/syslog.go index fd98f9fb6..f3ad7386e 100644 --- a/fs/log/syslog.go +++ b/fs/log/syslog.go @@ -11,7 +11,7 @@ import ( ) // Starts syslog if configured, returns true if it was started -func startSysLog() bool { +func startSysLog(handler *OutputHandler) bool { fs.Fatalf(nil, "--syslog not supported on %s platform", runtime.GOOS) return false } diff --git a/fs/log/syslog_unix.go b/fs/log/syslog_unix.go index 4fbbd2099..708083349 100644 --- a/fs/log/syslog_unix.go +++ b/fs/log/syslog_unix.go @@ -5,7 +5,7 @@ package log import ( - "log" + "log/slog" "log/syslog" "os" "path" @@ -39,7 +39,7 @@ var ( ) // Starts syslog -func startSysLog() bool { +func startSysLog(handler *OutputHandler) bool { facility, ok := syslogFacilityMap[Opt.SyslogFacility] if !ok { fs.Fatalf(nil, "Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility) @@ -49,27 +49,27 @@ func startSysLog() bool { if err != nil { fs.Fatalf(nil, "Failed to start syslog: %v", err) } - log.SetFlags(0) - log.SetOutput(w) - fs.LogOutput = func(level fs.LogLevel, text string) { + handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid) + handler.setFormatFlags(logFormatNoLevel) + handler.SetOutput(func(level slog.Level, text string) { switch level { - case fs.LogLevelEmergency: + case fs.SlogLevelEmergency: _ = w.Emerg(text) - case fs.LogLevelAlert: + case fs.SlogLevelAlert: _ = w.Alert(text) - case fs.LogLevelCritical: + case fs.SlogLevelCritical: _ = w.Crit(text) - case fs.LogLevelError: + case slog.LevelError: _ = w.Err(text) - case fs.LogLevelWarning: + case slog.LevelWarn: _ = w.Warning(text) - case fs.LogLevelNotice: + case fs.SlogLevelNotice: _ = w.Notice(text) - case fs.LogLevelInfo: + case slog.LevelInfo: _ = w.Info(text) - case fs.LogLevelDebug: + case slog.LevelDebug: _ = w.Debug(text) } - } + }) return true } diff --git a/fs/log/systemd.go b/fs/log/systemd.go index 84e9a3bd3..8ebb2c0d9 100644 --- 
a/fs/log/systemd.go +++ b/fs/log/systemd.go @@ -11,7 +11,7 @@ import ( ) // Enables systemd logs if configured or if auto-detected -func startSystemdLog() bool { +func startSystemdLog(handler *OutputHandler) bool { fs.Fatalf(nil, "--log-systemd not supported on %s platform", runtime.GOOS) return false } diff --git a/fs/log/systemd_unix.go b/fs/log/systemd_unix.go index 22c82ad11..a71bc6880 100644 --- a/fs/log/systemd_unix.go +++ b/fs/log/systemd_unix.go @@ -7,54 +7,47 @@ package log import ( "fmt" "log" + "log/slog" "strconv" - "strings" "github.com/coreos/go-systemd/v22/journal" "github.com/rclone/rclone/fs" ) // Enables systemd logs if configured or if auto-detected -func startSystemdLog() bool { - flagsStr := "," + Opt.Format + "," - var flags int - if strings.Contains(flagsStr, ",longfile,") { - flags |= log.Llongfile - } - if strings.Contains(flagsStr, ",shortfile,") { - flags |= log.Lshortfile - } - log.SetFlags(flags) +func startSystemdLog(handler *OutputHandler) bool { + handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid) + handler.setFormatFlags(logFormatNoLevel) // TODO: Use the native journal.Print approach rather than a custom implementation - fs.LogOutput = func(level fs.LogLevel, text string) { + handler.SetOutput(func(level slog.Level, text string) { text = fmt.Sprintf("<%s>%-6s: %s", systemdLogPrefix(level), level, text) _ = log.Output(4, text) - } + }) return true } -var logLevelToSystemdPrefix = []journal.Priority{ - fs.LogLevelEmergency: journal.PriEmerg, - fs.LogLevelAlert: journal.PriAlert, - fs.LogLevelCritical: journal.PriCrit, - fs.LogLevelError: journal.PriErr, - fs.LogLevelWarning: journal.PriWarning, - fs.LogLevelNotice: journal.PriNotice, - fs.LogLevelInfo: journal.PriInfo, - fs.LogLevelDebug: journal.PriDebug, +var slogLevelToSystemdPrefix = map[slog.Level]journal.Priority{ + fs.SlogLevelEmergency: journal.PriEmerg, + fs.SlogLevelAlert: journal.PriAlert, + fs.SlogLevelCritical: journal.PriCrit, + slog.LevelError: journal.PriErr, + slog.LevelWarn: journal.PriWarning, + fs.SlogLevelNotice: journal.PriNotice, + slog.LevelInfo: journal.PriInfo, + slog.LevelDebug: journal.PriDebug, } -func systemdLogPrefix(l fs.LogLevel) string { - if l >= fs.LogLevel(len(logLevelToSystemdPrefix)) { +func systemdLogPrefix(l slog.Level) string { + prio, ok := slogLevelToSystemdPrefix[l] + if !ok { return "" } - return strconv.Itoa(int(logLevelToSystemdPrefix[l])) + return strconv.Itoa(int(prio)) } func isJournalStream() bool { if usingJournald, _ := journal.StderrIsJournalStream(); usingJournald { return true } - return false } diff --git a/fs/operations/check_test.go b/fs/operations/check_test.go index cf499dd96..eeffb0514 100644 --- a/fs/operations/check_test.go +++ b/fs/operations/check_test.go @@ -6,12 +6,11 @@ import ( "errors" "fmt" "io" - "log" - "os" "sort" "strings" "testing" + "github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/hash" @@ -65,18 +64,16 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat check := func(i int, wantErrors int64, wantChecks int64, oneway bool, wantOutput map[string]string) { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { accounting.GlobalStats().ResetCounters() - var buf bytes.Buffer - log.SetOutput(&buf) - defer func() { - log.SetOutput(os.Stderr) - }() opt := operations.CheckOpt{ Fdst: r.Fremote, Fsrc: r.Flocal, OneWay: 
oneway, } addBuffers(&opt) - err := checkFunction(ctx, &opt) + var err error + buf := bilib.CaptureOutput(func() { + err = checkFunction(ctx, &opt) + }) gotErrors := accounting.GlobalStats().GetErrors() gotChecks := accounting.GlobalStats().GetChecks() if wantErrors == 0 && err != nil { @@ -88,7 +85,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat if wantErrors != gotErrors { t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors) } - if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") { + if gotChecks > 0 && !strings.Contains(string(buf), "matching files") { t.Errorf("%d: Total files matching line missing", i) } if wantChecks != gotChecks { @@ -389,9 +386,6 @@ func testCheckSum(t *testing.T, download bool) { checkRun := func(runNo, wantChecks, wantErrors int, want wantType) { accounting.GlobalStats().ResetCounters() - buf := new(bytes.Buffer) - log.SetOutput(buf) - defer log.SetOutput(os.Stderr) opt := operations.CheckOpt{ Combined: new(bytes.Buffer), @@ -401,8 +395,10 @@ func testCheckSum(t *testing.T, download bool) { MissingOnSrc: new(bytes.Buffer), MissingOnDst: new(bytes.Buffer), } - err := operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download) - + var err error + buf := bilib.CaptureOutput(func() { + err = operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download) + }) gotErrors := int(accounting.GlobalStats().GetErrors()) if wantErrors == 0 { assert.NoError(t, err, "unexpected error in run %d", runNo) @@ -414,7 +410,7 @@ func testCheckSum(t *testing.T, download bool) { gotChecks := int(accounting.GlobalStats().GetChecks()) if wantChecks > 0 || gotChecks > 0 { - assert.Contains(t, buf.String(), "matching files", "missing matching files in run %d", runNo) + assert.Contains(t, string(buf), "matching files", "missing matching files in run %d", runNo) } assert.Equal(t, wantChecks, gotChecks, "wrong number of checks in run %d", runNo) diff --git a/go.mod b/go.mod index 8e62e7ea5..b05d93171 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,6 @@ require ( github.com/rivo/uniseg v0.4.7 github.com/rogpeppe/go-internal v1.14.1 github.com/shirou/gopsutil/v4 v4.25.1 - github.com/sirupsen/logrus v1.9.3 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 @@ -208,6 +207,7 @@ require ( github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect github.com/samber/lo v1.47.0 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/sony/gobreaker v0.5.0 // indirect github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect
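
Note for reviewers of the API change: the removed `fs.LogOutput` hook is replaced by an override stack on `log.Handler`. The sketch below shows how a caller would temporarily intercept log output under the new API, following the same pattern this diff uses in `bilib.CaptureOutput` and `cmd/progress.go`. The `captureAtInfo` helper and the `main` function are illustrative only and are not part of the change.

```go
package main

import (
	"bytes"
	"fmt"
	"log/slog"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/log"
)

// captureAtInfo is a hypothetical helper mirroring bilib.CaptureOutput:
// it collects anything rclone logs at INFO or above while fun runs.
func captureAtInfo(fun func()) []byte {
	buf := &bytes.Buffer{}

	// Lower the handler level so INFO messages are emitted while capturing.
	oldLevel := log.Handler.SetLevel(slog.LevelInfo)

	// Push an output override; SetOutput/ResetOutput behave as a stack.
	log.Handler.SetOutput(func(level slog.Level, text string) {
		buf.WriteString(text)
	})
	defer func() {
		// Pop the override and restore the previous level.
		log.Handler.ResetOutput()
		log.Handler.SetLevel(oldLevel)
	}()

	fun()
	return buf.Bytes()
}

func main() {
	out := captureAtInfo(func() {
		fs.Logf(nil, "captured instead of written to stderr")
	})
	fmt.Printf("captured %d bytes of log output\n", len(out))
}
```

Unlike the old `fs.LogOutput` global, overrides pushed with `SetOutput` nest, so the progress display, syslog, and test capture can each install and remove their own interception without having to save and restore a package-level variable.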