Mirror of https://github.com/rclone/rclone.git (synced 2025-10-06 05:47:10 +02:00)
accounting: add SetMaxCompletedTransfers method to fix bisync race #8815
Before this change bisync adjusted the global MaxCompletedTransfers variable, which caused races.

This adds a SetMaxCompletedTransfers method and uses it in bisync. The MaxCompletedTransfers global becomes the default. It can still be changed externally when rclone is used as a library, and the commit history indicates that MaxCompletedTransfers was added for exactly that purpose, so we try not to break it here.
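For users driving rclone as a library, a minimal sketch of how the two knobs relate after this change. The example is illustrative, not part of the commit; it assumes the github.com/rclone/rclone/fs/accounting import path and the accounting.Stats helper.

package main

import (
	"context"

	"github.com/rclone/rclone/fs/accounting"
)

func main() {
	ctx := context.Background()

	// The package-level variable still exists, but it is now only the
	// default that each new StatsInfo copies at construction time.
	accounting.MaxCompletedTransfers = 200

	// Per-StatsInfo override, which is what bisync now uses; -1 means
	// keep every completed transfer (no pruning).
	accounting.Stats(ctx).SetMaxCompletedTransfers(-1)
}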
@@ -245,10 +245,8 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
 		}
 	}
 
 	b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
-	if accounting.MaxCompletedTransfers != -1 {
-		accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
-	}
+	accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown
 	ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
 	b.testFn()
 	err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
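The hunk above is the consumer of the new API: instead of flipping the package-level variable (and guarding it with an if), bisync now sets the limit on the stats attached to its own context. As a stripped-down illustration of the race the old pattern invites (hypothetical names, not rclone code), two goroutines touching the same global without synchronisation is the kind of thing go run -race flags:

package main

import "sync"

// maxCompletedTransfers stands in for the package-level default that the old
// bisync code mutated from inside the sync path.
var maxCompletedTransfers = 100

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			maxCompletedTransfers = -1    // unsynchronised write from one "sync"
			_ = maxCompletedTransfers + 1 // unsynchronised read from another
		}()
	}
	wg.Wait()
}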
@@ -22,48 +22,52 @@ const (
 	averageStopAfter = time.Minute
 )
 
-// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
+// MaxCompletedTransfers specifies the default maximum number of
+// completed transfers in startedTransfers list. This can be adjusted
+// for a given StatsInfo by calling the SetMaxCompletedTransfers
+// method.
 var MaxCompletedTransfers = 100
 
 // StatsInfo accounts all transfers
 // N.B.: if this struct is modified, please remember to also update sum() function in stats_groups
 // to correctly count the updated fields
 type StatsInfo struct {
 	mu                  sync.RWMutex
 	ctx                 context.Context
 	ci                  *fs.ConfigInfo
 	bytes               int64
 	errors              int64
 	lastError           error
 	fatalError          bool
 	retryError          bool
 	retryAfter          time.Time
 	checks              int64
 	checking            *transferMap
 	checkQueue          int
 	checkQueueSize      int64
 	transfers           int64
 	transferring        *transferMap
 	transferQueue       int
 	transferQueueSize   int64
 	listed              int64
 	renames             int64
 	renameQueue         int
 	renameQueueSize     int64
 	deletes             int64
 	deletesSize         int64
 	deletedDirs         int64
 	inProgress          *inProgress
 	startedTransfers    []*Transfer   // currently active transfers
 	oldTimeRanges       timeRanges    // a merged list of time ranges for the transfers
 	oldDuration         time.Duration // duration of transfers we have culled
 	group               string
 	startTime           time.Time // the moment these stats were initialized or reset
 	average             averageValues
 	serverSideCopies    int64
 	serverSideCopyBytes int64
 	serverSideMoves     int64
 	serverSideMoveBytes int64
+	maxCompletedTransfers int
 }
 
 type averageValues struct {
@@ -81,17 +85,26 @@ type averageValues struct {
 func NewStats(ctx context.Context) *StatsInfo {
 	ci := fs.GetConfig(ctx)
 	s := &StatsInfo{
 		ctx:          ctx,
 		ci:           ci,
 		checking:     newTransferMap(ci.Checkers, "checking"),
 		transferring: newTransferMap(ci.Transfers, "transferring"),
 		inProgress:   newInProgress(ctx),
 		startTime:    time.Now(),
 		average:      averageValues{},
+		maxCompletedTransfers: MaxCompletedTransfers,
 	}
 	return s
 }
 
+// SetMaxCompletedTransfers sets the maximum number of completed transfers to keep.
+func (s *StatsInfo) SetMaxCompletedTransfers(n int) *StatsInfo {
+	s.mu.Lock()
+	s.maxCompletedTransfers = n
+	s.mu.Unlock()
+	return s
+}
+
 // RemoteStats returns stats for rc
 //
 // If short is true then the transfers and checkers won't be added.
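SetMaxCompletedTransfers returns the *StatsInfo it was called on, so it can be chained off the constructor. A small usage sketch, again illustrative rather than part of the commit, assuming only the exported NewStats constructor shown above and the StatsInfo String method used for the periodic stats log:

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/accounting"
)

func main() {
	ctx := context.Background()

	// NewStats seeds maxCompletedTransfers from the MaxCompletedTransfers
	// default; the chained setter then overrides it for this StatsInfo only.
	s := accounting.NewStats(ctx).SetMaxCompletedTransfers(10)

	fmt.Println(s.String()) // prints the usual transfer-stats summary
}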
@@ -914,13 +927,14 @@ func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
 // PruneTransfers makes sure there aren't too many old transfers by removing
 // a single finished transfer. Returns true if it removed a transfer.
 func (s *StatsInfo) PruneTransfers() bool {
-	if MaxCompletedTransfers < 0 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.maxCompletedTransfers < 0 {
 		return false
 	}
 	removed := false
-	s.mu.Lock()
 	// remove a transfer from the start if we are over quota
-	if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers {
+	if len(s.startedTransfers) > s.maxCompletedTransfers+s.ci.Transfers {
 		for i, tr := range s.startedTransfers {
 			if tr.IsDone() {
 				s._removeTransfer(tr, i)
@@ -929,7 +943,6 @@ func (s *StatsInfo) PruneTransfers() bool {
 			}
 		}
 	}
-	s.mu.Unlock()
 	return removed
 }
 
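The last two hunks move the lock to the top of PruneTransfers and release it with defer, so the read of the per-instance limit happens in the same critical section as the pruning, and the early return no longer needs (or can forget) an explicit unlock. A stripped-down, hypothetical stand-in for that locking shape:

package main

import (
	"fmt"
	"sync"
)

type pruner struct {
	mu    sync.Mutex
	limit int
	items []int
}

func (p *pruner) prune() bool {
	p.mu.Lock()
	defer p.mu.Unlock() // covers every return path, including the early one
	if p.limit < 0 {    // the limit is now read under the lock
		return false
	}
	if len(p.items) > p.limit {
		p.items = p.items[1:] // drop the oldest entry, as PruneTransfers drops a finished transfer
		return true
	}
	return false
}

func main() {
	p := &pruner{limit: 1, items: []int{1, 2, 3}}
	fmt.Println(p.prune(), p.items) // true [2 3]
}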