fs: deglobalise the config #4685

This is done by making fs.Config private and attaching it to the context instead. The Config should be obtained with fs.GetConfig, and fs.AddConfig should be used to get a new mutable config that can be changed.

parent 506342317b
commit 2e21c58e6a
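
As a minimal sketch of the two accessors named above (the helpers lowLevelRetries and withOneRetry are hypothetical, written only to illustrate the API):

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// lowLevelRetries reads a setting: fs.GetConfig returns the *fs.ConfigInfo
// attached to ctx (or the default config if none is attached).
func lowLevelRetries(ctx context.Context) int {
	ci := fs.GetConfig(ctx)
	return ci.LowLevelRetries
}

// withOneRetry changes a setting for one operation: fs.AddConfig attaches a
// mutable copy of the config to a new context, so the change does not leak
// into other operations sharing the parent context.
func withOneRetry(ctx context.Context) context.Context {
	newCtx, ci := fs.AddConfig(ctx)
	ci.LowLevelRetries = 1
	return newCtx
}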
Changed files/directories:

backend: amazonclouddrive, azureblob, b2, box, cache, drive, dropbox, fichier, filefabric, ftp, googlecloudstorage, googlephotos, http, hubic, jottacloud, koofr, mailru, mega, onedrive, opendrive, pcloud, premiumizeme, putio, qingstor, s3, seafile, sftp, sharefile, sugarsync, swift, webdav, yandex
cmd
fs: accounting (accounting.go, accounting_test.go, inprogress.go, stats.go, stats_groups.go, stats_test.go, token_bucket.go, transfer.go, transfermap.go), asyncreader, config.go, config, config_test.go, filter, fs.go, fs_test.go, fshttp, log.go, log, march, operations (check.go, check_test.go, dedupe.go, dedupe_test.go, lsjson.go, multithread.go, multithread_test.go, operations.go, operations_internal_test.go, operations_test.go), rc, sync, walk
fstest
lib/oauthutil
vfs
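
Every backend diff below follows the same shape. A minimal sketch of the pattern on a hypothetical backend (not code from this commit): NewFs reads the config from the context once, stores it on the Fs for methods that have no useful context of their own, and passes ctx on to helpers such as fs.NewPacer.

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Fs is a hypothetical backend showing the migration pattern.
type Fs struct {
	name string
	ci   *fs.ConfigInfo // global config, captured from the context in NewFs
}

// NewFs reads the config once instead of consulting the old fs.Config global.
func NewFs(ctx context.Context, name string) *Fs {
	return &Fs{
		name: name,
		ci:   fs.GetConfig(ctx),
	}
}

// workers sizes a worker pool from the stored config, replacing a
// former fs.Config.Transfers read.
func (f *Fs) workers() int {
	return f.ci.Transfers
}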
backend/amazonclouddrive
@@ -144,6 +144,7 @@ type Fs struct {
 	name         string         // name of this remote
 	features     *fs.Features   // optional features
 	opt          Options        // options for this Fs
+	ci           *fs.ConfigInfo // global config
 	c            *acd.Client    // the connection to the acd server
 	noAuthClient *http.Client   // unauthenticated http client
 	root         string         // the path we are working on
@@ -247,7 +248,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 	root = parsePath(root)
-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(fs.GetConfig(ctx))
 	if do, ok := baseClient.Transport.(interface {
 		SetRequestFilter(f func(req *http.Request))
 	}); ok {
@@ -261,13 +262,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}

 	c := acd.NewClient(oAuthClient)
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:         name,
 		root:         root,
 		opt:          *opt,
+		ci:           ci,
 		c:            c,
-		pacer:        fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
-		noAuthClient: fshttp.NewClient(fs.Config),
+		pacer:        fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
+		noAuthClient: fshttp.NewClient(ci),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -501,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if err != nil {
 		return nil, err
 	}
-	maxTries := fs.Config.LowLevelRetries
+	maxTries := f.ci.LowLevelRetries
 	var iErr error
 	for tries := 1; tries <= maxTries; tries++ {
 		entries = nil
@@ -716,7 +719,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		dstObj         fs.Object
 		srcErr, dstErr error
 	)
-	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
+	for i := 1; i <= f.ci.LowLevelRetries; i++ {
 		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
 		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 			// exit if error on source
@@ -731,7 +734,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			// finished if src not found and dst found
 			break
 		}
-		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
+		fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
 		time.Sleep(1 * time.Second)
 	}
 	return dstObj, dstErr
backend/azureblob
@@ -187,6 +187,7 @@ type Fs struct {
 	name          string             // name of this remote
 	root          string             // the path we are working on if any
 	opt           Options            // parsed config options
+	ci            *fs.ConfigInfo     // global config
 	features      *fs.Features       // optional features
 	client        *http.Client       // http client we are using
 	svcURL        *azblob.ServiceURL // reference to serviceURL
@@ -409,18 +410,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		opt:         *opt,
-		pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
-		client:      fshttp.NewClient(fs.Config),
+		ci:          ci,
+		pacer:       fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		client:      fshttp.NewClient(fs.GetConfig(ctx)),
 		cache:       bucket.NewCache(),
 		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			fs.Config.Transfers,
+			ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
@@ -1035,7 +1038,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
 	return pool.New(
 		time.Duration(f.opt.MemoryPoolFlushTime),
 		int(size),
-		fs.Config.Transfers,
+		f.ci.Transfers,
 		f.opt.MemoryPoolUseMmap,
 	)
 }
backend/b2
@@ -214,6 +214,7 @@ type Fs struct {
 	name       string         // name of this remote
 	root       string         // the path we are working on if any
 	opt        Options        // parsed config options
+	ci         *fs.ConfigInfo // global config
 	features   *fs.Features   // optional features
 	srv        *rest.Client   // the connection to the b2 server
 	rootBucket string         // bucket part of root (if any)
@@ -415,20 +416,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.Endpoint == "" {
 		opt.Endpoint = defaultEndpoint
 	}
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		opt:         *opt,
-		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
+		ci:          ci,
+		srv:         rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetErrorHandler(errorHandler),
 		cache:       bucket.NewCache(),
 		_bucketID:   make(map[string]string, 1),
 		_bucketType: make(map[string]string, 1),
 		uploads:     make(map[string][]*api.GetUploadURLResponse),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			fs.Config.Transfers,
+			ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
@@ -1167,10 +1170,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 	}

 	// Delete Config.Transfers in parallel
-	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
+	toBeDeleted := make(chan *api.File, f.ci.Transfers)
 	var wg sync.WaitGroup
-	wg.Add(fs.Config.Transfers)
-	for i := 0; i < fs.Config.Transfers; i++ {
+	wg.Add(f.ci.Transfers)
+	for i := 0; i < f.ci.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {
backend/box
@@ -91,7 +91,7 @@ func init() {
 			var err error
 			// If using box config.json, use JWT auth
 			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				err = refreshJWTToken(jsonFile, boxSubType, name, m)
+				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 				if err != nil {
 					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
 				}
@@ -153,7 +153,7 @@ func init() {
 	})
 }

-func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
+func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
 	jsonFile = env.ShellExpand(jsonFile)
 	boxConfig, err := getBoxConfig(jsonFile)
 	if err != nil {
@@ -169,7 +169,7 @@ func refreshJWTToken(jsonFile string, boxSubType string, name string, m configma
 	}
 	signingHeaders := getSigningHeaders(boxConfig)
 	queryParams := getQueryParams(boxConfig)
-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
 	return err
 }
@@ -386,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	root = parsePath(root)

-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))
 	var ts *oauthutil.TokenSource
 	// If not using an accessToken, create an oauth client and tokensource
 	if opt.AccessToken == "" {
@@ -396,13 +396,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(client).SetRoot(rootURL),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -423,7 +424,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	// should do so whether there are uploads pending or not.
 	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-			err := refreshJWTToken(jsonFile, boxSubType, name, m)
+			err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 			return err
 		})
 		f.tokenRenewer.Start()
backend/cache/cache_internal_test.go
@@ -925,7 +925,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)

-	fs.Config.LowLevelRetries = 1
+	ci := fs.GetConfig(context.Background())
+	ci.LowLevelRetries = 1

 	// Instantiate root
 	if purge {
backend/drive
@@ -564,6 +564,7 @@ type Fs struct {
 	name     string            // name of this remote
 	root     string            // the path we are working on
 	opt      Options           // parsed options
+	ci       *fs.ConfigInfo    // global config
 	features *fs.Features      // optional features
 	svc      *drive.Service    // the connection to the drive server
 	v2Svc    *drive_v2.Service // used to create download links for the v2 api
@@ -940,8 +941,10 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er

 // Figure out if the user wants to use a team drive
 func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
+	ci := fs.GetConfig(ctx)
+
 	// Stop if we are running non-interactive config
-	if fs.Config.AutoConfirm {
+	if ci.AutoConfirm {
 		return nil
 	}
 	if opt.TeamDriveID == "" {
@@ -979,8 +982,8 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
 }

 // getClient makes an http client according to the options
-func getClient(opt *Options) *http.Client {
-	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
+func getClient(ctx context.Context, opt *Options) *http.Client {
+	t := fshttp.NewTransportCustom(fs.GetConfig(ctx), func(t *http.Transport) {
 		if opt.DisableHTTP2 {
 			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
 		}
@@ -999,7 +1002,7 @@ func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData
 	if opt.Impersonate != "" {
 		conf.Subject = opt.Impersonate
 	}
-	ctxWithSpecialClient := oauthutil.Context(ctx, getClient(opt))
+	ctxWithSpecialClient := oauthutil.Context(ctx, getClient(ctx, opt))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }

@@ -1021,7 +1024,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
 		return nil, errors.Wrap(err, "failed to create oauth client from service account")
 		}
 	} else {
-		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(opt))
+		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to create oauth client")
 		}
@@ -1090,11 +1093,13 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 		return nil, err
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:     name,
 		root:     root,
 		opt:      *opt,
-		pacer:    fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
+		ci:       ci,
+		pacer:    fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
 		m:        m,
 		grouping: listRGrouping,
 		listRmu:  new(sync.Mutex),
@@ -1803,7 +1808,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	mu := sync.Mutex{} // protects in and overflow
 	wg := sync.WaitGroup{}
 	in := make(chan listREntry, listRInputBuffer)
-	out := make(chan error, fs.Config.Checkers)
+	out := make(chan error, f.ci.Checkers)
 	list := walk.NewListRHelper(callback)
 	overflow := []listREntry{}
 	listed := 0
@@ -1842,7 +1847,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	wg.Add(1)
 	in <- listREntry{directoryID, dir}

-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < f.ci.Checkers; i++ {
 		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
 	}
 	go func() {
@@ -1875,7 +1880,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		mu.Unlock()
 	}()
 	// wait until the all workers to finish
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < f.ci.Checkers; i++ {
 		e := <-out
 		mu.Lock()
 		// if one worker returns an error early, close the input so all other workers exit
backend/dropbox
@@ -324,7 +324,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	config := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
backend/fichier
@@ -186,7 +186,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 		name:       name,
 		root:       root,
 		opt:        *opt,
-		pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
+		pacer:      fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
 		baseClient: &http.Client{},
 	}

@@ -195,7 +195,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)

-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))

 	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

@@ -4,13 +4,11 @@ package fichier
 import (
 	"testing"

-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	fs.Config.LogLevel = fs.LogLevelDebug
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestFichier:",
 	})
backend/filefabric
@@ -425,7 +425,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 	root = parsePath(root)

-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))

 	f := &Fs{
 		name: name,
@@ -433,7 +433,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:   *opt,
 		m:     m,
 		srv:   rest.NewClient(client).SetRoot(opt.URL),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		token: opt.Token,
 	}
 	f.features = (&fs.Features{
backend/ftp
@@ -122,10 +122,11 @@ type Options struct {

 // Fs represents a remote FTP server
 type Fs struct {
-	name     string       // name of this remote
-	root     string       // the path we are working on if any
-	opt      Options      // parsed options
-	features *fs.Features // optional features
+	name     string         // name of this remote
+	root     string         // the path we are working on if any
+	opt      Options        // parsed options
+	ci       *fs.ConfigInfo // global config
+	features *fs.Features   // optional features
 	url      string
 	user     string
 	pass     string
@@ -210,9 +211,9 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
 }

 // Open a new connection to the FTP server.
-func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
+func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
-	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
+	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(f.ci.ConnectTimeout)}
 	if f.opt.TLS && f.opt.ExplicitTLS {
 		fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
 		return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
@@ -235,8 +236,8 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	if f.opt.DisableMLSD {
 		ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
 	}
-	if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
-		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
+	if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
+		ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
 	}
 	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
 	if err != nil {
@@ -253,7 +254,7 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 }

 // Get an FTP connection from the pool, or open a new one
-func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
+func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 	if f.opt.Concurrency > 0 {
 		f.tokens.Get()
 	}
@@ -266,7 +267,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 	if c != nil {
 		return c, nil
 	}
-	c, err = f.ftpConnection()
+	c, err = f.ftpConnection(ctx)
 	if err != nil && f.opt.Concurrency > 0 {
 		f.tokens.Put()
 	}
@@ -336,10 +337,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		protocol = "ftps://"
 	}
 	u := protocol + path.Join(dialAddr+"/", root)
+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		root: root,
 		opt:  *opt,
+		ci:   ci,
 		url:  u,
 		user: user,
 		pass: pass,
@@ -350,7 +353,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
 	// Make a connection and pool it to return errors early
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "NewFs")
 	}
@@ -421,7 +424,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
 }

 // findItem finds a directory entry for the name in its parent directory
-func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
+func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	fullPath := path.Join(f.root, remote)
 	if fullPath == "" || fullPath == "." || fullPath == "/" {
@@ -435,7 +438,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	dir := path.Dir(fullPath)
 	base := path.Base(fullPath)

-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
@@ -457,7 +460,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-	entry, err := f.findItem(remote)
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return nil, err
 	}
@@ -479,8 +482,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
 }

 // dirExists checks the directory pointed to by remote exists or not
-func (f *Fs) dirExists(remote string) (exists bool, err error) {
-	entry, err := f.findItem(remote)
+func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return false, errors.Wrap(err, "dirExists")
 	}
@@ -501,7 +504,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
 	}
@@ -522,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}()

 	// Wait for List for up to Timeout seconds
-	timer := time.NewTimer(fs.Config.Timeout)
+	timer := time.NewTimer(f.ci.Timeout)
 	select {
 	case listErr = <-errchan:
 		timer.Stop()
@@ -539,7 +542,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	// doesn't exist, so check it really doesn't exist if no
 	// entries found.
 	if len(files) == 0 {
-		exists, err := f.dirExists(dir)
+		exists, err := f.dirExists(ctx, dir)
 		if err != nil {
 			return nil, errors.Wrap(err, "list")
 		}
@@ -592,7 +595,7 @@ func (f *Fs) Precision() time.Duration {
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
-	err := f.mkParentDir(src.Remote())
+	err := f.mkParentDir(ctx, src.Remote())
 	if err != nil {
 		return nil, errors.Wrap(err, "Put mkParentDir failed")
 	}
@@ -610,12 +613,12 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 }

 // getInfo reads the FileInfo for a path
-func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
+func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
 	// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
 	dir := path.Dir(remote)
 	base := path.Base(remote)

-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
@@ -642,12 +645,12 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 }

 // mkdir makes the directory and parents using unrooted paths
-func (f *Fs) mkdir(abspath string) error {
+func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 	abspath = path.Clean(abspath)
 	if abspath == "." || abspath == "/" {
 		return nil
 	}
-	fi, err := f.getInfo(abspath)
+	fi, err := f.getInfo(ctx, abspath)
 	if err == nil {
 		if fi.IsDir {
 			return nil
@@ -657,11 +660,11 @@ func (f *Fs) mkdir(abspath string) error {
 		return errors.Wrapf(err, "mkdir %q failed", abspath)
 	}
 	parent := path.Dir(abspath)
-	err = f.mkdir(parent)
+	err = f.mkdir(ctx, parent)
 	if err != nil {
 		return err
 	}
-	c, connErr := f.getFtpConnection()
+	c, connErr := f.getFtpConnection(ctx)
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
@@ -681,23 +684,23 @@ func (f *Fs) mkdir(abspath string) error {

 // mkParentDir makes the parent of remote if necessary and any
 // directories above that
-func (f *Fs) mkParentDir(remote string) error {
+func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
 	parent := path.Dir(remote)
-	return f.mkdir(path.Join(f.root, parent))
+	return f.mkdir(ctx, path.Join(f.root, parent))
 }

 // Mkdir creates the directory if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 	// defer fs.Trace(dir, "")("err=%v", &err)
 	root := path.Join(f.root, dir)
-	return f.mkdir(root)
+	return f.mkdir(ctx, root)
 }

 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
@@ -713,11 +716,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	err := f.mkParentDir(remote)
+	err := f.mkParentDir(ctx, remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move mkParentDir failed")
 	}
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move")
 	}
@@ -754,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	dstPath := path.Join(f.root, dstRemote)

 	// Check if destination exists
-	fi, err := f.getInfo(dstPath)
+	fi, err := f.getInfo(ctx, dstPath)
 	if err == nil {
 		if fi.IsDir {
 			return fs.ErrorDirExists
@@ -765,13 +768,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// Make sure the parent directory exists
-	err = f.mkdir(path.Dir(dstPath))
+	err = f.mkdir(ctx, path.Dir(dstPath))
 	if err != nil {
 		return errors.Wrap(err, "DirMove mkParentDir dst failed")
 	}

 	// Do the move
-	c, err := f.getFtpConnection()
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "DirMove")
 	}
@@ -903,7 +906,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 	}
-	c, err := o.fs.getFtpConnection()
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
 	}
@@ -938,7 +941,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debugf(o, "Removed after failed upload: %v", err)
 		}
 	}
-	c, err := o.fs.getFtpConnection()
+	c, err := o.fs.getFtpConnection(ctx)
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
@@ -950,7 +953,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return errors.Wrap(err, "update stor")
 	}
 	o.fs.putFtpConnection(&c, nil)
-	o.info, err = o.fs.getInfo(path)
+	o.info, err = o.fs.getInfo(ctx, path)
 	if err != nil {
 		return errors.Wrap(err, "update getinfo")
 	}
@@ -962,14 +965,14 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	// defer fs.Trace(o, "")("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// Check if it's a directory or a file
-	info, err := o.fs.getInfo(path)
+	info, err := o.fs.getInfo(ctx, path)
 	if err != nil {
 		return err
 	}
 	if info.IsDir {
 		err = o.fs.Rmdir(ctx, o.remote)
 	} else {
-		c, err := o.fs.getFtpConnection()
+		c, err := o.fs.getFtpConnection(ctx)
 		if err != nil {
 			return errors.Wrap(err, "Remove")
 		}
backend/googlecloudstorage
@@ -375,7 +375,7 @@ func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http
 	if err != nil {
 		return nil, errors.Wrap(err, "error processing credentials")
 	}
-	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(fs.Config))
+	ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(fs.GetConfig(ctx)))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }

@@ -432,7 +432,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
 	}
 	f.setRoot(root)
backend/googlephotos
@@ -254,7 +254,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}

-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(fs.GetConfig(ctx))
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure Box")
@@ -272,7 +272,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		unAuth:    rest.NewClient(baseClient),
 		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
 		ts:        ts,
-		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer:     fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 		startTime: time.Now(),
 		albums:    map[bool]*albums{},
 		uploaded:  dirtree.New(),
backend/http
@@ -115,8 +115,9 @@ type Options struct {
 type Fs struct {
 	name        string
 	root        string
-	features    *fs.Features // optional features
-	opt         Options      // options for this backend
+	features    *fs.Features   // optional features
+	opt         Options        // options for this backend
+	ci          *fs.ConfigInfo // global config
 	endpoint    *url.URL
 	endpointURL string // endpoint as a string
 	httpClient  *http.Client
@@ -171,7 +172,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}

-	client := fshttp.NewClient(fs.Config)
+	client := fshttp.NewClient(fs.GetConfig(ctx))

 	var isFile = false
 	if !strings.HasSuffix(u.String(), "/") {
@@ -209,10 +210,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
+		ci:          ci,
 		httpClient:  client,
 		endpoint:    u,
 		endpointURL: u.String(),
@@ -439,14 +442,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	var (
 		entriesMu sync.Mutex // to protect entries
 		wg        sync.WaitGroup
-		in        = make(chan string, fs.Config.Checkers)
+		checkers  = f.ci.Checkers
+		in        = make(chan string, checkers)
 	)
 	add := func(entry fs.DirEntry) {
 		entriesMu.Lock()
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for i := 0; i < fs.Config.Checkers; i++ {
+	for i := 0; i < checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
backend/hubic
@@ -157,11 +157,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}

 	// Make the swift Connection
+	ci := fs.GetConfig(ctx)
 	c := &swiftLib.Connection{
 		Auth:           newAuth(f),
-		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
-		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
-		Transport:      fshttp.NewTransport(fs.Config),
+		ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
+		Transport:      fshttp.NewTransport(fs.GetConfig(ctx)),
 	}
 	err = c.Authenticate()
 	if err != nil {
backend/jottacloud
@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

 // v1config configure a jottacloud backend using legacy authentication
 func v1config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(fs.Config))
+	srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx)))

 	fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
 	if config.Confirm(false) {
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)

 // v2config configure a jottacloud backend using the modern JottaCli token based authentication
 func v2config(ctx context.Context, name string, m configmap.Mapper) {
-	srv := rest.NewClient(fshttp.NewClient(fs.Config))
+	srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx)))

 	fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
 	fmt.Printf("Login Token> ")
@@ -661,7 +661,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, errors.New("Outdated config - please reconfigure this backend")
 	}

-	baseClient := fshttp.NewClient(fs.Config)
+	baseClient := fshttp.NewClient(fs.GetConfig(ctx))

 	if ver == configVersion {
 		oauthConfig.ClientID = "jottacli"
@@ -711,7 +711,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:    *opt,
 		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
 		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
-		pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
backend/koofr
@@ -267,7 +267,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 		return nil, err
 	}
 	httpClient := httpclient.New()
-	httpClient.Client = fshttp.NewClient(fs.Config)
+	httpClient.Client = fshttp.NewClient(fs.GetConfig(ctx))
 	client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
 	basicAuth := fmt.Sprintf("Basic %s",
 		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
backend/mailru
@@ -273,6 +273,7 @@ type Fs struct {
 	name         string
 	root         string         // root path
 	opt          Options        // parsed options
+	ci           *fs.ConfigInfo // global config
 	speedupGlobs []string       // list of file name patterns eligible for speedup
 	speedupAny   bool           // true if all file names are eligible for speedup
 	features     *fs.Features   // optional features
@@ -312,10 +313,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	// However the f.root string should not have leading or trailing slashes
 	root = strings.Trim(root, "/")

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		root: root,
 		opt:  *opt,
+		ci:   ci,
 		m:    m,
 	}

@@ -324,7 +327,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	f.quirks.parseQuirks(opt.Quirks)

-	f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))
+	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleepPacer), pacer.MaxSleep(maxSleepPacer), pacer.DecayConstant(decayConstPacer)))

 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -335,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}).Fill(ctx, f)

 	// Override few config settings and create a client
-	clientConfig := *fs.Config
+	clientConfig := *fs.GetConfig(ctx)
 	if opt.UserAgent != "" {
 		clientConfig.UserAgent = opt.UserAgent
 	}
@@ -692,7 +695,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entries, err = f.listM1(ctx, f.absPath(dir), 0, maxInt32)
 	}

-	if err == nil && fs.Config.LogLevel >= fs.LogLevelDebug {
+	if err == nil && f.ci.LogLevel >= fs.LogLevelDebug {
 		names := []string{}
 		for _, entry := range entries {
 			names = append(names, entry.Remote())
@@ -956,7 +959,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
 		return nil, r.Error()
 	}

-	if fs.Config.LogLevel >= fs.LogLevelDebug {
+	if t.f.ci.LogLevel >= fs.LogLevelDebug {
 		ctime, _ := modTime.MarshalJSON()
 		fs.Debugf(t.f, "binDir %d.%d %q %q (%d) %s", t.level, itemType, t.currDir, name, size, ctime)
 	}
@@ -2376,7 +2379,7 @@ func (p *serverPool) addServer(url string, now time.Time) {
 	expiry := now.Add(p.expirySec * time.Second)

 	expiryStr := []byte("-")
-	if fs.Config.LogLevel >= fs.LogLevelInfo {
+	if p.fs.ci.LogLevel >= fs.LogLevelInfo {
 		expiryStr, _ = expiry.MarshalJSON()
 	}

backend/mega
@@ -194,6 +194,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}
 	}
+	ci := fs.GetConfig(ctx)

 	// cache *mega.Mega on username so we can re-use and share
 	// them between remotes. They are expensive to make as they
@@ -204,8 +205,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	defer megaCacheMu.Unlock()
 	srv := megaCache[opt.User]
 	if srv == nil {
-		srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
-		srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
+		srv = mega.New().SetClient(fshttp.NewClient(fs.GetConfig(ctx)))
+		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
 		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})
@@ -228,7 +229,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		root:  root,
 		opt:   *opt,
 		srv:   srv,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		DuplicateFiles: true,
backend/onedrive
@@ -81,6 +81,7 @@ func init() {
 		Description: "Microsoft OneDrive",
 		NewFs:       NewFs,
 		Config: func(ctx context.Context, name string, m configmap.Mapper) {
+			ci := fs.GetConfig(ctx)
 			err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
@@ -88,7 +89,7 @@ func init() {
 			}

 			// Stop if we are running non-interactive config
-			if fs.Config.AutoConfirm {
+			if ci.AutoConfirm {
 				return
 			}

@@ -363,6 +364,7 @@ type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
 	opt      Options            // parsed options
+	ci       *fs.ConfigInfo     // global config
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -618,14 +620,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, errors.Wrap(err, "failed to configure OneDrive")
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:      name,
 		root:      root,
 		opt:       *opt,
+		ci:        ci,
 		driveID:   opt.DriveID,
 		driveType: opt.DriveType,
 		srv:       rest.NewClient(oAuthClient).SetRoot(graphURL + "/drives/" + opt.DriveID),
-		pacer:     fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -971,7 +975,7 @@ func (f *Fs) Precision() time.Duration {

 // waitForJob waits for the job with status in url to complete
 func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
-	deadline := time.Now().Add(fs.Config.Timeout)
+	deadline := time.Now().Add(f.ci.Timeout)
 	for time.Now().Before(deadline) {
 		var resp *http.Response
 		var err error
@@ -1007,7 +1011,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {

 		time.Sleep(1 * time.Second)
 	}
-	return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
+	return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
 }

 // Copy src to this remote using server-side copy operations.
@@ -1300,7 +1304,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,

 // CleanUp deletes all the hidden files.
 func (f *Fs) CleanUp(ctx context.Context) error {
-	token := make(chan struct{}, fs.Config.Checkers)
+	token := make(chan struct{}, f.ci.Checkers)
 	var wg sync.WaitGroup
 	err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
 		err = entries.ForObjectError(func(obj fs.Object) error {
backend/opendrive
@@ -187,8 +187,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		srv:   rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetErrorHandler(errorHandler),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}

 	f.dirCache = dircache.New(root, "0", f)
backend/pcloud
@@ -299,7 +299,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		root:  root,
 		opt:   *opt,
 		srv:   rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: false,
backend/premiumizeme
@@ -252,7 +252,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 			return nil, errors.Wrap(err, "failed to configure premiumize.me")
 		}
 	} else {
-		client = fshttp.NewClient(fs.Config)
+		client = fshttp.NewClient(fs.GetConfig(ctx))
 	}

 	f := &Fs{
@@ -260,7 +260,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		root:  root,
 		opt:   *opt,
 		srv:   rest.NewClient(client).SetRoot(rootURL),
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
backend/putio
@@ -77,7 +77,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
 		return nil, err
 	}
 	root = parsePath(root)
-	httpClient := fshttp.NewClient(fs.Config)
+	httpClient := fshttp.NewClient(fs.GetConfig(ctx))
 	oAuthClient, _, err := oauthutil.NewClientWithBaseClient(ctx, name, m, putioConfig, httpClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure putio")
@@ -86,7 +86,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (f fs.Fs,
 		name:        name,
 		root:        root,
 		opt:         *opt,
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		client:      putio.NewClient(oAuthClient),
 		httpClient:  httpClient,
 		oAuthClient: oAuthClient,
backend/qingstor
@@ -228,7 +228,7 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 }

 // qsConnection makes a connection to qingstor
-func qsServiceConnection(opt *Options) (*qs.Service, error) {
+func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
 	accessKeyID := opt.AccessKeyID
 	secretAccessKey := opt.SecretAccessKey

@@ -277,7 +277,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
 	cf.Host = host
 	cf.Port = port
 	// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
-	cf.Connection = fshttp.NewClient(fs.Config)
+	cf.Connection = fshttp.NewClient(fs.GetConfig(ctx))

 	return qs.Init(cf)
 }
@@ -334,7 +334,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, errors.Wrap(err, "qingstor: upload cutoff")
 	}
-	svc, err := qsServiceConnection(opt)
+	svc, err := qsServiceConnection(ctx, opt)
 	if err != nil {
 		return nil, err
 	}
backend/s3
@@ -1285,6 +1285,8 @@ type Fs struct {
 	name     string           // the name of the remote
 	root     string           // root of the bucket - ignore all objects above this
 	opt      Options          // parsed options
+	ci       *fs.ConfigInfo   // global config
+	ctx      context.Context  // global context for reading config
 	features *fs.Features     // optional features
 	c        *s3.S3           // the connection to the s3 server
 	ses      *session.Session // the s3 session
@@ -1401,9 +1403,9 @@ func (o *Object) split() (bucket, bucketPath string) {
 }

 // getClient makes an http client according to the options
-func getClient(opt *Options) *http.Client {
+func getClient(ctx context.Context, opt *Options) *http.Client {
 	// TODO: Do we need cookies too?
-	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
+	t := fshttp.NewTransportCustom(fs.GetConfig(ctx), func(t *http.Transport) {
 		if opt.DisableHTTP2 {
 			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
 		}
@@ -1414,7 +1416,7 @@ func getClient(opt *Options) *http.Client {
 }

 // s3Connection makes a connection to s3
-func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
+func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) {
 	// Make the auth
 	v := credentials.Value{
 		AccessKeyID: opt.AccessKeyID,
@@ -1492,7 +1494,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	awsConfig := aws.NewConfig().
 		WithMaxRetries(0). // Rely on rclone's retry logic
 		WithCredentials(cred).
-		WithHTTPClient(getClient(opt)).
+		WithHTTPClient(getClient(ctx, opt)).
 		WithS3ForcePathStyle(opt.ForcePathStyle).
 		WithS3UseAccelerate(opt.UseAccelerateEndpoint).
 		WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1599,23 +1601,26 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
 		opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
 	}
-	c, ses, err := s3Connection(opt)
+	c, ses, err := s3Connection(ctx, opt)
 	if err != nil {
 		return nil, err
 	}

+	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
+		ci:    ci,
+		ctx:   ctx,
 		c:     c,
 		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
-		srv:   getClient(opt),
+		srv:   getClient(ctx, opt),
 		pool: pool.New(
 			time.Duration(opt.MemoryPoolFlushTime),
 			int(opt.ChunkSize),
-			opt.UploadConcurrency*fs.Config.Transfers,
+			opt.UploadConcurrency*ci.Transfers,
 			opt.MemoryPoolUseMmap,
 		),
 	}
@@ -1728,7 +1733,7 @@ func (f *Fs) updateRegionForBucket(bucket string) error {
 	// Make a new session with the new region
 	oldRegion := f.opt.Region
 	f.opt.Region = region
-	c, ses, err := s3Connection(&f.opt)
+	c, ses, err := s3Connection(f.ctx, &f.opt)
 	if err != nil {
 		return errors.Wrap(err, "creating new session failed")
 	}
@@ -2343,7 +2348,7 @@ func (f *Fs) getMemoryPool(size int64) *pool.Pool {
 	return pool.New(
 		time.Duration(f.opt.MemoryPoolFlushTime),
 		int(size),
-		f.opt.UploadConcurrency*fs.Config.Transfers,
+		f.opt.UploadConcurrency*f.ci.Transfers,
 		f.opt.MemoryPoolUseMmap,
 	)
 }
@@ -2810,7 +2815,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	if fs.Config.UseServerModTime {
+	if o.fs.ci.UseServerModTime {
 		return o.lastModified
 	}
 	err := o.readMetaData(ctx)
backend/seafile
@@ -1,6 +1,7 @@
 package seafile

 import (
+	"context"
 	"fmt"
 	"net/url"
 	"sync"
@@ -27,7 +28,7 @@ func init() {
 }

 // getPacer returns the unique pacer for that remote URL
-func getPacer(remote string) *fs.Pacer {
+func getPacer(ctx context.Context, remote string) *fs.Pacer {
 	pacerMutex.Lock()
 	defer pacerMutex.Unlock()

@@ -37,6 +38,7 @@ func getPacer(remote string) *fs.Pacer {
 	}

 	pacers[remote] = fs.NewPacer(
+		ctx,
 		pacer.NewDefault(
 			pacer.MinSleep(minSleep),
 			pacer.MaxSleep(maxSleep),
@@ -197,8 +197,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:         *opt,
 		endpoint:    u,
 		endpointURL: u.String(),
-		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
-		pacer:       getPacer(opt.URL),
+		srv:         rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(u.String()),
+		pacer:       getPacer(ctx, opt.URL),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -297,6 +297,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 // Config callback for 2FA
 func Config(ctx context.Context, name string, m configmap.Mapper) {
+	ci := fs.GetConfig(ctx)
 	serverURL, ok := m.Get(configURL)
 	if !ok || serverURL == "" {
 		// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
@@ -305,7 +306,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper) {
 	}

 	// Stop if we are running non-interactive config
-	if fs.Config.AutoConfirm {
+	if ci.AutoConfirm {
 		return
 	}

@@ -342,7 +343,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper) {
 	if !strings.HasPrefix(url, "/") {
 		url += "/"
 	}
-	srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(url)
+	srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(url)

 	// We loop asking for a 2FA code
 	for {
@ -226,6 +226,7 @@ type Fs struct {
    root string
    absRoot string
    opt Options // parsed options
    ci *fs.ConfigInfo // global config
    m configmap.Mapper // config
    features *fs.Features // optional features
    config *ssh.ClientConfig

@ -252,8 +253,8 @@ type Object struct {
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
    dialer := fshttp.NewDialer(fs.Config)
func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
    dialer := fshttp.NewDialer(fs.GetConfig(ctx))
    conn, err := dialer.Dial(network, addr)
    if err != nil {
        return nil, err

@ -299,12 +300,12 @@ func (c *conn) closed() error {
}

// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
    // Rate limit rate of new connections
    c = &conn{
        err: make(chan error, 1),
    }
    c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
    c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
    if err != nil {
        return nil, errors.Wrap(err, "couldn't connect SSH")
    }

@ -347,7 +348,7 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
}

// Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) {
func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
    f.poolMu.Lock()
    for len(f.pool) > 0 {
        c = f.pool[0]

@ -364,7 +365,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
        return c, nil
    }
    err = f.pacer.Call(func() (bool, error) {
        c, err = f.sftpConnection()
        c, err = f.sftpConnection(ctx)
        if err != nil {
            return true, err
        }

@ -417,7 +418,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    // This will hold the Fs object. We need to create it here
    // so we can refer to it in the SSH callback, but it's populated
    // in NewFsWithConnection
    f := &Fs{}
    f := &Fs{
        ci: fs.GetConfig(ctx),
    }
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)

@ -435,8 +438,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        User: opt.User,
        Auth: []ssh.AuthMethod{},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
        Timeout: fs.Config.ConnectTimeout,
        ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
        Timeout: f.ci.ConnectTimeout,
        ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
    }

    if opt.KnownHostsFile != "" {

@ -603,7 +606,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
    f.config = sshConfig
    f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
    f.mkdirLock = newStringLock()
    f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
    f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
    f.savedpswd = ""

    f.features = (&fs.Features{

@ -611,7 +614,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
        SlowHash: true,
    }).Fill(ctx, f)
    // Make a connection and pool it to return errors early
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "NewFs")
    }

@ -679,7 +682,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
        fs: f,
        remote: remote,
    }
    err := o.stat()
    err := o.stat(ctx)
    if err != nil {
        return nil, err
    }

@ -688,11 +691,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// dirExists returns true,nil if the directory exists, false, nil if
// it doesn't or false, err
func (f *Fs) dirExists(dir string) (bool, error) {
func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
    if dir == "" {
        dir = "."
    }
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return false, errors.Wrap(err, "dirExists")
    }

@ -721,7 +724,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    root := path.Join(f.absRoot, dir)
    ok, err := f.dirExists(root)
    ok, err := f.dirExists(ctx, root)
    if err != nil {
        return nil, errors.Wrap(err, "List failed")
    }

@ -732,7 +735,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    if sftpDir == "" {
        sftpDir = "."
    }
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "List")
    }

@ -751,7 +754,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
            continue
        }
        oldInfo := info
        info, err = f.stat(remote)
        info, err = f.stat(ctx, remote)
        if err != nil {
            if !os.IsNotExist(err) {
                fs.Errorf(remote, "stat of non-regular file failed: %v", err)

@ -776,7 +779,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    err := f.mkParentDir(src.Remote())
    err := f.mkParentDir(ctx, src.Remote())
    if err != nil {
        return nil, errors.Wrap(err, "Put mkParentDir failed")
    }

@ -799,19 +802,19 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
    parent := path.Dir(remote)
    return f.mkdir(path.Join(f.absRoot, parent))
    return f.mkdir(ctx, path.Join(f.absRoot, parent))
}

// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(dirPath string) error {
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
    f.mkdirLock.Lock(dirPath)
    defer f.mkdirLock.Unlock(dirPath)
    if dirPath == "." || dirPath == "/" {
        return nil
    }
    ok, err := f.dirExists(dirPath)
    ok, err := f.dirExists(ctx, dirPath)
    if err != nil {
        return errors.Wrap(err, "mkdir dirExists failed")
    }

@ -819,11 +822,11 @@ func (f *Fs) mkdir(dirPath string) error {
        return nil
    }
    parent := path.Dir(dirPath)
    err = f.mkdir(parent)
    err = f.mkdir(ctx, parent)
    if err != nil {
        return err
    }
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "mkdir")
    }

@ -838,7 +841,7 @@ func (f *Fs) mkdir(dirPath string) error {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    root := path.Join(f.absRoot, dir)
    return f.mkdir(root)
    return f.mkdir(ctx, root)
}

// Rmdir removes the root directory of the Fs object

@ -854,7 +857,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    }
    // Remove the directory
    root := path.Join(f.absRoot, dir)
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }

@ -870,11 +873,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
        fs.Debugf(src, "Can't move - not same remote type")
        return nil, fs.ErrorCantMove
    }
    err := f.mkParentDir(remote)
    err := f.mkParentDir(ctx, remote)
    if err != nil {
        return nil, errors.Wrap(err, "Move mkParentDir failed")
    }
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "Move")
    }

@ -911,7 +914,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    dstPath := path.Join(f.absRoot, dstRemote)

    // Check if destination exists
    ok, err := f.dirExists(dstPath)
    ok, err := f.dirExists(ctx, dstPath)
    if err != nil {
        return errors.Wrap(err, "DirMove dirExists dst failed")
    }

@ -920,13 +923,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    // Make sure the parent directory exists
    err = f.mkdir(path.Dir(dstPath))
    err = f.mkdir(ctx, path.Dir(dstPath))
    if err != nil {
        return errors.Wrap(err, "DirMove mkParentDir dst failed")
    }

    // Do the move
    c, err := f.getSftpConnection()
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "DirMove")
    }

@ -942,8 +945,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// run runs cmd on the remote end returning standard output
func (f *Fs) run(cmd string) ([]byte, error) {
    c, err := f.getSftpConnection()
func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "run: get SFTP connection")
    }

@ -971,6 +974,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
    ctx := context.TODO()
    if f.opt.DisableHashCheck {
        return hash.Set(hash.None)
    }

@ -989,7 +993,7 @@ func (f *Fs) Hashes() hash.Set {
        }
        *changed = true
        for _, command := range commands {
            output, err := f.run(command)
            output, err := f.run(ctx, command)
            if err != nil {
                continue
            }

@ -1034,7 +1038,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    if len(escapedPath) == 0 {
        escapedPath = "/"
    }
    stdout, err := f.run("df -k " + escapedPath)
    stdout, err := f.run(ctx, "df -k "+escapedPath)
    if err != nil {
        return nil, errors.Wrap(err, "your remote may not support About")
    }

@ -1097,7 +1101,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
        return "", hash.ErrUnsupported
    }

    c, err := o.fs.getSftpConnection()
    c, err := o.fs.getSftpConnection(ctx)
    if err != nil {
        return "", errors.Wrap(err, "Hash get SFTP connection")
    }

@ -1205,8 +1209,8 @@ func (o *Object) setMetadata(info os.FileInfo) {
}

// statRemote stats the file or directory at the remote given
func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
    c, err := f.getSftpConnection()
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
    c, err := f.getSftpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "stat")
    }

@ -1217,8 +1221,8 @@ func (f *Fs) stat(remote string) (info os.FileInfo, err error) {
}

// stat updates the info in the Object
func (o *Object) stat() error {
    info, err := o.fs.stat(o.remote)
func (o *Object) stat(ctx context.Context) error {
    info, err := o.fs.stat(ctx, o.remote)
    if err != nil {
        if os.IsNotExist(err) {
            return fs.ErrorObjectNotFound

@ -1237,7 +1241,7 @@ func (o *Object) stat() error {
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    if o.fs.opt.SetModTime {
        c, err := o.fs.getSftpConnection()
        c, err := o.fs.getSftpConnection(ctx)
        if err != nil {
            return errors.Wrap(err, "SetModTime")
        }

@ -1247,7 +1251,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
            return errors.Wrap(err, "SetModTime failed")
        }
    }
    err := o.stat()
    err := o.stat(ctx)
    if err != nil {
        return errors.Wrap(err, "SetModTime stat failed")
    }

@ -1320,7 +1324,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
            }
        }
    }
    c, err := o.fs.getSftpConnection()
    c, err := o.fs.getSftpConnection(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "Open")
    }

@ -1344,7 +1348,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // Clear the hash cache since we are about to update the object
    o.md5sum = nil
    o.sha1sum = nil
    c, err := o.fs.getSftpConnection()
    c, err := o.fs.getSftpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "Update")
    }

@ -1355,7 +1359,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    }
    // remove the file if upload failed
    remove := func() {
        c, removeErr := o.fs.getSftpConnection()
        c, removeErr := o.fs.getSftpConnection(ctx)
        if removeErr != nil {
            fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
            return

@ -1387,7 +1391,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove a remote sftp file object
func (o *Object) Remove(ctx context.Context) error {
    c, err := o.fs.getSftpConnection()
    c, err := o.fs.getSftpConnection(ctx)
    if err != nil {
        return errors.Wrap(err, "Remove")
    }
@ -237,6 +237,7 @@ type Fs struct {
    name string // name of this remote
    root string // the path we are working on
    opt Options // parsed options
    ci *fs.ConfigInfo // global config
    features *fs.Features // optional features
    srv *rest.Client // the connection to the server
    dirCache *dircache.DirCache // Map of directory path to directory id

@ -441,12 +442,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, errors.Wrap(err, "failed to configure sharefile")
    }

    ci := fs.GetConfig(ctx)
    f := &Fs{
        name: name,
        root: root,
        opt: *opt,
        ci: ci,
        srv: rest.NewClient(client).SetRoot(opt.Endpoint + apiPath),
        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        CaseInsensitive: true,

@ -531,8 +534,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
    f.bufferTokens = make(chan []byte, fs.Config.Transfers)
    for i := 0; i < fs.Config.Transfers; i++ {
    f.bufferTokens = make(chan []byte, f.ci.Transfers)
    for i := 0; i < f.ci.Transfers; i++ {
        f.bufferTokens <- nil
    }
}
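fillBufferTokens above sizes a channel of reusable upload buffers from the per-context Transfers setting; the channel doubles as a counting semaphore, with nil entries meaning "allocate on first use". A minimal sketch of the idea, with hypothetical names:

package example

import (
    "context"

    "github.com/rclone/rclone/fs"
)

// uploader and newUploader are illustrative names only.
type uploader struct {
    bufferTokens chan []byte // counting semaphore of reusable buffers
}

func newUploader(ctx context.Context) *uploader {
    ci := fs.GetConfig(ctx)
    u := &uploader{bufferTokens: make(chan []byte, ci.Transfers)}
    for i := 0; i < ci.Transfers; i++ {
        u.bufferTokens <- nil // nil: allocate lazily on first use
    }
    return u
}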
@ -1338,7 +1341,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        Overwrite: true,
        CreatedDate: modTime,
        ModifiedDate: modTime,
        Tool: fs.Config.UserAgent,
        Tool: o.fs.ci.UserAgent,
    }

    if isLargeFile {

@ -1348,7 +1351,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        } else {
            // otherwise use threaded which is more efficient
            req.Method = "threaded"
            req.ThreadCount = &fs.Config.Transfers
            req.ThreadCount = &o.fs.ci.Transfers
            req.Filesize = &size
        }
    }

@ -58,7 +58,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
        return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
    }

    threads := fs.Config.Transfers
    threads := f.ci.Transfers
    if threads > info.MaxNumberOfThreads {
        threads = info.MaxNumberOfThreads
    }

@ -106,7 +106,7 @@ func init() {
        Method: "POST",
        Path: "/app-authorization",
    }
    srv := rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL) // FIXME
    srv := rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(rootURL) // FIXME

    // FIXME
    //err = f.pacer.Call(func() (bool, error) {

@ -403,13 +403,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }

    root = parsePath(root)
    client := fshttp.NewClient(fs.Config)
    client := fshttp.NewClient(fs.GetConfig(ctx))
    f := &Fs{
        name: name,
        root: root,
        opt: *opt,
        srv: rest.NewClient(client).SetRoot(rootURL),
        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        m: m,
        authExpiry: parseExpiry(opt.AuthorizationExpiry),
    }
@ -221,6 +221,7 @@ type Fs struct {
    root string // the path we are working on if any
    features *fs.Features // optional features
    opt Options // options for this backend
    ci *fs.ConfigInfo // global config
    c *swift.Connection // the connection to the swift server
    rootContainer string // container part of root (if any)
    rootDirectory string // directory part of root (if any)

@ -340,7 +341,8 @@ func (o *Object) split() (container, containerPath string) {
}

// swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
    ci := fs.GetConfig(ctx)
    c := &swift.Connection{
        // Keep these in the same order as the Config for ease of checking
        UserName: opt.User,

@ -359,9 +361,9 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
        ApplicationCredentialName: opt.ApplicationCredentialName,
        ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
        EndpointType: swift.EndpointType(opt.EndpointType),
        ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
        Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
        Transport: fshttp.NewTransport(fs.Config),
        ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
        Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
        Transport: fshttp.NewTransport(fs.GetConfig(ctx)),
    }
    if opt.EnvAuth {
        err := c.ApplyEnvironment()

@ -433,12 +435,14 @@ func (f *Fs) setRoot(root string) {
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
    ci := fs.GetConfig(ctx)
    f := &Fs{
        name: name,
        opt: *opt,
        ci: ci,
        c: c,
        noCheckContainer: noCheckContainer,
        pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
        pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
        cache: bucket.NewCache(),
    }
    f.setRoot(root)

@ -485,7 +489,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, errors.Wrap(err, "swift: chunk size")
    }

    c, err := swiftConnection(opt, name)
    c, err := swiftConnection(ctx, opt, name)
    if err != nil {
        return nil, err
    }

@ -849,7 +853,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
        return fs.ErrorListBucketRequired
    }
    // Delete all the files including the directory markers
    toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
    toBeDeleted := make(chan fs.Object, f.ci.Transfers)
    delErr := make(chan error, 1)
    go func() {
        delErr <- operations.DeleteFiles(ctx, toBeDeleted)

@ -1040,7 +1044,7 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
    if fs.Config.UseServerModTime {
    if o.fs.ci.UseServerModTime {
        return o.lastModified
    }
    err := o.readMetaData()
@ -182,7 +182,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
    }
    req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

    client := fshttp.NewClient(fs.Config)
    client := fshttp.NewClient(fs.GetConfig(ctx))
    resp, err := client.Do(req)
    if err != nil {
        return nil, errors.Wrap(err, "Error while logging in to endpoint")

@ -336,8 +336,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        opt: *opt,
        endpoint: u,
        endpointURL: u.String(),
        srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        srv: rest.NewClient(fshttp.NewClient(fs.GetConfig(ctx))).SetRoot(u.String()),
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        precision: fs.ModTimeNotSupported,
    }
    f.features = (&fs.Features{

@ -88,12 +88,13 @@ type Options struct {
// Fs represents a remote yandex
type Fs struct {
    name string
    root string // root path
    opt Options // parsed options
    features *fs.Features // optional features
    srv *rest.Client // the connection to the yandex server
    pacer *fs.Pacer // pacer for API calls
    diskRoot string // root path with "disk:/" container name
    root string // root path
    opt Options // parsed options
    ci *fs.ConfigInfo // global config
    features *fs.Features // optional features
    srv *rest.Client // the connection to the yandex server
    pacer *fs.Pacer // pacer for API calls
    diskRoot string // root path with "disk:/" container name
}

// Object describes a swift object

@ -265,11 +266,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        log.Fatalf("Failed to configure Yandex: %v", err)
    }

    ci := fs.GetConfig(ctx)
    f := &Fs{
        name: name,
        opt: *opt,
        ci: ci,
        srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
        pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.setRoot(root)
    f.features = (&fs.Features{

@ -534,7 +537,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
        RootURL: location,
        Method: "GET",
    }
    deadline := time.Now().Add(fs.Config.Timeout)
    deadline := time.Now().Add(f.ci.Timeout)
    for time.Now().Before(deadline) {
        var resp *http.Response
        var body []byte

@ -565,7 +568,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {

        time.Sleep(1 * time.Second)
    }
    return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
    return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
}

func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
cmd/cmd.go
@ -234,12 +234,13 @@ func ShowStats() bool {

// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
    ci := fs.GetConfig(context.Background())
    var cmdErr error
    stopStats := func() {}
    if !showStats && ShowStats() {
        showStats = true
    }
    if fs.Config.Progress {
    if ci.Progress {
        stopStats = startProgress()
    } else if showStats {
        stopStats = StartStats()

@ -291,13 +292,13 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
    }
    fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())

    if fs.Config.Progress && fs.Config.ProgressTerminalTitle {
    if ci.Progress && ci.ProgressTerminalTitle {
        // Clear terminal title
        terminal.WriteTerminalTitle("")
    }

    // dump all running go-routines
    if fs.Config.Dump&fs.DumpGoRoutines != 0 {
    if ci.Dump&fs.DumpGoRoutines != 0 {
        err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
        if err != nil {
            fs.Errorf(nil, "Failed to dump goroutines: %v", err)

@ -305,7 +306,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
    }

    // dump open files
    if fs.Config.Dump&fs.DumpOpenFiles != 0 {
    if ci.Dump&fs.DumpOpenFiles != 0 {
        c := exec.Command("lsof", "-p", strconv.Itoa(os.Getpid()))
        c.Stdout = os.Stdout
        c.Stderr = os.Stderr

@ -372,17 +373,18 @@ func StartStats() func() {

// initConfig is run by cobra after initialising the flags
func initConfig() {
    ci := fs.GetConfig(context.Background())
    // Activate logger systemd support if systemd invocation ID is detected
    _, sysdLaunch := systemd.GetInvocationID()
    if sysdLaunch {
        fs.Config.LogSystemdSupport = true // used during fslog.InitLogging()
        ci.LogSystemdSupport = true // used during fslog.InitLogging()
    }

    // Start the logger
    fslog.InitLogging()

    // Finish parsing any command line flags
    configflags.SetFlags()
    configflags.SetFlags(ci)

    // Load filters
    err := filterflags.Reload()

@ -396,7 +398,7 @@ func initConfig() {
    // Inform user about systemd log support now that we have a logger
    if sysdLaunch {
        fs.Debugf("rclone", "systemd logging support automatically activated")
    } else if fs.Config.LogSystemdSupport {
    } else if ci.LogSystemdSupport {
        fs.Debugf("rclone", "systemd logging support manually activated")
    }

@ -448,16 +450,17 @@ func initConfig() {

    if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); m == false {
        fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
        fs.Config.DataRateUnit = "bytes"
        ci.DataRateUnit = "bytes"
    } else {
        fs.Config.DataRateUnit = *dataRateUnit
        ci.DataRateUnit = *dataRateUnit
    }
}

func resolveExitCode(err error) {
    ci := fs.GetConfig(context.Background())
    atexit.Run()
    if err == nil {
        if fs.Config.ErrorOnNoTransfer {
        if ci.ErrorOnNoTransfer {
            if accounting.GlobalStats().GetTransfers() == 0 {
                os.Exit(exitCodeNoFilesTransferred)
            }

@ -43,7 +43,7 @@ password to protect your configuration.
`,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 0, command, args)
        config.EditConfig()
        config.EditConfig(context.Background())
    },
}

@ -1,6 +1,7 @@
package cmd

import (
    "context"
    "fmt"
    "log"
    "os"

@ -166,8 +167,9 @@ func runRoot(cmd *cobra.Command, args []string) {
//
// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
func setupRootCommand(rootCmd *cobra.Command) {
    ci := fs.GetConfig(context.Background())
    // Add global flags
    configflags.AddFlags(pflag.CommandLine)
    configflags.AddFlags(ci, pflag.CommandLine)
    filterflags.AddFlags(pflag.CommandLine)
    rcflags.AddFlags(pflag.CommandLine)
    logflags.AddFlags(pflag.CommandLine)
@ -249,9 +249,11 @@ func (r *results) checkStringPositions(k, s string) {
// check we can write a file with the control chars
func (r *results) checkControls() {
    fs.Infof(r.f, "Trying to create control character file names")
    ci := fs.GetConfig(context.Background())

    // Concurrency control
    tokens := make(chan struct{}, fs.Config.Checkers)
    for i := 0; i < fs.Config.Checkers; i++ {
    tokens := make(chan struct{}, ci.Checkers)
    for i := 0; i < ci.Checkers; i++ {
        tokens <- struct{}{}
    }
    var wg sync.WaitGroup

@ -49,9 +49,10 @@ If you just want the directory names use "rclone lsf --dirs-only".

` + lshelp.Help,
    Run: func(command *cobra.Command, args []string) {
        ci := fs.GetConfig(context.Background())
        cmd.CheckArgs(1, 1, command, args)
        if recurse {
            fs.Config.MaxDepth = 0
            ci.MaxDepth = 0
        }
        fsrc := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {

@ -162,12 +162,13 @@ func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool)
// Scan the Fs passed in, returning a root directory channel and an
// error channel
func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) {
    ci := fs.GetConfig(ctx)
    root := make(chan *Dir, 1)
    errChan := make(chan error, 1)
    updated := make(chan struct{}, 1)
    go func() {
        parents := map[string]*Dir{}
        err := walk.Walk(ctx, f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
        err := walk.Walk(ctx, f, "", false, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
            if err != nil {
                return err // FIXME mark directory as errored instead of aborting
            }

@ -177,7 +177,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
    }

    // Do HTTP request
    client := fshttp.NewClient(fs.Config)
    client := fshttp.NewClient(fs.GetConfig(ctx))
    url += path
    data, err := json.Marshal(in)
    if err != nil {

@ -145,7 +145,7 @@ func (s *server) serve() (err error) {
    // An SSH server is represented by a ServerConfig, which holds
    // certificate details and handles authentication of ServerConns.
    s.config = &ssh.ServerConfig{
        ServerVersion: "SSH-2.0-" + fs.Config.UserAgent,
        ServerVersion: "SSH-2.0-" + fs.GetConfig(s.ctx).UserAgent,
        PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
            fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
            if s.proxy != nil {

@ -108,8 +108,9 @@ short options as they conflict with rclone's short options.
    opts.CTimeSort = opts.CTimeSort || sort == "ctime"
    opts.NameSort = sort == "name"
    opts.SizeSort = sort == "size"
    ci := fs.GetConfig(context.Background())
    if opts.DeepLevel == 0 {
        opts.DeepLevel = fs.Config.MaxDepth
        opts.DeepLevel = ci.MaxDepth
    }
    cmd.Run(false, false, command, func() error {
        return Tree(fsrc, outFile, &opts)
@ -41,6 +41,7 @@ type Account struct {
    mu sync.Mutex // mutex protects these values
    in io.Reader
    ctx context.Context // current context for transfer - may change
    ci *fs.ConfigInfo
    origIn io.ReadCloser
    close io.Closer
    size int64

@ -74,6 +75,7 @@ func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser,
        stats: stats,
        in: in,
        ctx: ctx,
        ci: fs.GetConfig(ctx),
        close: in,
        origIn: in,
        size: size,

@ -85,10 +87,10 @@ func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser,
            max: -1,
        },
    }
    if fs.Config.CutoffMode == fs.CutoffModeHard {
        acc.values.max = int64((fs.Config.MaxTransfer))
    if acc.ci.CutoffMode == fs.CutoffModeHard {
        acc.values.max = int64((acc.ci.MaxTransfer))
    }
    currLimit := fs.Config.BwLimitFile.LimitAt(time.Now())
    currLimit := acc.ci.BwLimitFile.LimitAt(time.Now())
    if currLimit.Bandwidth > 0 {
        fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth)
        acc.tokenBucket = newTokenBucket(currLimit.Bandwidth)

@ -107,14 +109,14 @@ func (acc *Account) WithBuffer() *Account {
    }
    acc.withBuf = true
    var buffers int
    if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
        buffers = int(int64(fs.Config.BufferSize) / asyncreader.BufferSize)
    if acc.size >= int64(acc.ci.BufferSize) || acc.size == -1 {
        buffers = int(int64(acc.ci.BufferSize) / asyncreader.BufferSize)
    } else {
        buffers = int(acc.size / asyncreader.BufferSize)
    }
    // On big files add a buffer
    if buffers > 0 {
        rc, err := asyncreader.New(acc.origIn, buffers)
        rc, err := asyncreader.New(acc.ctx, acc.origIn, buffers)
        if err != nil {
            fs.Errorf(acc.name, "Failed to make buffer: %v", err)
        } else {

@ -472,7 +474,7 @@ func (acc *Account) String() string {
        }
    }

    if fs.Config.DataRateUnit == "bits" {
    if acc.ci.DataRateUnit == "bits" {
        cur = cur * 8
    }

@ -482,8 +484,8 @@ func (acc *Account) String() string {
    }

    return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
        fs.Config.StatsFileNameLength,
        shortenName(acc.name, fs.Config.StatsFileNameLength),
        acc.ci.StatsFileNameLength,
        shortenName(acc.name, acc.ci.StatsFileNameLength),
        percentageDone,
        fs.SizeSuffix(b),
        fs.SizeSuffix(cur),
@ -258,13 +258,14 @@ func TestAccountAccounter(t *testing.T) {

func TestAccountMaxTransfer(t *testing.T) {
    ctx := context.Background()
    old := fs.Config.MaxTransfer
    oldMode := fs.Config.CutoffMode
    ci := fs.GetConfig(ctx)
    old := ci.MaxTransfer
    oldMode := ci.CutoffMode

    fs.Config.MaxTransfer = 15
    ci.MaxTransfer = 15
    defer func() {
        fs.Config.MaxTransfer = old
        fs.Config.CutoffMode = oldMode
        ci.MaxTransfer = old
        ci.CutoffMode = oldMode
    }()

    in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))

@ -284,7 +285,7 @@ func TestAccountMaxTransfer(t *testing.T) {
    assert.Equal(t, ErrorMaxTransferLimitReachedFatal, err)
    assert.True(t, fserrors.IsFatalError(err))

    fs.Config.CutoffMode = fs.CutoffModeSoft
    ci.CutoffMode = fs.CutoffModeSoft
    stats = NewStats(ctx)
    acc = newAccountSizeName(ctx, stats, in, 1, "test")

@ -301,13 +302,14 @@ func TestAccountMaxTransfer(t *testing.T) {

func TestAccountMaxTransferWriteTo(t *testing.T) {
    ctx := context.Background()
    old := fs.Config.MaxTransfer
    oldMode := fs.Config.CutoffMode
    ci := fs.GetConfig(ctx)
    old := ci.MaxTransfer
    oldMode := ci.CutoffMode

    fs.Config.MaxTransfer = 15
    ci.MaxTransfer = 15
    defer func() {
        fs.Config.MaxTransfer = old
        fs.Config.CutoffMode = oldMode
        ci.MaxTransfer = old
        ci.CutoffMode = oldMode
    }()

    in := ioutil.NopCloser(readers.NewPatternReader(1024))
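These tests still mutate the config returned by fs.GetConfig(ctx), which for context.Background() is the global one, and restore it in a defer. With the new API a test could instead take a private copy via fs.AddConfig so nothing needs restoring; a sketch (not how the tests above are written):

package example

import (
    "context"
    "testing"

    "github.com/rclone/rclone/fs"
)

func TestWithPrivateConfig(t *testing.T) {
    // AddConfig hangs a shallow copy of the config off a new context,
    // so these writes cannot leak into other tests and need no defer.
    ctx, ci := fs.AddConfig(context.Background())
    ci.MaxTransfer = 15
    ci.CutoffMode = fs.CutoffModeHard
    _ = ctx // hand ctx to the code under test
}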
@ -15,8 +15,9 @@ type inProgress struct {

// newInProgress makes a new inProgress object
func newInProgress(ctx context.Context) *inProgress {
    ci := fs.GetConfig(ctx)
    return &inProgress{
        m: make(map[string]*Account, fs.Config.Transfers),
        m: make(map[string]*Account, ci.Transfers),
    }
}
@ -24,6 +24,7 @@ var startTime = time.Now()
type StatsInfo struct {
    mu sync.RWMutex
    ctx context.Context
    ci *fs.ConfigInfo
    bytes int64
    errors int64
    lastError error

@ -52,10 +53,12 @@ type StatsInfo struct {

// NewStats creates an initialised StatsInfo
func NewStats(ctx context.Context) *StatsInfo {
    ci := fs.GetConfig(ctx)
    return &StatsInfo{
        ctx: ctx,
        checking: newTransferMap(fs.Config.Checkers, "checking"),
        transferring: newTransferMap(fs.Config.Transfers, "transferring"),
        ci: ci,
        checking: newTransferMap(ci.Checkers, "checking"),
        transferring: newTransferMap(ci.Transfers, "transferring"),
        inProgress: newInProgress(ctx),
    }
}
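Two idioms recur throughout this commit: long-lived objects such as StatsInfo capture the *fs.ConfigInfo once at construction, while short-lived helpers read it at the call site. A sketch of both, with hypothetical names:

package example

import (
    "context"

    "github.com/rclone/rclone/fs"
)

// Long-lived object: snapshot the config pointer when constructed,
// as StatsInfo and the backend Fs structs in this commit do.
type worker struct {
    ci *fs.ConfigInfo
}

func newWorker(ctx context.Context) *worker {
    return &worker{ci: fs.GetConfig(ctx)}
}

// Short-lived helper: read the config at the point of use instead.
func retries(ctx context.Context) int {
    return fs.GetConfig(ctx).LowLevelRetries
}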
@ -243,7 +246,7 @@ func (s *StatsInfo) String() string {
    }

    displaySpeed := speed
    if fs.Config.DataRateUnit == "bits" {
    if s.ci.DataRateUnit == "bits" {
        displaySpeed *= 8
    }

@ -259,7 +262,7 @@ func (s *StatsInfo) String() string {
        dateString = ""
    )

    if !fs.Config.StatsOneLine {
    if !s.ci.StatsOneLine {
        _, _ = fmt.Fprintf(buf, "\nTransferred: ")
    } else {
        xfrchk := []string{}

@ -272,9 +275,9 @@ func (s *StatsInfo) String() string {
        if len(xfrchk) > 0 {
            xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
        }
        if fs.Config.StatsOneLineDate {
        if s.ci.StatsOneLineDate {
            t := time.Now()
            dateString = t.Format(fs.Config.StatsOneLineDateFormat) // Including the separator so people can customize it
            dateString = t.Format(s.ci.StatsOneLineDateFormat) // Including the separator so people can customize it
        }
    }

@ -283,17 +286,17 @@ func (s *StatsInfo) String() string {
        fs.SizeSuffix(s.bytes),
        fs.SizeSuffix(totalSize).Unit("Bytes"),
        percent(s.bytes, totalSize),
        fs.SizeSuffix(displaySpeed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
        fs.SizeSuffix(displaySpeed).Unit(strings.Title(s.ci.DataRateUnit)+"/s"),
        etaString(currentSize, totalSize, speed),
        xfrchkString,
    )

    if fs.Config.ProgressTerminalTitle {
    if s.ci.ProgressTerminalTitle {
        // Writes ETA to the terminal title
        terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
    }

    if !fs.Config.StatsOneLine {
    if !s.ci.StatsOneLine {
        _, _ = buf.WriteRune('\n')
        errorDetails := ""
        switch {

@ -333,7 +336,7 @@ func (s *StatsInfo) String() string {
    s.mu.RUnlock()

    // Add per transfer stats if required
    if !fs.Config.StatsOneLine {
    if !s.ci.StatsOneLine {
        if !s.checking.empty() {
            _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring))
        }

@ -361,11 +364,11 @@ func (s *StatsInfo) Transferred() []TransferSnapshot {

// Log outputs the StatsInfo to the log
func (s *StatsInfo) Log() {
    if fs.Config.UseJSONLog {
    if s.ci.UseJSONLog {
        out, _ := s.RemoteStats()
        fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v%v\n", s, fs.LogValue("stats", out))
        fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v%v\n", s, fs.LogValue("stats", out))
    } else {
        fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
        fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v\n", s)
    }

}

@ -681,7 +684,7 @@ func (s *StatsInfo) PruneTransfers() {
    }
    s.mu.Lock()
    // remove a transfer from the start if we are over quota
    if len(s.startedTransfers) > MaxCompletedTransfers+fs.Config.Transfers {
    if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers {
        for i, tr := range s.startedTransfers {
            if tr.IsDone() {
                s.removeTransfer(tr, i)
@ -308,13 +308,14 @@ func newStatsGroups() *statsGroups {
func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo) {
    sg.mu.Lock()
    defer sg.mu.Unlock()
    ci := fs.GetConfig(ctx)

    // Limit number of groups kept in memory.
    if len(sg.order) >= fs.Config.MaxStatsGroups {
    if len(sg.order) >= ci.MaxStatsGroups {
        group := sg.order[0]
        fs.LogPrintf(fs.LogLevelDebug, nil, "Max number of stats groups reached removing %s", group)
        delete(sg.m, group)
        r := (len(sg.order) - fs.Config.MaxStatsGroups) + 1
        r := (len(sg.order) - ci.MaxStatsGroups) + 1
        sg.order = sg.order[r:]
    }

@ -386,6 +386,7 @@ func TestTimeRangeDuration(t *testing.T) {

func TestPruneTransfers(t *testing.T) {
    ctx := context.Background()
    ci := fs.GetConfig(ctx)
    for _, test := range []struct {
        Name string
        Transfers int

@ -396,7 +397,7 @@ func TestPruneTransfers(t *testing.T) {
            Name: "Limited number of StartedTransfers",
            Limit: 100,
            Transfers: 200,
            ExpectedStartedTransfers: 100 + fs.Config.Transfers,
            ExpectedStartedTransfers: 100 + ci.Transfers,
        },
        {
            Name: "Unlimited number of StartedTransfers",
@ -36,8 +36,9 @@ func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {

// StartTokenBucket starts the token bucket if necessary
func StartTokenBucket(ctx context.Context) {
    ci := fs.GetConfig(ctx)
    currLimitMu.Lock()
    currLimit := fs.Config.BwLimit.LimitAt(time.Now())
    currLimit := ci.BwLimit.LimitAt(time.Now())
    currLimitMu.Unlock()

    if currLimit.Bandwidth > 0 {

@ -52,16 +53,17 @@ func StartTokenBucket(ctx context.Context) {

// StartTokenTicker creates a ticker to update the bandwidth limiter every minute.
func StartTokenTicker(ctx context.Context) {
    ci := fs.GetConfig(ctx)
    // If the timetable has a single entry or was not specified, we don't need
    // a ticker to update the bandwidth.
    if len(fs.Config.BwLimit) <= 1 {
    if len(ci.BwLimit) <= 1 {
        return
    }

    ticker := time.NewTicker(time.Minute)
    go func() {
        for range ticker.C {
            limitNow := fs.Config.BwLimit.LimitAt(time.Now())
            limitNow := ci.BwLimit.LimitAt(time.Now())
            currLimitMu.Lock()

            if currLimit.Bandwidth != limitNow.Bandwidth {
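StartTokenTicker re-reads the bandwidth timetable from the context config once a minute and retunes the shared limiter. A minimal sketch of the same idea driving a golang.org/x/time/rate limiter directly (tickBwLimit is an illustrative name, not rclone's API):

package example

import (
    "context"
    "time"

    "golang.org/x/time/rate"

    "github.com/rclone/rclone/fs"
)

// tickBwLimit retunes limiter once a minute from the context config.
func tickBwLimit(ctx context.Context, limiter *rate.Limiter) {
    ci := fs.GetConfig(ctx)
    ticker := time.NewTicker(time.Minute)
    defer ticker.Stop()
    for range ticker.C {
        limit := ci.BwLimit.LimitAt(time.Now())
        if limit.Bandwidth > 0 {
            limiter.SetLimit(rate.Limit(limit.Bandwidth))
        }
    }
}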
@ -99,10 +99,11 @@ func (tr *Transfer) Done(ctx context.Context, err error) {
    acc := tr.acc
    tr.mu.RUnlock()

    ci := fs.GetConfig(ctx)
    if acc != nil {
        // Close the file if it is still open
        if err := acc.Close(); err != nil {
            fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "can't close account: %+v\n", err)
            fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err)
        }
        // Signal done with accounting
        acc.Done()

@ -128,10 +129,11 @@ func (tr *Transfer) Reset(ctx context.Context) {
    acc := tr.acc
    tr.acc = nil
    tr.mu.RUnlock()
    ci := fs.GetConfig(ctx)

    if acc != nil {
        if err := acc.Close(); err != nil {
            fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "can't close account: %+v\n", err)
            fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err)
        }
    }
}

@ -92,6 +92,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string {
    tm.mu.RLock()
    defer tm.mu.RUnlock()
    ci := fs.GetConfig(ctx)
    stringList := make([]string, 0, len(tm.items))
    for _, tr := range tm._sortedSlice() {
        if exclude != nil {

@ -107,8 +108,8 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
            out = acc.String()
        } else {
            out = fmt.Sprintf("%*s: %s",
                fs.Config.StatsFileNameLength,
                shortenName(tr.remote, fs.Config.StatsFileNameLength),
                ci.StatsFileNameLength,
                shortenName(tr.remote, ci.StatsFileNameLength),
                tm.name,
            )
        }
@ -3,6 +3,7 @@
package asyncreader

import (
    "context"
    "io"
    "sync"
    "time"

@ -29,17 +30,18 @@ var ErrorStreamAbandoned = errors.New("stream abandoned")
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type AsyncReader struct {
    in io.ReadCloser // Input reader
    ready chan *buffer // Buffers ready to be handed to the reader
    token chan struct{} // Tokens which allow a buffer to be taken
    exit chan struct{} // Closes when finished
    buffers int // Number of buffers
    err error // If an error has occurred it is here
    cur *buffer // Current buffer being served
    exited chan struct{} // Channel is closed when the async reader shuts down
    size int // size of buffer to use
    closed bool // whether we have closed the underlying stream
    mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
    in io.ReadCloser // Input reader
    ready chan *buffer // Buffers ready to be handed to the reader
    token chan struct{} // Tokens which allow a buffer to be taken
    exit chan struct{} // Closes when finished
    buffers int // Number of buffers
    err error // If an error has occurred it is here
    cur *buffer // Current buffer being served
    exited chan struct{} // Channel is closed when the async reader shuts down
    size int // size of buffer to use
    closed bool // whether we have closed the underlying stream
    mu sync.Mutex // lock for Read/WriteTo/Abandon/Close
    ci *fs.ConfigInfo // for reading config
}

// New returns a reader that will asynchronously read from

@ -48,14 +50,16 @@ type AsyncReader struct {
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func New(rd io.ReadCloser, buffers int) (*AsyncReader, error) {
func New(ctx context.Context, rd io.ReadCloser, buffers int) (*AsyncReader, error) {
    if buffers <= 0 {
        return nil, errors.New("number of buffers too small")
    }
    if rd == nil {
        return nil, errors.New("nil reader supplied")
    }
    a := &AsyncReader{}
    a := &AsyncReader{
        ci: fs.GetConfig(ctx),
    }
    a.init(rd, buffers)
    return a, nil
}

@ -114,7 +118,7 @@ func (a *AsyncReader) putBuffer(b *buffer) {
func (a *AsyncReader) getBuffer() *buffer {
    bufferPoolOnce.Do(func() {
        // Initialise the buffer pool when used
        bufferPool = pool.New(bufferCacheFlushTime, BufferSize, bufferCacheSize, fs.Config.UseMmap)
        bufferPool = pool.New(bufferCacheFlushTime, BufferSize, bufferCacheSize, a.ci.UseMmap)
    })
    return &buffer{
        buf: bufferPool.Get(),
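With the extra context argument, asyncreader.New picks up settings such as UseMmap from the context config instead of the global. A minimal usage sketch, assuming only the signature shown above:

package example

import (
    "bytes"
    "context"
    "io/ioutil"

    "github.com/rclone/rclone/fs/asyncreader"
)

func readAll(ctx context.Context) ([]byte, error) {
    src := ioutil.NopCloser(bytes.NewBufferString("hello"))
    ar, err := asyncreader.New(ctx, src, 4) // 4 read-ahead buffers
    if err != nil {
        return nil, err
    }
    defer func() { _ = ar.Close() }()
    return ioutil.ReadAll(ar)
}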
@ -3,6 +3,7 @@ package asyncreader
import (
    "bufio"
    "bytes"
    "context"
    "fmt"
    "io"
    "io/ioutil"

@ -20,8 +21,10 @@ import (
)

func TestAsyncReader(t *testing.T) {
    ctx := context.Background()

    buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
    ar, err := New(buf, 4)
    ar, err := New(ctx, buf, 4)
    require.NoError(t, err)

    var dst = make([]byte, 100)

@ -46,7 +49,7 @@ func TestAsyncReader(t *testing.T) {

    // Test Close without reading everything
    buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
    ar, err = New(buf, 4)
    ar, err = New(ctx, buf, 4)
    require.NoError(t, err)
    err = ar.Close()
    require.NoError(t, err)

@ -54,8 +57,10 @@ func TestAsyncReader(t *testing.T) {
}

func TestAsyncWriteTo(t *testing.T) {
    ctx := context.Background()

    buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
    ar, err := New(buf, 4)
    ar, err := New(ctx, buf, 4)
    require.NoError(t, err)

    var dst = &bytes.Buffer{}

@ -73,15 +78,17 @@ func TestAsyncWriteTo(t *testing.T) {
}

func TestAsyncReaderErrors(t *testing.T) {
    ctx := context.Background()

    // test nil reader
    _, err := New(nil, 4)
    _, err := New(ctx, nil, 4)
    require.Error(t, err)

    // invalid buffer number
    buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
    _, err = New(buf, 0)
    _, err = New(ctx, buf, 0)
    require.Error(t, err)
    _, err = New(buf, -1)
    _, err = New(ctx, buf, -1)
    require.Error(t, err)
}

@ -140,6 +147,8 @@ var bufsizes = []int{

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) {
    ctx := context.Background()

    var texts [31]string
    str := ""
    all := ""

@ -161,7 +170,7 @@ func TestAsyncReaderSizes(t *testing.T) {
            bufsize := bufsizes[k]
            read := readmaker.fn(strings.NewReader(text))
            buf := bufio.NewReaderSize(read, bufsize)
            ar, _ := New(ioutil.NopCloser(buf), l)
            ar, _ := New(ctx, ioutil.NopCloser(buf), l)
            s := bufreader.fn(ar)
            // "timeout" expects the Reader to recover, AsyncReader does not.
            if s != text && readmaker.name != "timeout" {

@ -179,6 +188,8 @@ func TestAsyncReaderSizes(t *testing.T) {

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderWriteTo(t *testing.T) {
    ctx := context.Background()

    var texts [31]string
    str := ""
    all := ""

@ -200,7 +211,7 @@ func TestAsyncReaderWriteTo(t *testing.T) {
            bufsize := bufsizes[k]
            read := readmaker.fn(strings.NewReader(text))
            buf := bufio.NewReaderSize(read, bufsize)
            ar, _ := New(ioutil.NopCloser(buf), l)
            ar, _ := New(ctx, ioutil.NopCloser(buf), l)
            dst := &bytes.Buffer{}
            _, err := ar.WriteTo(dst)
            if err != nil && err != io.EOF && err != iotest.ErrTimeout {

@ -246,8 +257,10 @@ func (z *zeroReader) Close() error {

// Test closing and abandoning
func testAsyncReaderClose(t *testing.T, writeto bool) {
    ctx := context.Background()

    zr := &zeroReader{}
    a, err := New(zr, 16)
    a, err := New(ctx, zr, 16)
    require.NoError(t, err)
    var copyN int64
    var copyErr error

@ -287,6 +300,8 @@ func TestAsyncReaderCloseRead(t *testing.T) { testAsyncReaderClose(t, false)
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }

func TestAsyncReaderSkipBytes(t *testing.T) {
    ctx := context.Background()

    t.Parallel()
    data := make([]byte, 15000)
    buf := make([]byte, len(data))

@ -312,7 +327,7 @@ func TestAsyncReaderSkipBytes(t *testing.T) {
        t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
            for _, skip := range skips {
                t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
                    ar, err := New(ioutil.NopCloser(bytes.NewReader(data)), buffers)
                    ar, err := New(ctx, ioutil.NopCloser(bytes.NewReader(data)), buffers)
                    require.NoError(t, err)

                    wantSkipFalse := false
fs/config.go
@ -1,6 +1,7 @@
package fs

import (
    "context"
    "net"
    "strings"
    "time"

@ -10,8 +11,8 @@ import (

// Global
var (
    // Config is the global config
    Config = NewConfig()
    // globalConfig for rclone
    globalConfig = NewConfig()

    // Read a value from the config file
    //

@ -162,6 +163,34 @@ func NewConfig() *ConfigInfo {
    return c
}

type configContextKeyType struct{}

// Context key for config
var configContextKey = configContextKeyType{}
// GetConfig returns the global or context sensitive config
func GetConfig(ctx context.Context) *ConfigInfo {
    if ctx == nil {
        return globalConfig
    }
    c := ctx.Value(configContextKey)
    if c == nil {
        return globalConfig
    }
    return c.(*ConfigInfo)
}

// AddConfig returns a mutable config structure based on a shallow
// copy of that found in ctx and returns a new context with that added
// to it.
func AddConfig(ctx context.Context) (context.Context, *ConfigInfo) {
    c := GetConfig(ctx)
    cCopy := new(ConfigInfo)
    *cCopy = *c
    newCtx := context.WithValue(ctx, configContextKey, cCopy)
    return newCtx, cCopy
}

// ConfigToEnv converts a config section and name, e.g. ("myremote",
// "ignore-size") into an environment name
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
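Together these two functions replace reads and writes of the old global: GetConfig falls back to the global config when the context carries none, and AddConfig hands the caller a private, mutable shallow copy. A small sketch of how a caller might scope an override (the field and value are just an example):

package example

import (
    "context"
    "fmt"

    "github.com/rclone/rclone/fs"
)

// quieterCopy scopes an override to holders of the returned context.
func quieterCopy(ctx context.Context) context.Context {
    newCtx, ci := fs.AddConfig(ctx)
    ci.LowLevelRetries = 1
    return newCtx
}

func show(ctx context.Context) {
    fmt.Println(fs.GetConfig(ctx).LowLevelRetries)
}

Because AddConfig copies the struct, show(quieterCopy(ctx)) prints 1 while show(ctx) still prints the previous value; concurrent users of the original context are unaffected.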
@ -236,7 +236,7 @@ func LoadConfig(ctx context.Context) {
	accounting.StartTokenTicker(ctx)

	// Start the transactions per second limiter
	fshttp.StartHTTPTokenBucket()
	fshttp.StartHTTPTokenBucket(ctx)
}

var errorConfigFileNotFound = errors.New("config file not found")
@ -244,6 +244,8 @@ var errorConfigFileNotFound = errors.New("config file not found")
// loadConfigFile will load a config file, and
// automatically decrypt it.
func loadConfigFile() (*goconfig.ConfigFile, error) {
	ctx := context.Background()
	ci := fs.GetConfig(ctx)
	var usingPasswordCommand bool

	b, err := ioutil.ReadFile(ConfigPath)
@ -278,11 +280,11 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
	}

	if len(configKey) == 0 {
		if len(fs.Config.PasswordCommand) != 0 {
		if len(ci.PasswordCommand) != 0 {
			var stdout bytes.Buffer
			var stderr bytes.Buffer

			cmd := exec.Command(fs.Config.PasswordCommand[0], fs.Config.PasswordCommand[1:]...)
			cmd := exec.Command(ci.PasswordCommand[0], ci.PasswordCommand[1:]...)

			cmd.Stdout = &stdout
			cmd.Stderr = &stderr
@ -358,7 +360,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
		if usingPasswordCommand {
			return nil, errors.New("using --password-command derived password, unable to decrypt configuration")
		}
		if !fs.Config.AskPassword {
		if !ci.AskPassword {
			return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password")
		}
		getConfigPassword("Enter configuration password:")
@ -600,15 +602,17 @@ func saveConfig() error {
// SaveConfig calls the function which saves the configuration file,
// trying again after a short sleep if saveConfig returns an error.
func SaveConfig() {
	ctx := context.Background()
	ci := fs.GetConfig(ctx)
	var err error
	for i := 0; i < fs.Config.LowLevelRetries+1; i++ {
	for i := 0; i < ci.LowLevelRetries+1; i++ {
		if err = saveConfig(); err == nil {
			return
		}
		waitingTimeMs := mathrand.Intn(1000)
		time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
	}
	log.Fatalf("Failed to save config after %d tries: %v", fs.Config.LowLevelRetries, err)
	log.Fatalf("Failed to save config after %d tries: %v", ci.LowLevelRetries, err)

	return
}
@ -746,7 +750,8 @@ func Confirm(Default bool) bool {
// that, but if it isn't set then it will return the Default value
// passed in
func ConfirmWithConfig(ctx context.Context, m configmap.Getter, configName string, Default bool) bool {
	if fs.Config.AutoConfirm {
	ci := fs.GetConfig(ctx)
	if ci.AutoConfirm {
		configString, ok := m.Get(configName)
		if ok {
			configValue, err := strconv.ParseBool(configString)
@ -897,12 +902,12 @@ func MustFindByName(name string) *fs.RegInfo {
}

// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(name string) {
func RemoteConfig(ctx context.Context, name string) {
	fmt.Printf("Remote config\n")
	f := MustFindByName(name)
	if f.Config != nil {
		m := fs.ConfigMap(f, name)
		f.Config(context.Background(), name, m)
		f.Config(ctx, name, m)
	}
}

@ -1023,13 +1028,11 @@ func ChooseOption(o *fs.Option, name string) string {
	return in
}

// Suppress the confirm prompts and return a function to undo that
func suppressConfirm(ctx context.Context) func() {
	old := fs.Config.AutoConfirm
	fs.Config.AutoConfirm = true
	return func() {
		fs.Config.AutoConfirm = old
	}
// Suppress the confirm prompts by altering the context config
func suppressConfirm(ctx context.Context) context.Context {
	newCtx, ci := fs.AddConfig(ctx)
	ci.AutoConfirm = true
	return newCtx
}
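This hunk is the commit's theme in miniature: instead of mutating the global AutoConfirm and deferring an undo, the override now lives in a forked config that travels with the context. A sketch of the new calling pattern inside this package (doSomethingInteractive is a hypothetical callee):

func example(ctx context.Context) {
	ctx = suppressConfirm(ctx)  // ctx now carries a config copy with AutoConfirm = true
	doSomethingInteractive(ctx) // sees the override via fs.GetConfig(ctx)
	// code holding the original ctx still sees the old AutoConfirm value
}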
// UpdateRemote adds the keyValues passed in to the remote of name.
@ -1042,7 +1045,7 @@ func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, doObscu
	if err != nil {
		return err
	}
	defer suppressConfirm(ctx)()
	ctx = suppressConfirm(ctx)

	// Work out which options need to be obscured
	needsObscure := map[string]struct{}{}
@ -1079,7 +1082,7 @@ func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, doObscu
		}
		getConfigData().SetValue(name, k, vStr)
	}
	RemoteConfig(name)
	RemoteConfig(ctx, name)
	SaveConfig()
	return nil
}
@ -1103,11 +1106,11 @@ func CreateRemote(ctx context.Context, name string, provider string, keyValues r
// PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error {
	ctx = suppressConfirm(ctx)
	err := fspath.CheckConfigName(name)
	if err != nil {
		return err
	}
	defer suppressConfirm(ctx)()
	for k, v := range keyValues {
		keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
	}
@ -1206,7 +1209,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
}

// NewRemote makes a new remote from its name
func NewRemote(name string) {
func NewRemote(ctx context.Context, name string) {
	var (
		newType string
		ri      *fs.RegInfo
@ -1226,16 +1229,16 @@ func NewRemote(name string) {
	getConfigData().SetValue(name, "type", newType)

	editOptions(ri, name, true)
	RemoteConfig(name)
	RemoteConfig(ctx, name)
	if OkRemote(name) {
		SaveConfig()
		return
	}
	EditRemote(ri, name)
	EditRemote(ctx, ri, name)
}

// EditRemote gets the user to edit a remote
func EditRemote(ri *fs.RegInfo, name string) {
func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) {
	ShowRemote(name)
	fmt.Printf("Edit remote\n")
	for {
@ -1245,7 +1248,7 @@ func EditRemote(ri *fs.RegInfo, name string) {
		}
	}
	SaveConfig()
	RemoteConfig(name)
	RemoteConfig(ctx, name)
}

// DeleteRemote gets the user to delete a remote
@ -1307,7 +1310,7 @@ func ShowConfig() {
}

// EditConfig edits the config file interactively
func EditConfig() {
func EditConfig(ctx context.Context) {
	for {
		haveRemotes := len(getConfigData().GetSectionList()) != 0
		what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
@ -1324,9 +1327,9 @@ func EditConfig() {
		case 'e':
			name := ChooseRemote()
			fs := MustFindByName(name)
			EditRemote(fs, name)
			EditRemote(ctx, fs, name)
		case 'n':
			NewRemote(NewRemoteName())
			NewRemote(ctx, NewRemoteName())
		case 'd':
			name := ChooseRemote()
			DeleteRemote(name)
@ -1388,7 +1391,7 @@ func SetPassword() {
// rclone authorize "fs name"
// rclone authorize "fs name" "client id" "client secret"
func Authorize(ctx context.Context, args []string, noAutoBrowser bool) {
	defer suppressConfirm(ctx)()
	ctx = suppressConfirm(ctx)
	switch len(args) {
	case 1, 3:
	default:
@ -17,6 +17,7 @@ import (

func testConfigFile(t *testing.T, configFileName string) func() {
	ctx := context.Background()
	ci := fs.GetConfig(ctx)
	configKey = nil // reset password
	_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
	_ = os.Unsetenv("RCLONE_CONFIG_PASS")
@ -29,13 +30,13 @@ func testConfigFile(t *testing.T, configFileName string) func() {
	// temporarily adapt configuration
	oldOsStdout := os.Stdout
	oldConfigPath := ConfigPath
	oldConfig := fs.Config
	oldConfig := *ci
	oldConfigFile := configFile
	oldReadLine := ReadLine
	oldPassword := Password
	os.Stdout = nil
	ConfigPath = path
	fs.Config = &fs.ConfigInfo{}
	ci = &fs.ConfigInfo{}
	configFile = nil

	LoadConfig(ctx)
@ -67,7 +68,7 @@ func testConfigFile(t *testing.T, configFileName string) func() {
		ConfigPath = oldConfigPath
		ReadLine = oldReadLine
		Password = oldPassword
		fs.Config = oldConfig
		*ci = oldConfig
		configFile = oldConfigFile

		_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
@ -87,6 +88,7 @@ func makeReadLine(answers []string) func() string {

func TestCRUD(t *testing.T) {
	defer testConfigFile(t, "crud.conf")()
	ctx := context.Background()

	// script for creating remote
	ReadLine = makeReadLine([]string{
@ -97,7 +99,7 @@ func TestCRUD(t *testing.T) {
		"secret", // repeat
		"y",      // looks good, save
	})
	NewRemote("test")
	NewRemote(ctx, "test")

	assert.Equal(t, []string{"test"}, configFile.GetSectionList())
	assert.Equal(t, "config_test_remote", FileGet("test", "type"))
@ -124,6 +126,7 @@ func TestCRUD(t *testing.T) {

func TestChooseOption(t *testing.T) {
	defer testConfigFile(t, "crud.conf")()
	ctx := context.Background()

	// script for creating remote
	ReadLine = makeReadLine([]string{
@ -139,7 +142,7 @@ func TestChooseOption(t *testing.T) {
		assert.Equal(t, 1024, bits)
		return "not very random password", nil
	}
	NewRemote("test")
	NewRemote(ctx, "test")

	assert.Equal(t, "false", FileGet("test", "bool"))
	assert.Equal(t, "not very random password", obscure.MustReveal(FileGet("test", "pass")))
@ -151,7 +154,7 @@ func TestChooseOption(t *testing.T) {
		"n", // not required
		"y", // looks good, save
	})
	NewRemote("test")
	NewRemote(ctx, "test")

	assert.Equal(t, "true", FileGet("test", "bool"))
	assert.Equal(t, "", FileGet("test", "pass"))
@ -159,6 +162,7 @@ func TestChooseOption(t *testing.T) {

func TestNewRemoteName(t *testing.T) {
	defer testConfigFile(t, "crud.conf")()
	ctx := context.Background()

	// script for creating remote
	ReadLine = makeReadLine([]string{
@ -167,7 +171,7 @@ func TestNewRemoteName(t *testing.T) {
		"n", // not required
		"y", // looks good, save
	})
	NewRemote("test")
	NewRemote(ctx, "test")

	ReadLine = makeReadLine([]string{
		"test", // already exists
@ -293,16 +297,18 @@ func TestConfigLoadEncrypted(t *testing.T) {
}

func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) {
	ctx := context.Background()
	ci := fs.GetConfig(ctx)
	oldConfigPath := ConfigPath
	oldConfig := fs.Config
	oldConfig := *ci
	ConfigPath = "./testdata/encrypted.conf"
	// using fs.Config.PasswordCommand, correct password
	fs.Config.PasswordCommand = fs.SpaceSepList{"echo", "asdf"}
	// using ci.PasswordCommand, correct password
	ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf"}
	defer func() {
		ConfigPath = oldConfigPath
		configKey = nil // reset password
		fs.Config = oldConfig
		fs.Config.PasswordCommand = nil
		*ci = oldConfig
		ci.PasswordCommand = nil
	}()

	configKey = nil // reset password
@ -320,16 +326,18 @@ func TestConfigLoadEncryptedWithValidPassCommand(t *testing.T) {
}

func TestConfigLoadEncryptedWithInvalidPassCommand(t *testing.T) {
	ctx := context.Background()
	ci := fs.GetConfig(ctx)
	oldConfigPath := ConfigPath
	oldConfig := fs.Config
	oldConfig := *ci
	ConfigPath = "./testdata/encrypted.conf"
	// using fs.Config.PasswordCommand, incorrect password
	fs.Config.PasswordCommand = fs.SpaceSepList{"echo", "asdf-blurfl"}
	// using ci.PasswordCommand, incorrect password
	ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf-blurfl"}
	defer func() {
		ConfigPath = oldConfigPath
		configKey = nil // reset password
		fs.Config = oldConfig
		fs.Config.PasswordCommand = nil
		*ci = oldConfig
		ci.PasswordCommand = nil
	}()

	configKey = nil // reset password
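The tests above still save and restore the shared config by value because loadConfigFile and friends read it through context.Background(). Where the code under test takes an explicit context, the new API allows a fully scoped alternative; a hedged sketch, not part of this commit (the test and helper names are invented):

func TestWithScopedConfig(t *testing.T) {
	ctx, ci := fs.AddConfig(context.Background())
	ci.Transfers = 64            // override visible only through ctx
	runTransferUnderTest(t, ctx) // hypothetical helper; no global state to restore afterwards
}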
@ -35,96 +35,96 @@ var (
)

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
	rc.AddOption("main", fs.Config)
func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
	rc.AddOption("main", ci)
	// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
	flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
	flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
	flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same")
	flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.")
	flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
	flags.DurationVarP(flagSet, &ci.ModifyWindow, "modify-window", "", ci.ModifyWindow, "Max time diff to be considered the same")
	flags.IntVarP(flagSet, &ci.Checkers, "checkers", "", ci.Checkers, "Number of checkers to run in parallel.")
	flags.IntVarP(flagSet, &ci.Transfers, "transfers", "", ci.Transfers, "Number of file transfers to run in parallel.")
	flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
	flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
	flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
	flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreErrors, "ignore-errors", "", fs.Config.IgnoreErrors, "delete even if there are I/O errors")
	flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes")
	flags.BoolVarP(flagSet, &fs.Config.Interactive, "interactive", "i", fs.Config.Interactive, "Enable interactive mode")
	flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout")
	flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout")
	flags.DurationVarP(flagSet, &fs.Config.ExpectContinueTimeout, "expect-continue-timeout", "", fs.Config.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP")
	flags.BoolVarP(flagSet, &ci.CheckSum, "checksum", "c", ci.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
	flags.BoolVarP(flagSet, &ci.SizeOnly, "size-only", "", ci.SizeOnly, "Skip based on size only, not mod-time or checksum")
	flags.BoolVarP(flagSet, &ci.IgnoreTimes, "ignore-times", "I", ci.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
	flags.BoolVarP(flagSet, &ci.IgnoreExisting, "ignore-existing", "", ci.IgnoreExisting, "Skip all files that exist on destination")
	flags.BoolVarP(flagSet, &ci.IgnoreErrors, "ignore-errors", "", ci.IgnoreErrors, "delete even if there are I/O errors")
	flags.BoolVarP(flagSet, &ci.DryRun, "dry-run", "n", ci.DryRun, "Do a trial run with no permanent changes")
	flags.BoolVarP(flagSet, &ci.Interactive, "interactive", "i", ci.Interactive, "Enable interactive mode")
	flags.DurationVarP(flagSet, &ci.ConnectTimeout, "contimeout", "", ci.ConnectTimeout, "Connect timeout")
	flags.DurationVarP(flagSet, &ci.Timeout, "timeout", "", ci.Timeout, "IO idle timeout")
	flags.DurationVarP(flagSet, &ci.ExpectContinueTimeout, "expect-continue-timeout", "", ci.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP")
	flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
	flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
	flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
	flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.")
	flags.FVarP(flagSet, &fs.Config.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.")
	flags.BoolVarP(flagSet, &ci.InsecureSkipVerify, "no-check-certificate", "", ci.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
	flags.BoolVarP(flagSet, &ci.AskPassword, "ask-password", "", ci.AskPassword, "Allow prompt for password for encrypted configuration.")
	flags.FVarP(flagSet, &ci.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.")
	flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
	flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
	flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
	flags.Int64VarP(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
	flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
	flags.StringVarP(flagSet, &fs.Config.TrackRenamesStrategy, "track-renames-strategy", "", fs.Config.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
	flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.")
	flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.")
	flags.BoolVarP(flagSet, &fs.Config.UseServerModTime, "use-server-modtime", "", fs.Config.UseServerModTime, "Use server modified time instead of object metadata")
	flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.")
	flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
	flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing")
	flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
	flags.BoolVarP(flagSet, &fs.Config.CheckFirst, "check-first", "", fs.Config.CheckFirst, "Do all the checks before starting transfers.")
	flags.BoolVarP(flagSet, &fs.Config.NoCheckDest, "no-check-dest", "", fs.Config.NoCheckDest, "Don't check the destination, copy regardless.")
	flags.BoolVarP(flagSet, &fs.Config.NoUnicodeNormalization, "no-unicode-normalization", "", fs.Config.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.")
	flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
	flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "Include additional server-side path during comparison.")
	flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
	flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
	flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.")
	flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")
	flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
	flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
	flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
	flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
	flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
	flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
	flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do.")
	flags.BoolVarP(flagSet, &ci.UpdateOlder, "update", "u", ci.UpdateOlder, "Skip files that are newer on the destination.")
	flags.BoolVarP(flagSet, &ci.UseServerModTime, "use-server-modtime", "", ci.UseServerModTime, "Use server modified time instead of object metadata")
	flags.BoolVarP(flagSet, &ci.NoGzip, "no-gzip-encoding", "", ci.NoGzip, "Don't set Accept-Encoding: gzip.")
	flags.IntVarP(flagSet, &ci.MaxDepth, "max-depth", "", ci.MaxDepth, "If set limits the recursion depth to this.")
	flags.BoolVarP(flagSet, &ci.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
	flags.BoolVarP(flagSet, &ci.IgnoreChecksum, "ignore-checksum", "", ci.IgnoreChecksum, "Skip post copy check of checksums.")
	flags.BoolVarP(flagSet, &ci.IgnoreCaseSync, "ignore-case-sync", "", ci.IgnoreCaseSync, "Ignore case when synchronizing")
	flags.BoolVarP(flagSet, &ci.NoTraverse, "no-traverse", "", ci.NoTraverse, "Don't traverse destination file system on copy.")
	flags.BoolVarP(flagSet, &ci.CheckFirst, "check-first", "", ci.CheckFirst, "Do all the checks before starting transfers.")
	flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless.")
	flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.")
	flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical.")
	flags.StringVarP(flagSet, &ci.CompareDest, "compare-dest", "", ci.CompareDest, "Include additional server-side path during comparison.")
	flags.StringVarP(flagSet, &ci.CopyDest, "copy-dest", "", ci.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
	flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR.")
	flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files.")
	flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix.")
	flags.BoolVarP(flagSet, &ci.UseListR, "fast-list", "", ci.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
	flags.Float64VarP(flagSet, &ci.TPSLimit, "tpslimit", "", ci.TPSLimit, "Limit HTTP transactions per second to this.")
	flags.IntVarP(flagSet, &ci.TPSLimitBurst, "tpslimit-burst", "", ci.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
	flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
	flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.")
	flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
	flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.")
	flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.")
	flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
	flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
	flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
	flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
	flags.FVarP(flagSet, &fs.Config.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.")
	flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
	flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
	flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
	flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
	flags.DurationVarP(flagSet, &fs.Config.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.")
	flags.FVarP(flagSet, &fs.Config.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
	flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
	flags.IntVarP(flagSet, &fs.Config.MaxStatsGroups, "max-stats-groups", "", fs.Config.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
	flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
	flags.BoolVarP(flagSet, &fs.Config.StatsOneLineDate, "stats-one-line-date", "", fs.Config.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.")
	flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
	flags.BoolVarP(flagSet, &fs.Config.ErrorOnNoTransfer, "error-on-no-transfer", "", fs.Config.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
	flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
	flags.BoolVarP(flagSet, &fs.Config.ProgressTerminalTitle, "progress-terminal-title", "", fs.Config.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.")
	flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
	flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
	flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")
	flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
	flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
	flags.FVarP(flagSet, &fs.Config.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.")
	flags.IntVarP(flagSet, &fs.Config.MultiThreadStreams, "multi-thread-streams", "", fs.Config.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.")
	flags.BoolVarP(flagSet, &fs.Config.UseJSONLog, "use-json-log", "", fs.Config.UseJSONLog, "Use json log format.")
	flags.StringVarP(flagSet, &fs.Config.OrderBy, "order-by", "", fs.Config.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'")
	flags.StringVarP(flagSet, &ci.UserAgent, "user-agent", "", ci.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
	flags.BoolVarP(flagSet, &ci.Immutable, "immutable", "", ci.Immutable, "Do not modify files. Fail if existing files have been modified.")
	flags.BoolVarP(flagSet, &ci.AutoConfirm, "auto-confirm", "", ci.AutoConfirm, "If enabled, do not request console confirmation.")
	flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
	flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
	flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
	flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
	flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.")
	flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
	flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
	flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
	flags.FVarP(flagSet, &ci.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
	flags.DurationVarP(flagSet, &ci.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.")
	flags.FVarP(flagSet, &ci.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
	flags.IntVarP(flagSet, &ci.MaxBacklog, "max-backlog", "", ci.MaxBacklog, "Maximum number of objects in sync or check backlog.")
	flags.IntVarP(flagSet, &ci.MaxStatsGroups, "max-stats-groups", "", ci.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
	flags.BoolVarP(flagSet, &ci.StatsOneLine, "stats-one-line", "", ci.StatsOneLine, "Make the stats fit on one line.")
	flags.BoolVarP(flagSet, &ci.StatsOneLineDate, "stats-one-line-date", "", ci.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.")
	flags.StringVarP(flagSet, &ci.StatsOneLineDateFormat, "stats-one-line-date-format", "", ci.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
	flags.BoolVarP(flagSet, &ci.ErrorOnNoTransfer, "error-on-no-transfer", "", ci.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
	flags.BoolVarP(flagSet, &ci.Progress, "progress", "P", ci.Progress, "Show progress during transfer.")
	flags.BoolVarP(flagSet, &ci.ProgressTerminalTitle, "progress-terminal-title", "", ci.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.")
	flags.BoolVarP(flagSet, &ci.Cookie, "use-cookies", "", ci.Cookie, "Enable session cookiejar.")
	flags.BoolVarP(flagSet, &ci.UseMmap, "use-mmap", "", ci.UseMmap, "Use mmap allocator (see docs).")
	flags.StringVarP(flagSet, &ci.CaCert, "ca-cert", "", ci.CaCert, "CA certificate used to verify servers")
	flags.StringVarP(flagSet, &ci.ClientCert, "client-cert", "", ci.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
	flags.StringVarP(flagSet, &ci.ClientKey, "client-key", "", ci.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
	flags.FVarP(flagSet, &ci.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.")
	flags.IntVarP(flagSet, &ci.MultiThreadStreams, "multi-thread-streams", "", ci.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.")
	flags.BoolVarP(flagSet, &ci.UseJSONLog, "use-json-log", "", ci.UseJSONLog, "Use json log format.")
	flags.StringVarP(flagSet, &ci.OrderBy, "order-by", "", ci.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'")
	flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions")
	flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions")
	flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
	flags.BoolVarP(flagSet, &fs.Config.RefreshTimes, "refresh-times", "", fs.Config.RefreshTimes, "Refresh the modtime of remote files.")
	flags.BoolVarP(flagSet, &fs.Config.LogSystemdSupport, "log-systemd", "", fs.Config.LogSystemdSupport, "Activate systemd integration for the logger.")
	flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
	flags.BoolVarP(flagSet, &ci.LogSystemdSupport, "log-systemd", "", ci.LogSystemdSupport, "Activate systemd integration for the logger.")
}
// ParseHeaders converts the strings passed in via the header flags into HTTPOptions
@ -145,17 +145,17 @@ func ParseHeaders(headers []string) []*fs.HTTPOption {
}

// SetFlags converts any flags into config which weren't straightforward
func SetFlags() {
func SetFlags(ci *fs.ConfigInfo) {
	if verbose >= 2 {
		fs.Config.LogLevel = fs.LogLevelDebug
		ci.LogLevel = fs.LogLevelDebug
	} else if verbose >= 1 {
		fs.Config.LogLevel = fs.LogLevelInfo
		ci.LogLevel = fs.LogLevelInfo
	}
	if quiet {
		if verbose > 0 {
			log.Fatalf("Can't set -v and -q")
		}
		fs.Config.LogLevel = fs.LogLevelError
		ci.LogLevel = fs.LogLevelError
	}
	logLevelFlag := pflag.Lookup("log-level")
	if logLevelFlag != nil && logLevelFlag.Changed {
@ -166,13 +166,13 @@ func SetFlags() {
			log.Fatalf("Can't set -q and --log-level")
		}
	}
	if fs.Config.UseJSONLog {
	if ci.UseJSONLog {
		logrus.AddHook(fsLog.NewCallerHook())
		logrus.SetFormatter(&logrus.JSONFormatter{
			TimestampFormat: "2006-01-02T15:04:05.999999-07:00",
		})
		logrus.SetLevel(logrus.DebugLevel)
		switch fs.Config.LogLevel {
		switch ci.LogLevel {
		case fs.LogLevelEmergency, fs.LogLevelAlert:
			logrus.SetLevel(logrus.PanicLevel)
		case fs.LogLevelCritical:
@ -189,11 +189,11 @@ func SetFlags() {
	}

	if dumpHeaders {
		fs.Config.Dump |= fs.DumpHeaders
		ci.Dump |= fs.DumpHeaders
		fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead")
	}
	if dumpBodies {
		fs.Config.Dump |= fs.DumpBodies
		ci.Dump |= fs.DumpBodies
		fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
	}

@ -202,26 +202,26 @@ func SetFlags() {
		deleteDuring && deleteAfter:
		log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
	case deleteBefore:
		fs.Config.DeleteMode = fs.DeleteModeBefore
		ci.DeleteMode = fs.DeleteModeBefore
	case deleteDuring:
		fs.Config.DeleteMode = fs.DeleteModeDuring
		ci.DeleteMode = fs.DeleteModeDuring
	case deleteAfter:
		fs.Config.DeleteMode = fs.DeleteModeAfter
		ci.DeleteMode = fs.DeleteModeAfter
	default:
		fs.Config.DeleteMode = fs.DeleteModeDefault
		ci.DeleteMode = fs.DeleteModeDefault
	}

	if fs.Config.CompareDest != "" && fs.Config.CopyDest != "" {
	if ci.CompareDest != "" && ci.CopyDest != "" {
		log.Fatalf(`Can't use --compare-dest with --copy-dest.`)
	}

	switch {
	case len(fs.Config.StatsOneLineDateFormat) > 0:
		fs.Config.StatsOneLineDate = true
		fs.Config.StatsOneLine = true
	case fs.Config.StatsOneLineDate:
		fs.Config.StatsOneLineDateFormat = "2006/01/02 15:04:05 - "
		fs.Config.StatsOneLine = true
	case len(ci.StatsOneLineDateFormat) > 0:
		ci.StatsOneLineDate = true
		ci.StatsOneLine = true
	case ci.StatsOneLineDate:
		ci.StatsOneLineDateFormat = "2006/01/02 15:04:05 - "
		ci.StatsOneLine = true
	}

	if bindAddr != "" {
@ -232,24 +232,24 @@ func SetFlags() {
		if len(addrs) != 1 {
			log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs))
		}
		fs.Config.BindAddr = addrs[0]
		ci.BindAddr = addrs[0]
	}

	if disableFeatures != "" {
		if disableFeatures == "help" {
			log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", "))
		}
		fs.Config.DisableFeatures = strings.Split(disableFeatures, ",")
		ci.DisableFeatures = strings.Split(disableFeatures, ",")
	}

	if len(uploadHeaders) != 0 {
		fs.Config.UploadHeaders = ParseHeaders(uploadHeaders)
		ci.UploadHeaders = ParseHeaders(uploadHeaders)
	}
	if len(downloadHeaders) != 0 {
		fs.Config.DownloadHeaders = ParseHeaders(downloadHeaders)
		ci.DownloadHeaders = ParseHeaders(downloadHeaders)
	}
	if len(headers) != 0 {
		fs.Config.Headers = ParseHeaders(headers)
		ci.Headers = ParseHeaders(headers)
	}

	// Make the config file absolute
@ -260,6 +260,6 @@ func SetFlags() {

	// Set whether multi-thread-streams was set
	multiThreadStreamsFlag := pflag.Lookup("multi-thread-streams")
	fs.Config.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed
	ci.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed

}
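With both AddFlags and SetFlags now taking an explicit *fs.ConfigInfo, the command startup presumably wires them together along these lines (a hedged sketch; the cmd call site is not shown in this diff):

ctx := context.Background()
ci := fs.GetConfig(ctx)                     // the config the flags will write into
configflags.AddFlags(ci, pflag.CommandLine) // register --transfers, --checkers, ...
pflag.Parse()                               // parsed values land directly in ci's fields
configflags.SetFlags(ci)                    // derived settings: -v/-q -> ci.LogLevel, etc.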
fs/config_test.go (new file, 29 lines)
@ -0,0 +1,29 @@
package fs

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestGetConfig(t *testing.T) {
	ctx := context.Background()

	// Check nil
	config := GetConfig(nil)
	assert.Equal(t, globalConfig, config)

	// Check empty config
	config = GetConfig(ctx)
	assert.Equal(t, globalConfig, config)

	// Check adding a config
	ctx2, config2 := AddConfig(ctx)
	config2.Transfers++
	assert.NotEqual(t, config2, config)

	// Check can get config back
	config2ctx := GetConfig(ctx2)
	assert.Equal(t, config2, config2ctx)
}
@ -229,7 +229,7 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
			return nil, err
		}
	}
	if fs.Config.Dump&fs.DumpFilters != 0 {
	if fs.GetConfig(context.Background()).Dump&fs.DumpFilters != 0 {
		fmt.Println("--- start filters ---")
		fmt.Println(f.DumpFilters())
		fmt.Println("--- end filters ---")
@ -540,14 +540,16 @@ var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.Li
// MakeListR makes a function to return all the files set using --files-from
func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn {
	return func(ctx context.Context, dir string, callback fs.ListRCallback) error {
		ci := fs.GetConfig(ctx)
		if !f.HaveFilesFrom() {
			return errFilesFromNotSet
		}
		var (
			remotes = make(chan string, fs.Config.Checkers)
			g       errgroup.Group
			checkers = ci.Checkers
			remotes  = make(chan string, checkers)
			g        errgroup.Group
		)
		for i := 0; i < fs.Config.Checkers; i++ {
		for i := 0; i < checkers; i++ {
			g.Go(func() (err error) {
				var entries = make(fs.DirEntries, 1)
				for remote := range remotes {
fs/fs.go (12 lines changed)
@ -774,7 +774,7 @@ func (ft *Features) Fill(ctx context.Context, f Fs) *Features {
	if do, ok := f.(Commander); ok {
		ft.Command = do.Command
	}
	return ft.DisableList(Config.DisableFeatures)
	return ft.DisableList(GetConfig(ctx).DisableFeatures)
}

// Mask the Features with the Fs passed in
@ -854,7 +854,7 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
		ft.Disconnect = nil
	}
	// Command is always local so we don't mask it
	return ft.DisableList(Config.DisableFeatures)
	return ft.DisableList(GetConfig(ctx).DisableFeatures)
}

// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap
@ -1399,7 +1399,7 @@ func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
// GetModifyWindow calculates the maximum modify window between the given Fses
// and the Config.ModifyWindow parameter.
func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration {
	window := Config.ModifyWindow
	window := GetConfig(ctx).ModifyWindow
	for _, f := range fss {
		if f != nil {
			precision := f.Precision()
@ -1424,12 +1424,12 @@ type logCalculator struct {
}

// NewPacer creates a Pacer for the given Fs and Calculator.
func NewPacer(c pacer.Calculator) *Pacer {
func NewPacer(ctx context.Context, c pacer.Calculator) *Pacer {
	p := &Pacer{
		Pacer: pacer.New(
			pacer.InvokerOption(pacerInvoker),
			pacer.MaxConnectionsOption(Config.Checkers+Config.Transfers),
			pacer.RetriesOption(Config.LowLevelRetries),
			pacer.MaxConnectionsOption(GetConfig(ctx).Checkers+GetConfig(ctx).Transfers),
			pacer.RetriesOption(GetConfig(ctx).LowLevelRetries),
			pacer.CalculatorOption(c),
		),
	}
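Backends now thread their construction-time context into NewPacer, so pacing picks up the per-context --checkers, --transfers and --low-level-retries values. A small sketch of a backend building and using one, modelled on the amazonclouddrive change earlier in this commit (the sleep value is a stand-in):

// inside a backend's NewFs(ctx, ...)
p := fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(10*time.Millisecond)))
err := p.Call(func() (bool, error) {
	// issue one API request here; return (true, err) to have the pacer retry
	return false, nil
})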
@ -127,15 +127,15 @@ func (dp *dummyPaced) fn() (bool, error) {
}

func TestPacerCall(t *testing.T) {
	expectedCalled := Config.LowLevelRetries
	ctx := context.Background()
	config := GetConfig(ctx)
	expectedCalled := config.LowLevelRetries
	if expectedCalled == 0 {
		ctx, config = AddConfig(ctx)
		expectedCalled = 20
		Config.LowLevelRetries = expectedCalled
		defer func() {
			Config.LowLevelRetries = 0
		}()
		config.LowLevelRetries = expectedCalled
	}
	p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
	p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

	dp := &dummyPaced{retry: true}
	err := p.Call(dp.fn)
@ -144,7 +144,7 @@ func TestPacerCall(t *testing.T) {
}

func TestPacerCallNoRetry(t *testing.T) {
	p := NewPacer(pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
	p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))

	dp := &dummyPaced{retry: true}
	err := p.CallNoRetry(dp.fn)
@ -34,14 +34,14 @@ var (
)

// StartHTTPTokenBucket starts the token bucket if necessary
func StartHTTPTokenBucket() {
	if fs.Config.TPSLimit > 0 {
		tpsBurst := fs.Config.TPSLimitBurst
func StartHTTPTokenBucket(ctx context.Context) {
	if fs.GetConfig(ctx).TPSLimit > 0 {
		tpsBurst := fs.GetConfig(ctx).TPSLimitBurst
		if tpsBurst < 1 {
			tpsBurst = 1
		}
		tpsBucket = rate.NewLimiter(rate.Limit(fs.Config.TPSLimit), tpsBurst)
		fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.Config.TPSLimit, tpsBurst)
		tpsBucket = rate.NewLimiter(rate.Limit(fs.GetConfig(ctx).TPSLimit), tpsBurst)
		fs.Infof(nil, "Starting HTTP transaction limiter: max %g transactions/s with burst %d", fs.GetConfig(ctx).TPSLimit, tpsBurst)
	}
}
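Because the limiter is built from whichever config the startup context carries, an embedding program could in principle scope a TPS limit before starting the bucket. A hedged sketch, not a call pattern shown in this commit:

ctx, ci := fs.AddConfig(context.Background())
ci.TPSLimit = 10 // equivalent of --tpslimit 10, scoped to ctx
ci.TPSLimitBurst = 1
fshttp.StartHTTPTokenBucket(ctx) // reads TPSLimit/TPSLimitBurst via fs.GetConfig(ctx)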
fs/log.go (15 lines changed)
@ -1,6 +1,7 @@
package fs

import (
	"context"
	"fmt"
	"log"

@ -72,7 +73,7 @@ func (l *LogLevel) Type() string {
// LogPrint sends the text to the logger of level
var LogPrint = func(level LogLevel, text string) {
	var prefix string
	if Config.LogSystemdSupport {
	if GetConfig(context.TODO()).LogSystemdSupport {
		switch level {
		case LogLevelDebug:
			prefix = sysdjournald.DebugPrefix
@ -121,7 +122,7 @@ func (j LogValueItem) String() string {
func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
	out := fmt.Sprintf(text, args...)

	if Config.UseJSONLog {
	if GetConfig(context.TODO()).UseJSONLog {
		fields := logrus.Fields{}
		if o != nil {
			fields = logrus.Fields{
@ -158,7 +159,7 @@ func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{})

// LogLevelPrintf writes logs at the given level
func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
	if Config.LogLevel >= level {
	if GetConfig(context.TODO()).LogLevel >= level {
		LogPrintf(level, o, text, args...)
	}
}
@ -166,7 +167,7 @@ func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interfac
// Errorf writes error log output for this Object or Fs. It
// should always be seen by the user.
func Errorf(o interface{}, text string, args ...interface{}) {
	if Config.LogLevel >= LogLevelError {
	if GetConfig(context.TODO()).LogLevel >= LogLevelError {
		LogPrintf(LogLevelError, o, text, args...)
	}
}
@ -177,7 +178,7 @@ func Errorf(o interface{}, text string, args ...interface{}) {
// important things the user should see. The user can filter these
// out with the -q flag.
func Logf(o interface{}, text string, args ...interface{}) {
	if Config.LogLevel >= LogLevelNotice {
	if GetConfig(context.TODO()).LogLevel >= LogLevelNotice {
		LogPrintf(LogLevelNotice, o, text, args...)
	}
}
@ -186,7 +187,7 @@ func Logf(o interface{}, text string, args ...interface{}) {
// level for logging transfers, deletions and things which should
// appear with the -v flag.
func Infof(o interface{}, text string, args ...interface{}) {
	if Config.LogLevel >= LogLevelInfo {
	if GetConfig(context.TODO()).LogLevel >= LogLevelInfo {
		LogPrintf(LogLevelInfo, o, text, args...)
	}
}
@ -194,7 +195,7 @@ func Infof(o interface{}, text string, args ...interface{}) {
// Debugf writes debugging output for this Object or Fs. Use this for
// debug only. The user has to specify -vv to see this.
func Debugf(o interface{}, text string, args ...interface{}) {
	if Config.LogLevel >= LogLevelDebug {
	if GetConfig(context.TODO()).LogLevel >= LogLevelDebug {
		LogPrintf(LogLevelDebug, o, text, args...)
	}
}
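The logging helpers have no context parameter yet, so they fetch the config through context.TODO(), which carries no value and therefore resolves to the global config. The practical consequence, as a sketch:

ctx, ci := fs.AddConfig(context.Background())
ci.LogLevel = fs.LogLevelDebug // raises verbosity for code that reads fs.GetConfig(ctx)...
fs.Debugf(nil, "hello")        // ...but not for this helper: it still checks the global LogLevel
_ = ctx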
@ -2,6 +2,7 @@
package log

import (
	"context"
	"io"
	"log"
	"os"
@ -51,7 +52,7 @@ func fnName() string {
//
// Any pointers in the exit function will be dereferenced
func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) {
	if fs.Config.LogLevel < fs.LogLevelDebug {
	if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug {
		return func(format string, a ...interface{}) {}
	}
	name := fnName()
@ -76,7 +77,7 @@ func Trace(o interface{}, format string, a ...interface{}) func(string, ...inter

// Stack logs a stack trace of callers with the o and info passed in
func Stack(o interface{}, info string) {
	if fs.Config.LogLevel < fs.LogLevelDebug {
	if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug {
		return
	}
	arr := [16 * 1024]byte{}
@ -90,7 +91,7 @@ func Stack(o interface{}, info string) {
func InitLogging() {
	flagsStr := "," + Opt.Format + ","
	var flags int
	if !fs.Config.LogSystemdSupport {
	if !fs.GetConfig(context.Background()).LogSystemdSupport {
		if strings.Contains(flagsStr, ",date,") {
			flags |= log.Ldate
		}
@ -49,10 +49,11 @@ type Marcher interface {
}

// init sets up a march over opt.Fsrc and opt.Fdst, calling back the callback for each match
func (m *March) init() {
	m.srcListDir = m.makeListDir(m.Fsrc, m.SrcIncludeAll)
func (m *March) init(ctx context.Context) {
	ci := fs.GetConfig(ctx)
	m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll)
	if !m.NoTraverse {
		m.dstListDir = m.makeListDir(m.Fdst, m.DstIncludeAll)
		m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll)
	}
	// Now create the matching transform
	// ..normalise the UTF8 first
@ -65,7 +66,7 @@ func (m *March) init() {
	// | Yes | No  | No  |
	// | No  | Yes | Yes |
	// | Yes | Yes | Yes |
	if m.Fdst.Features().CaseInsensitive || fs.Config.IgnoreCaseSync {
	if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync {
		m.transforms = append(m.transforms, strings.ToLower)
	}
}
@ -75,9 +76,10 @@ type listDirFn func(dir string) (entries fs.DirEntries, err error)

// makeListDir constructs a listing function for the given fs
// and includeAll flags for marching through the file system.
func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
	if !(fs.Config.UseListR && f.Features().ListR != nil) && // !--fast-list active and
		!(fs.Config.NoTraverse && filter.Active.HaveFilesFrom()) { // !(--files-from and --no-traverse)
func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listDirFn {
	ci := fs.GetConfig(ctx)
	if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
		!(ci.NoTraverse && filter.Active.HaveFilesFrom()) { // !(--files-from and --no-traverse)
		return func(dir string) (entries fs.DirEntries, err error) {
			return list.DirSorted(m.Ctx, f, includeAll, dir)
		}
@ -95,7 +97,7 @@ func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
		mu.Lock()
		defer mu.Unlock()
		if !started {
			dirs, dirsErr = walk.NewDirTree(m.Ctx, f, m.Dir, includeAll, fs.Config.MaxDepth)
			dirs, dirsErr = walk.NewDirTree(m.Ctx, f, m.Dir, includeAll, ci.MaxDepth)
			started = true
		}
		if dirsErr != nil {
@ -122,10 +124,11 @@ type listDirJob struct {
}

// Run starts the matching process off
func (m *March) Run() error {
	m.init()
func (m *March) Run(ctx context.Context) error {
	ci := fs.GetConfig(ctx)
	m.init(ctx)

	srcDepth := fs.Config.MaxDepth
	srcDepth := ci.MaxDepth
	if srcDepth < 0 {
		srcDepth = fs.MaxLevel
	}
@ -141,8 +144,9 @@ func (m *March) Run() error {
	// Start some directory listing go routines
	var wg sync.WaitGroup         // sync closing of go routines
	var traversing sync.WaitGroup // running directory traversals
	in := make(chan listDirJob, fs.Config.Checkers)
	for i := 0; i < fs.Config.Checkers; i++ {
	checkers := ci.Checkers
	in := make(chan listDirJob, checkers)
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
@ -203,7 +203,7 @@ func TestMarch(t *testing.T) {
		DstIncludeAll: filter.Active.Opt.DeleteExcluded,
	}

	mt.processError(m.Run())
	mt.processError(m.Run(ctx))
	mt.cancel()
	err := mt.currentError()
	require.NoError(t, err)
@ -270,7 +270,7 @@ func TestMarchNoTraverse(t *testing.T) {
		DstIncludeAll: filter.Active.Opt.DeleteExcluded,
	}

	mt.processError(m.Run())
	mt.processError(m.Run(ctx))
	mt.cancel()
	err := mt.currentError()
	require.NoError(t, err)
@ -114,16 +114,17 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {

// check to see if two objects are identical using the check function
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
	ci := fs.GetConfig(ctx)
	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
	defer func() {
		tr.Done(ctx, err)
	}()
	if sizeDiffers(src, dst) {
	if sizeDiffers(ctx, src, dst) {
		err = errors.Errorf("Sizes differ")
		fs.Errorf(src, "%v", err)
		return true, false, nil
	}
	if fs.Config.SizeOnly {
	if ci.SizeOnly {
		return false, false, nil
	}
	return c.opt.Check(ctx, dst, src)
@ -202,11 +203,12 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func CheckFn(ctx context.Context, opt *CheckOpt) error {
	ci := fs.GetConfig(ctx)
	if opt.Check == nil {
		return errors.New("internal error: nil check function")
	}
	c := &checkMarch{
		tokens: make(chan struct{}, fs.Config.Checkers),
		tokens: make(chan struct{}, ci.Checkers),
		opt:    *opt,
	}

@ -219,7 +221,7 @@ func CheckFn(ctx context.Context, opt *CheckOpt) error {
		Callback: c,
	}
	fs.Debugf(c.opt.Fdst, "Waiting for checks to finish")
	err := m.Run()
	err := m.Run(ctx)
	c.wg.Wait() // wait for background go-routines

	if c.dstFilesMissing > 0 {
@ -308,7 +310,8 @@ func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
//
// it returns true if differences were found
func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
	err = Retry(src, fs.Config.LowLevelRetries, func() error {
	ci := fs.GetConfig(ctx)
	err = Retry(src, ci.LowLevelRetries, func() error {
		differ, err = checkIdenticalDownload(ctx, dst, src)
		return err
	})
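checkMarch's tokens channel is a counting semaphore sized by the per-context --checkers value. The pattern in isolation, as a sketch with stand-in work items:

ci := fs.GetConfig(ctx)
tokens := make(chan struct{}, ci.Checkers) // capacity = max concurrent checks
var wg sync.WaitGroup
for i := 0; i < 100; i++ { // 100 stand-in work items
	tokens <- struct{}{} // acquire a slot; blocks while ci.Checkers checks are in flight
	wg.Add(1)
	go func(n int) {
		defer wg.Done()
		defer func() { <-tokens }() // release the slot
		// perform one bounded-concurrency check here
	}(i)
}
wg.Wait()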
@ -24,6 +24,8 @@ import (
func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operations.CheckOpt) error) {
r := fstest.NewRun(t)
defer r.Finalise()
ctx := context.Background()
ci := fs.GetConfig(ctx)

addBuffers := func(opt *operations.CheckOpt) {
opt.Combined = new(bytes.Buffer)
@ -73,7 +75,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
OneWay: oneway,
}
addBuffers(&opt)
err := checkFunction(context.Background(), &opt)
err := checkFunction(ctx, &opt)
gotErrors := accounting.GlobalStats().GetErrors()
gotChecks := accounting.GlobalStats().GetChecks()
if wantErrors == 0 && err != nil {
@ -95,7 +97,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
})
}

file1 := r.WriteBoth(context.Background(), "rutabaga", "is tasty", t3)
file1 := r.WriteBoth(ctx, "rutabaga", "is tasty", t3)
fstest.CheckItems(t, r.Fremote, file1)
fstest.CheckItems(t, r.Flocal, file1)
check(1, 0, 1, false, map[string]string{
@ -118,7 +120,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
"error": "",
})

file3 := r.WriteObject(context.Background(), "empty space", "-", t2)
file3 := r.WriteObject(ctx, "empty space", "-", t2)
fstest.CheckItems(t, r.Fremote, file1, file3)
check(3, 2, 1, false, map[string]string{
"combined": "- empty space\n+ potato2\n= rutabaga\n",
@ -130,10 +132,10 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
})

file2r := file2
if fs.Config.SizeOnly {
file2r = r.WriteObject(context.Background(), "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
if ci.SizeOnly {
file2r = r.WriteObject(ctx, "potato2", "--Some-Differences-But-Size-Only-Is-Enabled-----------------", t1)
} else {
r.WriteObject(context.Background(), "potato2", "------------------------------------------------------------", t1)
r.WriteObject(ctx, "potato2", "------------------------------------------------------------", t1)
}
fstest.CheckItems(t, r.Fremote, file1, file2r, file3)
check(4, 1, 2, false, map[string]string{
@ -157,7 +159,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
"error": "",
})

file4 := r.WriteObject(context.Background(), "remotepotato", "------------------------------------------------------------", t1)
file4 := r.WriteObject(ctx, "remotepotato", "------------------------------------------------------------", t1)
fstest.CheckItems(t, r.Fremote, file1, file2r, file3r, file4)
check(6, 2, 3, false, map[string]string{
"combined": "* empty space\n= potato2\n= rutabaga\n- remotepotato\n",
@ -182,11 +184,12 @@ func TestCheck(t *testing.T) {
}

func TestCheckFsError(t *testing.T) {
dstFs, err := fs.NewFs(context.Background(), "non-existent")
ctx := context.Background()
dstFs, err := fs.NewFs(ctx, "non-existent")
if err != nil {
t.Fatal(err)
}
srcFs, err := fs.NewFs(context.Background(), "non-existent")
srcFs, err := fs.NewFs(ctx, "non-existent")
if err != nil {
t.Fatal(err)
}
@ -195,7 +198,7 @@ func TestCheckFsError(t *testing.T) {
Fsrc: srcFs,
OneWay: false,
}
err = operations.Check(context.Background(), &opt)
err = operations.Check(ctx, &opt)
require.Error(t, err)
}

@ -204,8 +207,10 @@ func TestCheckDownload(t *testing.T) {
}

func TestCheckSizeOnly(t *testing.T) {
fs.Config.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }()
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci.SizeOnly = true
defer func() { ci.SizeOnly = false }()
TestCheck(t)
}

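TestCheckSizeOnly above mutates the config and restores it in a defer. A condensed sketch of that save/restore pattern, assuming (as the tests above do) that fs.GetConfig on a plain background context hands back the shared config, so a missing restore would leak into later tests:

import (
	"context"
	"testing"

	"github.com/rclone/rclone/fs"
)

// TestWithSizeOnly is a hypothetical name; the body mirrors the tests above.
func TestWithSizeOnly(t *testing.T) {
	ci := fs.GetConfig(context.Background())
	old := ci.SizeOnly
	ci.SizeOnly = true
	defer func() { ci.SizeOnly = old }()
	// ... exercise code that reads ci.SizeOnly through the context ...
}
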
@ -75,6 +75,8 @@ func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []

// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {
ci := fs.GetConfig(ctx)

// Make map of IDs
IDs := make(map[string]int, len(objs))
for _, o := range objs {
@ -104,7 +106,7 @@ func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, obj
dupesByID := make(map[string][]fs.Object, len(objs))
for _, o := range objs {
ID := ""
if fs.Config.SizeOnly && o.Size() >= 0 {
if ci.SizeOnly && o.Size() >= 0 {
ID = fmt.Sprintf("size %d", o.Size())
} else if ht != hash.None {
hashValue, err := o.Hash(ctx, ht)
@ -229,8 +231,9 @@ func (x *DeduplicateMode) Type() string {

// dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) {
ci := fs.GetConfig(ctx)
dirs := map[string][]fs.Directory{}
err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
err := walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
entries.ForDir(func(d fs.Directory) {
dirs[d.Remote()] = append(dirs[d.Remote()], d)
})
@ -297,6 +300,7 @@ func sortSmallestFirst(objs []fs.Object) {
// Google Drive which can have duplicate file names.
func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
fs.Infof(f, "Looking for duplicates using %v mode.", mode)
ci := fs.GetConfig(ctx)

// Find duplicate directories first and fix them
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
@ -315,7 +319,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {

// Now find duplicate files
files := map[string][]fs.Object{}
err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
files[remote] = append(files[remote], o)

@ -79,15 +79,17 @@ func TestDeduplicateSizeOnly(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
skipIfCantDedupe(t, r.Fremote)
ctx := context.Background()
ci := fs.GetConfig(ctx)

file1 := r.WriteUncheckedObject(context.Background(), "one", "This is one", t1)
file2 := r.WriteUncheckedObject(context.Background(), "one", "THIS IS ONE", t1)
file3 := r.WriteUncheckedObject(context.Background(), "one", "This is another one", t1)
r.CheckWithDuplicates(t, file1, file2, file3)

fs.Config.SizeOnly = true
ci.SizeOnly = true
defer func() {
fs.Config.SizeOnly = false
ci.SizeOnly = false
}()

err := operations.Deduplicate(context.Background(), r.Fremote, operations.DeduplicateSkip)

@ -115,7 +115,7 @@ func ListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt,
hashTypes = append(hashTypes, ht)
}
}
err := walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
err := walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(ctx, opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
for _, entry := range entries {
switch entry.(type) {
case fs.Directory:

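ConfigMaxDepth now takes the context too (its own hunk appears further down in operations.go); the behaviour is unchanged. A restatement for reference, with only the helper name assumed:

import "github.com/rclone/rclone/fs"

// configMaxDepth mirrors ConfigMaxDepth: MaxDepth wins, except that a
// non-recursive listing with the default of -1 is clamped to one level.
func configMaxDepth(ci *fs.ConfigInfo, recursive bool) int {
	depth := ci.MaxDepth // -1 means unlimited
	if !recursive && depth < 0 {
		depth = 1
	}
	return depth
}
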
@ -18,15 +18,17 @@ const (

// Return a boolean as to whether we should use multi thread copy for
// this transfer
func doMultiThreadCopy(f fs.Fs, src fs.Object) bool {
func doMultiThreadCopy(ctx context.Context, f fs.Fs, src fs.Object) bool {
ci := fs.GetConfig(ctx)

// Disable multi thread if...

// ...it isn't configured
if fs.Config.MultiThreadStreams <= 1 {
if ci.MultiThreadStreams <= 1 {
return false
}
// ...size of object is less than cutoff
if src.Size() < int64(fs.Config.MultiThreadCutoff) {
if src.Size() < int64(ci.MultiThreadCutoff) {
return false
}
// ...source doesn't support it
@ -36,7 +38,7 @@ func doMultiThreadCopy(f fs.Fs, src fs.Object) bool {
}
// ...if --multi-thread-streams not in use and source and
// destination are both local
if !fs.Config.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal {
if !ci.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal {
return false
}
return true
@ -55,6 +57,7 @@ type multiThreadCopyState struct {

// Copy a single stream into place
func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err error) {
ci := fs.GetConfig(ctx)
defer func() {
if err != nil {
fs.Debugf(mc.src, "multi-thread copy: stream %d/%d failed: %v", stream+1, mc.streams, err)
@ -71,7 +74,7 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err

fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v starting", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))

rc, err := NewReOpen(ctx, mc.src, fs.Config.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
rc, err := NewReOpen(ctx, mc.src, ci.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
if err != nil {
return errors.Wrap(err, "multipart copy: failed to open source")
}

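The gate above reduces to three config checks plus a feature probe on the destination. A condensed restatement (illustration only; wouldMultiThread is a hypothetical helper and the feature check is folded into a parameter):

import "github.com/rclone/rclone/fs"

func wouldMultiThread(ci *fs.ConfigInfo, size int64, openWriterAt, bothLocal bool) bool {
	if ci.MultiThreadStreams <= 1 || size < int64(ci.MultiThreadCutoff) {
		return false // not configured, or object below the cutoff
	}
	if !openWriterAt {
		return false // destination can't write at arbitrary offsets
	}
	// local to local stays single threaded unless --multi-thread-streams was set
	return ci.MultiThreadSet || !bothLocal
}
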
@ -17,64 +17,66 @@ import (
)

func TestDoMultiThreadCopy(t *testing.T) {
f := mockfs.NewFs(context.Background(), "potato", "")
ctx := context.Background()
ci := fs.GetConfig(ctx)
f := mockfs.NewFs(ctx, "potato", "")
src := mockobject.New("file.txt").WithContent([]byte(random.String(100)), mockobject.SeekModeNone)
srcFs := mockfs.NewFs(context.Background(), "sausage", "")
srcFs := mockfs.NewFs(ctx, "sausage", "")
src.SetFs(srcFs)

oldStreams := fs.Config.MultiThreadStreams
oldCutoff := fs.Config.MultiThreadCutoff
oldIsSet := fs.Config.MultiThreadSet
oldStreams := ci.MultiThreadStreams
oldCutoff := ci.MultiThreadCutoff
oldIsSet := ci.MultiThreadSet
defer func() {
fs.Config.MultiThreadStreams = oldStreams
fs.Config.MultiThreadCutoff = oldCutoff
fs.Config.MultiThreadSet = oldIsSet
ci.MultiThreadStreams = oldStreams
ci.MultiThreadCutoff = oldCutoff
ci.MultiThreadSet = oldIsSet
}()

fs.Config.MultiThreadStreams, fs.Config.MultiThreadCutoff = 4, 50
fs.Config.MultiThreadSet = false
ci.MultiThreadStreams, ci.MultiThreadCutoff = 4, 50
ci.MultiThreadSet = false

nullWriterAt := func(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
panic("don't call me")
}
f.Features().OpenWriterAt = nullWriterAt

assert.True(t, doMultiThreadCopy(f, src))
assert.True(t, doMultiThreadCopy(ctx, f, src))

fs.Config.MultiThreadStreams = 0
assert.False(t, doMultiThreadCopy(f, src))
fs.Config.MultiThreadStreams = 1
assert.False(t, doMultiThreadCopy(f, src))
fs.Config.MultiThreadStreams = 2
assert.True(t, doMultiThreadCopy(f, src))
ci.MultiThreadStreams = 0
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadStreams = 1
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadStreams = 2
assert.True(t, doMultiThreadCopy(ctx, f, src))

fs.Config.MultiThreadCutoff = 200
assert.False(t, doMultiThreadCopy(f, src))
fs.Config.MultiThreadCutoff = 101
assert.False(t, doMultiThreadCopy(f, src))
fs.Config.MultiThreadCutoff = 100
assert.True(t, doMultiThreadCopy(f, src))
ci.MultiThreadCutoff = 200
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadCutoff = 101
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadCutoff = 100
assert.True(t, doMultiThreadCopy(ctx, f, src))

f.Features().OpenWriterAt = nil
assert.False(t, doMultiThreadCopy(f, src))
assert.False(t, doMultiThreadCopy(ctx, f, src))
f.Features().OpenWriterAt = nullWriterAt
assert.True(t, doMultiThreadCopy(f, src))
assert.True(t, doMultiThreadCopy(ctx, f, src))

f.Features().IsLocal = true
srcFs.Features().IsLocal = true
assert.False(t, doMultiThreadCopy(f, src))
fs.Config.MultiThreadSet = true
assert.True(t, doMultiThreadCopy(f, src))
fs.Config.MultiThreadSet = false
assert.False(t, doMultiThreadCopy(f, src))
assert.False(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadSet = true
assert.True(t, doMultiThreadCopy(ctx, f, src))
ci.MultiThreadSet = false
assert.False(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(f, src))
assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = true
assert.False(t, doMultiThreadCopy(f, src))
assert.False(t, doMultiThreadCopy(ctx, f, src))
f.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(f, src))
assert.True(t, doMultiThreadCopy(ctx, f, src))
srcFs.Features().IsLocal = false
assert.True(t, doMultiThreadCopy(f, src))
assert.True(t, doMultiThreadCopy(ctx, f, src))
}

func TestMultithreadCalculateChunks(t *testing.T) {

@ -119,13 +119,14 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
return equal(ctx, src, dst, defaultEqualOpt())
return equal(ctx, src, dst, defaultEqualOpt(ctx))
}

// sizeDiffers compare the size of src and dst taking into account the
// various ways of ignoring sizes
func sizeDiffers(src, dst fs.ObjectInfo) bool {
if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 {
func sizeDiffers(ctx context.Context, src, dst fs.ObjectInfo) bool {
ci := fs.GetConfig(ctx)
if ci.IgnoreSize || src.Size() < 0 || dst.Size() < 0 {
return false
}
return src.Size() != dst.Size()
@ -142,11 +143,12 @@ type equalOpt struct {
}

// default set of options for equal()
func defaultEqualOpt() equalOpt {
func defaultEqualOpt(ctx context.Context) equalOpt {
ci := fs.GetConfig(ctx)
return equalOpt{
sizeOnly: fs.Config.SizeOnly,
checkSum: fs.Config.CheckSum,
updateModTime: !fs.Config.NoUpdateModTime,
sizeOnly: ci.SizeOnly,
checkSum: ci.CheckSum,
updateModTime: !ci.NoUpdateModTime,
forceModTimeMatch: false,
}
}
@ -161,7 +163,8 @@ func logModTimeUpload(dst fs.Object) {
}

func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
if sizeDiffers(src, dst) {
ci := fs.GetConfig(ctx)
if sizeDiffers(ctx, src, dst) {
fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
return false
}
@ -218,7 +221,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
fs.Debugf(src, "%v differ", ht)
return false
}
if ht == hash.None && !fs.Config.RefreshTimes {
if ht == hash.None && !ci.RefreshTimes {
// if couldn't check hash, return that they differ
return false
}
@ -228,7 +231,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
if !SkipDestructive(ctx, src, "update modification time") {
// Size and hash the same but mtime different
// Error if objects are treated as immutable
if fs.Config.Immutable {
if ci.Immutable {
fs.Errorf(dst, "StartedAt mismatch between immutable objects")
|
||||
return false
}
@ -243,7 +246,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
// put in the BackupDir than deleted which is what will happen if we don't delete it.
if fs.Config.BackupDir == "" {
if ci.BackupDir == "" {
err = dst.Remove(ctx)
if err != nil {
fs.Errorf(dst, "failed to delete before re-upload: %v", err)
@ -337,11 +340,12 @@ var _ fs.FullObjectInfo = (*OverrideRemote)(nil)

// CommonHash returns a single hash.Type and a HashOption with that
// type which is in common between the two fs.Fs.
func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
func CommonHash(ctx context.Context, fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
ci := fs.GetConfig(ctx)
// work out which hash to use - limit to 1 hash in common
var common hash.Set
hashType := hash.None
if !fs.Config.IgnoreChecksum {
if !ci.IgnoreChecksum {
common = fb.Hashes().Overlap(fa.Hashes())
if common.Count() > 0 {
hashType = common.GetOne()
@ -357,6 +361,7 @@ func CommonHash(fa, fb fs.Info) (hash.Type, *fs.HashesOption) {
// It returns the destination object if possible. Note that this may
// be nil.
func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewTransfer(src)
defer func() {
tr.Done(ctx, err)
@ -365,25 +370,25 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
if SkipDestructive(ctx, src, "copy") {
return newDst, nil
}
maxTries := fs.Config.LowLevelRetries
maxTries := ci.LowLevelRetries
tries := 0
doUpdate := dst != nil
hashType, hashOption := CommonHash(f, src.Fs())
hashType, hashOption := CommonHash(ctx, f, src.Fs())

var actionTaken string
for {
// Try server-side copy first - if has optional interface and
// is same underlying remote
actionTaken = "Copied (server-side copy)"
if fs.Config.MaxTransfer >= 0 {
if ci.MaxTransfer >= 0 {
var bytesSoFar int64
if fs.Config.CutoffMode == fs.CutoffModeCautious {
if ci.CutoffMode == fs.CutoffModeCautious {
bytesSoFar = accounting.Stats(ctx).GetBytesWithPending() + src.Size()
} else {
bytesSoFar = accounting.Stats(ctx).GetBytes()
}
if bytesSoFar >= int64(fs.Config.MaxTransfer) {
if fs.Config.CutoffMode == fs.CutoffModeHard {
if bytesSoFar >= int64(ci.MaxTransfer) {
if ci.CutoffMode == fs.CutoffModeHard {
return nil, accounting.ErrorMaxTransferLimitReachedFatal
}
return nil, accounting.ErrorMaxTransferLimitReachedGraceful
@ -408,12 +413,12 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
}
// If can't server-side copy, do it manually
if err == fs.ErrorCantCopy {
if doMultiThreadCopy(f, src) {
if doMultiThreadCopy(ctx, f, src) {
// Number of streams proportional to size
streams := src.Size() / int64(fs.Config.MultiThreadCutoff)
streams := src.Size() / int64(ci.MultiThreadCutoff)
// With maximum
if streams > int64(fs.Config.MultiThreadStreams) {
streams = int64(fs.Config.MultiThreadStreams)
if streams > int64(ci.MultiThreadStreams) {
streams = int64(ci.MultiThreadStreams)
}
if streams < 2 {
streams = 2
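The stream count just above is proportional to the object size, capped and floored. Restated, with a worked example assuming the shipped defaults of --multi-thread-cutoff 250M and --multi-thread-streams 4: a 600M object gets 2 streams, a 2G object gets the cap of 4.

import "github.com/rclone/rclone/fs"

// multiThreadStreamCount mirrors the calculation in Copy above.
func multiThreadStreamCount(ci *fs.ConfigInfo, size int64) int64 {
	streams := size / int64(ci.MultiThreadCutoff) // one per cutoff-sized chunk
	if streams > int64(ci.MultiThreadStreams) {
		streams = int64(ci.MultiThreadStreams) // cap at --multi-thread-streams
	}
	if streams < 2 {
		streams = 2 // minimum of two once multi-thread copy was chosen
	}
	return streams
}
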
@ -427,10 +432,10 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
} else {
var in0 io.ReadCloser
options := []fs.OpenOption{hashOption}
for _, option := range fs.Config.DownloadHeaders {
for _, option := range ci.DownloadHeaders {
options = append(options, option)
}
in0, err = NewReOpen(ctx, src, fs.Config.LowLevelRetries, options...)
in0, err = NewReOpen(ctx, src, ci.LowLevelRetries, options...)
if err != nil {
err = errors.Wrap(err, "failed to open source object")
} else {
@ -452,7 +457,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
wrappedSrc = NewOverrideRemote(src, remote)
}
options := []fs.OpenOption{hashOption}
for _, option := range fs.Config.UploadHeaders {
for _, option := range ci.UploadHeaders {
options = append(options, option)
}
if doUpdate {
@ -491,7 +496,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
}

// Verify sizes are the same after transfer
if sizeDiffers(src, dst) {
if sizeDiffers(ctx, src, dst) {
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
fs.Errorf(dst, "%v", err)
err = fs.CountError(err)
@ -607,16 +612,17 @@ func CanServerSideMove(fdst fs.Fs) bool {

// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
func SuffixName(remote string) string {
if fs.Config.Suffix == "" {
func SuffixName(ctx context.Context, remote string) string {
ci := fs.GetConfig(ctx)
if ci.Suffix == "" {
return remote
}
if fs.Config.SuffixKeepExtension {
if ci.SuffixKeepExtension {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
return base + fs.Config.Suffix + ext
return base + ci.Suffix + ext
}
return remote + fs.Config.Suffix
return remote + ci.Suffix
}

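With the context parameter added, SuffixName picks up --suffix and --suffix-keep-extension from whatever config the context carries. A small usage sketch (suffix values are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	ctx, ci := fs.AddConfig(context.Background())
	ci.Suffix = ".bak"
	ci.SuffixKeepExtension = true
	// The suffix goes before the extension when SuffixKeepExtension is set.
	fmt.Println(operations.SuffixName(ctx, "dir/report.txt")) // dir/report.bak.txt
}
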
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
@ -625,12 +631,13 @@ func SuffixName(remote string) string {
// If backupDir is set then it moves the file to there instead of
// deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
defer func() {
tr.Done(ctx, err)
}()
numDeletes := accounting.Stats(ctx).Deletes(1)
if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete {
if ci.MaxDelete != -1 && numDeletes > ci.MaxDelete {
return fserrors.FatalError(errors.New("--max-delete threshold reached"))
}
action, actioned := "delete", "Deleted"
@ -669,11 +676,12 @@ func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
// instead of being deleted.
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
ci := fs.GetConfig(ctx)
wg.Add(ci.Transfers)
var errorCount int32
var fatalErrorCount int32

for i := 0; i < fs.Config.Transfers; i++ {
for i := 0; i < ci.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range toBeDeleted {
@ -779,7 +787,8 @@ func Retry(o interface{}, maxTries int, fn func() error) (err error) {
//
// Lists in parallel which may get them out of order
func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error {
return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
ci := fs.GetConfig(ctx)
return walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(fn)
return nil
})
@ -898,8 +907,9 @@ func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error)
}

// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
func ConfigMaxDepth(recursive bool) int {
depth := fs.Config.MaxDepth
func ConfigMaxDepth(ctx context.Context, recursive bool) int {
ci := fs.GetConfig(ctx)
depth := ci.MaxDepth
if !recursive && depth < 0 {
depth = 1
}
@ -908,7 +918,7 @@ func ConfigMaxDepth(recursive bool) int {

// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error {
return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
return walk.ListR(ctx, f, "", false, ConfigMaxDepth(ctx, false), walk.ListDirs, func(entries fs.DirEntries) error {
entries.ForDir(func(dir fs.Directory) {
if dir != nil {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
@ -985,7 +995,8 @@ func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error {
delChan := make(fs.ObjectsChan, fs.Config.Transfers)
ci := fs.GetConfig(ctx)
delChan := make(fs.ObjectsChan, ci.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- DeleteFiles(ctx, delChan)
@ -1008,10 +1019,11 @@ func Delete(ctx context.Context, f fs.Fs) error {
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
o := make(fs.ObjectsChan, fs.Config.Checkers)
ci := fs.GetConfig(ctx)
o := make(fs.ObjectsChan, ci.Checkers)
go func() {
defer close(o)
err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.ListR(ctx, f, dir, true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(obj fs.Object) {
o <- obj
})
@ -1054,6 +1066,7 @@ type readCloser struct {
// if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
var mu sync.Mutex
ci := fs.GetConfig(ctx)
return ListFn(ctx, f, func(o fs.Object) {
var err error
tr := accounting.Stats(ctx).NewTransfer(o)
@ -1072,7 +1085,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
if opt.Start > 0 || opt.End >= 0 {
options = append(options, &opt)
}
for _, option := range fs.Config.DownloadHeaders {
for _, option := range ci.DownloadHeaders {
options = append(options, option)
}
in, err := o.Open(ctx, options...)
@ -1098,6 +1111,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {

// Rcat reads data from the Reader until EOF and uploads it to a file on remote
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
defer func() {
tr.Done(ctx, err)
@ -1108,7 +1122,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
var trackingIn io.Reader
var hasher *hash.MultiHasher
var options []fs.OpenOption
if !fs.Config.IgnoreChecksum {
if !ci.IgnoreChecksum {
hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
hashOption := &fs.HashesOption{Hashes: hashes}
options = append(options, hashOption)
@ -1120,7 +1134,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
} else {
trackingIn = readCounter
}
for _, option := range fs.Config.UploadHeaders {
for _, option := range ci.UploadHeaders {
options = append(options, option)
}

@ -1140,7 +1154,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
}

// check if file small enough for direct upload
buf := make([]byte, fs.Config.StreamingUploadCutoff)
buf := make([]byte, ci.StreamingUploadCutoff)
if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
@ -1202,9 +1216,10 @@ func PublicLink(ctx context.Context, f fs.Fs, remote string, expire fs.Duration,
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
ci := fs.GetConfig(ctx)
dirEmpty := make(map[string]bool)
dirEmpty[dir] = !leaveRoot
err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
err := walk.Walk(ctx, f, dir, true, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
err = fs.CountError(err)
fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
@ -1263,9 +1278,10 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {

// GetCompareDest sets up --compare-dest
func GetCompareDest(ctx context.Context) (CompareDest fs.Fs, err error) {
CompareDest, err = cache.Get(ctx, fs.Config.CompareDest)
ci := fs.GetConfig(ctx)
CompareDest, err = cache.Get(ctx, ci.CompareDest)
if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err))
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", ci.CompareDest, err))
}
return CompareDest, nil
}
@ -1299,9 +1315,10 @@ func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (No

// GetCopyDest sets up --copy-dest
func GetCopyDest(ctx context.Context, fdst fs.Fs) (CopyDest fs.Fs, err error) {
CopyDest, err = cache.Get(ctx, fs.Config.CopyDest)
ci := fs.GetConfig(ctx)
CopyDest, err = cache.Get(ctx, ci.CopyDest)
if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err))
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", ci.CopyDest, err))
}
if !SameConfig(fdst, CopyDest) {
return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
@ -1332,7 +1349,7 @@ func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, bac
default:
return false, err
}
opt := defaultEqualOpt()
opt := defaultEqualOpt(ctx)
opt.updateModTime = false
if equal(ctx, src, CopyDestFile, opt) {
if dst == nil || !Equal(ctx, src, dst) {
@ -1364,9 +1381,10 @@ func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, bac
//
// Returns True if src does not need to be copied
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
if fs.Config.CompareDest != "" {
ci := fs.GetConfig(ctx)
if ci.CompareDest != "" {
return compareDest(ctx, dst, src, CompareOrCopyDest)
} else if fs.Config.CopyDest != "" {
} else if ci.CopyDest != "" {
return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
}
return false, nil
@ -1378,22 +1396,23 @@ func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, Comp
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
ci := fs.GetConfig(ctx)
if dst == nil {
fs.Debugf(src, "Need to transfer - File not found at Destination")
return true
}
// If we should ignore existing files, don't transfer
if fs.Config.IgnoreExisting {
if ci.IgnoreExisting {
fs.Debugf(src, "Destination exists, skipping")
return false
}
// If we should upload unconditionally
if fs.Config.IgnoreTimes {
if ci.IgnoreTimes {
fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
return true
}
// If UpdateOlder is in effect, skip if dst is newer than src
if fs.Config.UpdateOlder {
if ci.UpdateOlder {
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
dt := dstModTime.Sub(srcModTime)
@ -1411,7 +1430,7 @@ func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
return false
case dt <= -modifyWindow:
// force --checksum on for the check and do update modtimes by default
opt := defaultEqualOpt()
opt := defaultEqualOpt(ctx)
opt.forceModTimeMatch = true
if equal(ctx, src, dst, opt) {
fs.Debugf(src, "Unchanged skipping")
@ -1419,8 +1438,8 @@ func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
}
default:
// Do a size only compare unless --checksum is set
opt := defaultEqualOpt()
opt.sizeOnly = !fs.Config.CheckSum
opt := defaultEqualOpt(ctx)
opt.sizeOnly = !ci.CheckSum
if equal(ctx, src, dst, opt) {
fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
return false
@ -1483,7 +1502,7 @@ type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser,

// copyURLFn copies the data from the url to the function supplied
func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) {
client := fshttp.NewClient(fs.Config)
client := fshttp.NewClient(fs.GetConfig(ctx))
resp, err := client.Get(url)
if err != nil {
return err
@ -1531,10 +1550,11 @@ func CopyURLToWriter(ctx context.Context, url string, out io.Writer) (err error)

// BackupDir returns the correctly configured --backup-dir
func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
if fs.Config.BackupDir != "" {
backupDir, err = cache.Get(ctx, fs.Config.BackupDir)
ci := fs.GetConfig(ctx)
if ci.BackupDir != "" {
backupDir, err = cache.Get(ctx, ci.BackupDir)
if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err))
}
if !SameConfig(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
@ -1547,7 +1567,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
}
} else {
if fs.Config.Suffix == "" {
if ci.Suffix == "" {
if SameDir(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
}
@ -1556,7 +1576,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
}
}
}
} else if fs.Config.Suffix != "" {
} else if ci.Suffix != "" {
// --backup-dir is not set but --suffix is - use the destination as the backupDir
backupDir = fdst
} else {
@ -1570,7 +1590,7 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)

// MoveBackupDir moves a file to the backup dir
func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) {
remoteWithSuffix := SuffixName(dst.Remote())
remoteWithSuffix := SuffixName(ctx, dst.Remote())
overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix)
_, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst)
return err
@ -1578,6 +1598,7 @@ func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err err

// moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
ci := fs.GetConfig(ctx)
dstFilePath := path.Join(fdst.Root(), dstFileName)
srcFilePath := path.Join(fsrc.Root(), srcFileName)
if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
@ -1599,7 +1620,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str

// Find dst object if it exists
var dstObj fs.Object
if !fs.Config.NoCheckDest {
if !ci.NoCheckDest {
dstObj, err = fdst.NewObject(ctx, dstFileName)
if err == fs.ErrorObjectNotFound {
dstObj = nil
@ -1635,18 +1656,18 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
}

var backupDir, copyDestDir fs.Fs
if fs.Config.BackupDir != "" || fs.Config.Suffix != "" {
if ci.BackupDir != "" || ci.Suffix != "" {
backupDir, err = BackupDir(ctx, fdst, fsrc, srcFileName)
if err != nil {
return errors.Wrap(err, "creating Fs for --backup-dir failed")
}
}
if fs.Config.CompareDest != "" {
if ci.CompareDest != "" {
copyDestDir, err = GetCompareDest(ctx)
if err != nil {
return err
}
} else if fs.Config.CopyDest != "" {
} else if ci.CopyDest != "" {
copyDestDir, err = GetCopyDest(ctx, fdst)
if err != nil {
return err
@ -1853,6 +1874,7 @@ func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
// It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel.
func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
ci := fs.GetConfig(ctx)
// Use DirMove if possible
if doDirMove := f.Features().DirMove; doDirMove != nil {
err = doDirMove(ctx, f, srcRemote, dstRemote)
@ -1885,9 +1907,9 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
o fs.Object
newPath string
}
renames := make(chan rename, fs.Config.Transfers)
renames := make(chan rename, ci.Transfers)
g, gCtx := errgroup.WithContext(context.Background())
for i := 0; i < fs.Config.Transfers; i++ {
for i := 0; i < ci.Transfers; i++ {
g.Go(func() error {
for job := range renames {
dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
@ -2019,11 +2041,12 @@ func skipDestructiveChoose(ctx context.Context, subject interface{}, action stri
// to action subject".
func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) {
var flag string
ci := fs.GetConfig(ctx)
switch {
case fs.Config.DryRun:
case ci.DryRun:
flag = "--dry-run"
skip = true
case fs.Config.Interactive:
case ci.Interactive:
flag = "--interactive"
interactiveMu.Lock()
defer interactiveMu.Unlock()

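Across all of the operations.go hunks the shape is the same: the ctx argument comes first, the config is read once per call, and helpers receive ctx rather than a *fs.ConfigInfo. A schematic of that shape (doSomething is a stand-in, not a real rclone function):

import (
	"context"

	"github.com/rclone/rclone/fs"
)

func doSomething(ctx context.Context) error {
	ci := fs.GetConfig(ctx) // one lookup at the top of the call
	workers := ci.Transfers // ... then plain field reads throughout ...
	_ = workers
	return nil
}
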
@ -3,6 +3,7 @@
package operations

import (
"context"
"fmt"
"testing"
"time"
@ -13,6 +14,8 @@ import (
)

func TestSizeDiffers(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
when := time.Now()
for _, test := range []struct {
ignoreSize bool
@ -31,10 +34,10 @@ func TestSizeDiffers(t *testing.T) {
} {
src := object.NewStaticObjectInfo("a", when, test.srcSize, true, nil, nil)
dst := object.NewStaticObjectInfo("a", when, test.dstSize, true, nil, nil)
oldIgnoreSize := fs.Config.IgnoreSize
fs.Config.IgnoreSize = test.ignoreSize
got := sizeDiffers(src, dst)
fs.Config.IgnoreSize = oldIgnoreSize
oldIgnoreSize := ci.IgnoreSize
ci.IgnoreSize = test.ignoreSize
got := sizeDiffers(ctx, src, dst)
ci.IgnoreSize = oldIgnoreSize
assert.Equal(t, test.want, got, fmt.Sprintf("ignoreSize=%v, srcSize=%v, dstSize=%v", test.ignoreSize, test.srcSize, test.dstSize))
}
}

@ -106,6 +106,7 @@ func TestLs(t *testing.T) {

func TestLsWithFilesFrom(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
@ -132,10 +133,10 @@ func TestLsWithFilesFrom(t *testing.T) {
assert.Equal(t, " 60 potato2\n", buf.String())

// Now try with --no-traverse
oldNoTraverse := fs.Config.NoTraverse
fs.Config.NoTraverse = true
oldNoTraverse := ci.NoTraverse
ci.NoTraverse = true
defer func() {
fs.Config.NoTraverse = oldNoTraverse
ci.NoTraverse = oldNoTraverse
}()

buf.Reset()
@ -269,9 +270,11 @@ func TestHashSums(t *testing.T) {
}

func TestSuffixName(t *testing.T) {
origSuffix, origKeepExt := fs.Config.Suffix, fs.Config.SuffixKeepExtension
ctx := context.Background()
ci := fs.GetConfig(ctx)
origSuffix, origKeepExt := ci.Suffix, ci.SuffixKeepExtension
defer func() {
fs.Config.Suffix, fs.Config.SuffixKeepExtension = origSuffix, origKeepExt
ci.Suffix, ci.SuffixKeepExtension = origSuffix, origKeepExt
}()
for _, test := range []struct {
remote string
@ -288,15 +291,16 @@ func TestSuffixName(t *testing.T) {
{"test", "-suffix", false, "test-suffix"},
{"test", "-suffix", true, "test-suffix"},
} {
fs.Config.Suffix = test.suffix
fs.Config.SuffixKeepExtension = test.keepExt
got := operations.SuffixName(test.remote)
ci.Suffix = test.suffix
ci.SuffixKeepExtension = test.keepExt
got := operations.SuffixName(ctx, test.remote)
assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test))
}
}

func TestCount(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteBoth(ctx, "potato2", "------------------------------------------------------------", t1)
@ -306,8 +310,8 @@ func TestCount(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2, file3)

// Check the MaxDepth too
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()
ci.MaxDepth = 1
defer func() { ci.MaxDepth = -1 }()

objects, size, err := operations.Count(ctx, r.Fremote)
require.NoError(t, err)
@ -583,6 +587,7 @@ func TestRmdirsLeaveRoot(t *testing.T) {

func TestCopyURL(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -635,10 +640,10 @@ func TestCopyURL(t *testing.T) {
status = 0

// check when reading from unverified HTTPS server
fs.Config.InsecureSkipVerify = true
ci.InsecureSkipVerify = true
fshttp.ResetTransport()
defer func() {
fs.Config.InsecureSkipVerify = false
ci.InsecureSkipVerify = false
fshttp.ResetTransport()
}()
tss := httptest.NewTLSServer(handler)
@ -750,16 +755,17 @@ func TestCaseInsensitiveMoveFile(t *testing.T) {

func TestMoveFileBackupDir(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy")
}

oldBackupDir := fs.Config.BackupDir
fs.Config.BackupDir = r.FremoteName + "/backup"
oldBackupDir := ci.BackupDir
ci.BackupDir = r.FremoteName + "/backup"
defer func() {
fs.Config.BackupDir = oldBackupDir
ci.BackupDir = oldBackupDir
}()

file1 := r.WriteFile("dst/file1", "file1 contents", t1)
@ -804,16 +810,17 @@ func TestCopyFile(t *testing.T) {

func TestCopyFileBackupDir(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
if !operations.CanServerSideMove(r.Fremote) {
t.Skip("Skipping test as remote does not support server-side move or copy")
}

oldBackupDir := fs.Config.BackupDir
fs.Config.BackupDir = r.FremoteName + "/backup"
oldBackupDir := ci.BackupDir
ci.BackupDir = r.FremoteName + "/backup"
defer func() {
fs.Config.BackupDir = oldBackupDir
ci.BackupDir = oldBackupDir
}()

file1 := r.WriteFile("dst/file1", "file1 contents", t1)
@ -832,12 +839,13 @@ func TestCopyFileBackupDir(t *testing.T) {
// Test with CompareDest set
func TestCopyFileCompareDest(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.CompareDest = r.FremoteName + "/CompareDest"
ci.CompareDest = r.FremoteName + "/CompareDest"
defer func() {
fs.Config.CompareDest = ""
ci.CompareDest = ""
}()
fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
require.NoError(t, err)
@ -913,6 +921,7 @@ func TestCopyFileCompareDest(t *testing.T) {
// Test with CopyDest set
func TestCopyFileCopyDest(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -920,9 +929,9 @@ func TestCopyFileCopyDest(t *testing.T) {
t.Skip("Skipping test as remote does not support server-side copy")
}

fs.Config.CopyDest = r.FremoteName + "/CopyDest"
ci.CopyDest = r.FremoteName + "/CopyDest"
defer func() {
fs.Config.CopyDest = ""
ci.CopyDest = ""
}()

fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
@ -955,7 +964,7 @@ func TestCopyFileCopyDest(t *testing.T) {

// check old dest, new copy, backup-dir

fs.Config.BackupDir = r.FremoteName + "/BackupDir"
ci.BackupDir = r.FremoteName + "/BackupDir"

file3 := r.WriteObject(ctx, "dst/one", "one", t1)
file2 := r.WriteObject(ctx, "CopyDest/one", "onet2", t2)
@ -971,7 +980,7 @@ func TestCopyFileCopyDest(t *testing.T) {
file3.Path = "BackupDir/one"

fstest.CheckItems(t, r.Fremote, file2, file2dst, file3)
fs.Config.BackupDir = ""
ci.BackupDir = ""

// check empty dest, new copy
file4 := r.WriteObject(ctx, "CopyDest/two", "two", t2)
@ -1329,11 +1338,13 @@ func TestGetFsInfo(t *testing.T) {
}

func TestRcat(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
check := func(withChecksum, ignoreChecksum bool) {
checksumBefore, ignoreChecksumBefore := fs.Config.CheckSum, fs.Config.IgnoreChecksum
fs.Config.CheckSum, fs.Config.IgnoreChecksum = withChecksum, ignoreChecksum
checksumBefore, ignoreChecksumBefore := ci.CheckSum, ci.IgnoreChecksum
ci.CheckSum, ci.IgnoreChecksum = withChecksum, ignoreChecksum
defer func() {
fs.Config.CheckSum, fs.Config.IgnoreChecksum = checksumBefore, ignoreChecksumBefore
ci.CheckSum, ci.IgnoreChecksum = checksumBefore, ignoreChecksumBefore
}()

var prefix string
@ -1350,13 +1361,13 @@ func TestRcat(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

if *fstest.SizeLimit > 0 && int64(fs.Config.StreamingUploadCutoff) > *fstest.SizeLimit {
savedCutoff := fs.Config.StreamingUploadCutoff
if *fstest.SizeLimit > 0 && int64(ci.StreamingUploadCutoff) > *fstest.SizeLimit {
savedCutoff := ci.StreamingUploadCutoff
defer func() {
fs.Config.StreamingUploadCutoff = savedCutoff
ci.StreamingUploadCutoff = savedCutoff
}()
fs.Config.StreamingUploadCutoff = fs.SizeSuffix(*fstest.SizeLimit)
t.Logf("Adjust StreamingUploadCutoff to size limit %s (was %s)", fs.Config.StreamingUploadCutoff, savedCutoff)
ci.StreamingUploadCutoff = fs.SizeSuffix(*fstest.SizeLimit)
t.Logf("Adjust StreamingUploadCutoff to size limit %s (was %s)", ci.StreamingUploadCutoff, savedCutoff)
}

fstest.CheckListing(t, r.Fremote, []fstest.Item{})
@ -1364,7 +1375,7 @@ func TestRcat(t *testing.T) {
data1 := "this is some really nice test data"
path1 := prefix + "small_file_from_pipe"

data2 := string(make([]byte, fs.Config.StreamingUploadCutoff+1))
data2 := string(make([]byte, ci.StreamingUploadCutoff+1))
path2 := prefix + "big_file_from_pipe"

in := ioutil.NopCloser(strings.NewReader(data1))
@ -1418,14 +1429,15 @@ func TestRcatSize(t *testing.T) {

func TestCopyFileMaxTransfer(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
old := fs.Config.MaxTransfer
oldMode := fs.Config.CutoffMode
old := ci.MaxTransfer
oldMode := ci.CutoffMode

defer func() {
fs.Config.MaxTransfer = old
fs.Config.CutoffMode = oldMode
ci.MaxTransfer = old
ci.CutoffMode = oldMode
accounting.Stats(ctx).ResetCounters()
}()

@ -1436,8 +1448,8 @@ func TestCopyFileMaxTransfer(t *testing.T) {
file4 := r.WriteFile("TestCopyFileMaxTransfer/file4", "file4 contents"+random.String(sizeCutoff), t2)

// Cutoff mode: Hard
fs.Config.MaxTransfer = sizeCutoff
fs.Config.CutoffMode = fs.CutoffModeHard
ci.MaxTransfer = sizeCutoff
ci.CutoffMode = fs.CutoffModeHard

// file1: Show a small file gets transferred OK
accounting.Stats(ctx).ResetCounters()
@ -1456,7 +1468,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)

// Cutoff mode: Cautious
fs.Config.CutoffMode = fs.CutoffModeCautious
ci.CutoffMode = fs.CutoffModeCautious

// file3: show a large file does not get transferred
accounting.Stats(ctx).ResetCounters()
@ -1473,7 +1485,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
}

// Cutoff mode: Soft
fs.Config.CutoffMode = fs.CutoffModeSoft
ci.CutoffMode = fs.CutoffModeSoft

// file4: show a large file does get transferred this time
accounting.Stats(ctx).ResetCounters()

@ -75,10 +75,12 @@ func TestOptionsGet(t *testing.T) {

func TestOptionsGetMarshal(t *testing.T) {
defer clearOptionBlock()()
ctx := context.Background()
ci := fs.GetConfig(ctx)

// Add some real options
AddOption("http", &httplib.DefaultOpt)
AddOption("main", fs.Config)
AddOption("main", ci)
AddOption("rc", &DefaultOpt)

// get them

@ -30,6 +30,7 @@ type syncCopyMove struct {
deleteEmptySrcDirs bool
dir string
// internal state
ci *fs.ConfigInfo // global config
ctx context.Context // internal context for controlling go-routines
cancel func() // cancel the context
inCtx context.Context // internal context for controlling march
@ -97,7 +98,9 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
return nil, fserrors.FatalError(fs.ErrorOverlapping)
}
ci := fs.GetConfig(ctx)
s := &syncCopyMove{
ci: ci,
fdst: fdst,
fsrc: fsrc,
deleteMode: deleteMode,
@ -105,42 +108,42 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
copyEmptySrcDirs: copyEmptySrcDirs,
deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "",
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
srcFilesChan: make(chan fs.Object, ci.Checkers+ci.Transfers),
srcFilesResult: make(chan error, 1),
dstFilesResult: make(chan error, 1),
dstEmptyDirs: make(map[string]fs.DirEntry),
srcEmptyDirs: make(map[string]fs.DirEntry),
noTraverse: fs.Config.NoTraverse,
noCheckDest: fs.Config.NoCheckDest,
noUnicodeNormalization: fs.Config.NoUnicodeNormalization,
deleteFilesCh: make(chan fs.Object, fs.Config.Checkers),
trackRenames: fs.Config.TrackRenames,
noTraverse: ci.NoTraverse,
noCheckDest: ci.NoCheckDest,
noUnicodeNormalization: ci.NoUnicodeNormalization,
deleteFilesCh: make(chan fs.Object, ci.Checkers),
trackRenames: ci.TrackRenames,
commonHash: fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
modifyWindow: fs.GetModifyWindow(ctx, fsrc, fdst),
trackRenamesCh: make(chan fs.Object, fs.Config.Checkers),
checkFirst: fs.Config.CheckFirst,
trackRenamesCh: make(chan fs.Object, ci.Checkers),
checkFirst: ci.CheckFirst,
}
backlog := fs.Config.MaxBacklog
backlog := ci.MaxBacklog
if s.checkFirst {
fs.Infof(s.fdst, "Running all checks before starting transfers")
backlog = -1
}
var err error
s.toBeChecked, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
s.toBeChecked, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
if err != nil {
return nil, err
}
s.toBeUploaded, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
s.toBeUploaded, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
if err != nil {
return nil, err
}
s.toBeRenamed, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
s.toBeRenamed, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
if err != nil {
return nil, err
}
// If a max session duration has been defined add a deadline to the context
if fs.Config.MaxDuration > 0 {
endTime := time.Now().Add(fs.Config.MaxDuration)
if ci.MaxDuration > 0 {
endTime := time.Now().Add(ci.MaxDuration)
fs.Infof(s.fdst, "Transfer session deadline: %s", endTime.Format("2006/01/02 15:04:05"))
s.ctx, s.cancel = context.WithDeadline(ctx, endTime)
} else {
@ -152,7 +155,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
fs.Errorf(nil, "Ignoring --no-traverse with sync")
s.noTraverse = false
}
s.trackRenamesStrategy, err = parseTrackRenamesStrategy(fs.Config.TrackRenamesStrategy)
s.trackRenamesStrategy, err = parseTrackRenamesStrategy(ci.TrackRenamesStrategy)
if err != nil {
return nil, err
}
@ -160,7 +163,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
if s.deleteMode != fs.DeleteModeOff {
return nil, errors.New("can't use --no-check-dest with sync: use copy instead")
}
if fs.Config.Immutable {
if ci.Immutable {
return nil, errors.New("can't use --no-check-dest with --immutable")
}
if s.backupDir != nil {
@ -199,20 +202,20 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
}
}
// Make Fs for --backup-dir if required
if fs.Config.BackupDir != "" || fs.Config.Suffix != "" {
if ci.BackupDir != "" || ci.Suffix != "" {
var err error
s.backupDir, err = operations.BackupDir(ctx, fdst, fsrc, "")
if err != nil {
return nil, err
}
}
if fs.Config.CompareDest != "" {
if ci.CompareDest != "" {
var err error
s.compareCopyDest, err = operations.GetCompareDest(ctx)
if err != nil {
return nil, err
}
} else if fs.Config.CopyDest != "" {
} else if ci.CopyDest != "" {
var err error
s.compareCopyDest, err = operations.GetCopyDest(ctx, fdst)
if err != nil {
@ -312,7 +315,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
}
if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) {
// If files are treated as immutable, fail if destination exists and does not match
if fs.Config.Immutable && pair.Dst != nil {
if s.ci.Immutable && pair.Dst != nil {
fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
s.processError(fs.ErrorImmutableModified)
} else {
@ -389,9 +392,9 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,

// This starts the background checkers.
func (s *syncCopyMove) startCheckers() {
s.checkerWg.Add(fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
fraction := (100 * i) / fs.Config.Checkers
s.checkerWg.Add(s.ci.Checkers)
for i := 0; i < s.ci.Checkers; i++ {
fraction := (100 * i) / s.ci.Checkers
go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg)
}
}
@ -405,9 +408,9 @@ func (s *syncCopyMove) stopCheckers() {

// This starts the background transfers
func (s *syncCopyMove) startTransfers() {
s.transfersWg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
fraction := (100 * i) / fs.Config.Transfers
s.transfersWg.Add(s.ci.Transfers)
for i := 0; i < s.ci.Transfers; i++ {
fraction := (100 * i) / s.ci.Transfers
go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg)
}
}
@ -424,9 +427,9 @@ func (s *syncCopyMove) startRenamers() {
if !s.trackRenames {
return
}
s.renamerWg.Add(fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
fraction := (100 * i) / fs.Config.Checkers
s.renamerWg.Add(s.ci.Checkers)
for i := 0; i < s.ci.Checkers; i++ {
fraction := (100 * i) / s.ci.Checkers
go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg)
}
}
@ -492,13 +495,13 @@ func (s *syncCopyMove) stopDeleters() {
// checkSrcMap is clear then it assumes that the any source files that
// have been found have been removed from dstFiles already.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
if accounting.Stats(s.ctx).Errored() && !fs.Config.IgnoreErrors {
if accounting.Stats(s.ctx).Errored() && !s.ci.IgnoreErrors {
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
return fs.ErrorNotDeleting
}

// Delete the spare files
toDelete := make(fs.ObjectsChan, fs.Config.Transfers)
toDelete := make(fs.ObjectsChan, s.ci.Transfers)
go func() {
outer:
for remote, o := range s.dstFiles {
@ -524,11 +527,11 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {

// This deletes the empty directories in the slice passed in. It
// ignores any errors deleting directories
func deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error {
func (s *syncCopyMove) deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error {
if len(entriesMap) == 0 {
return nil
|
||||
}
|
||||
if accounting.Stats(ctx).Errored() && !fs.Config.IgnoreErrors {
|
||||
if accounting.Stats(ctx).Errored() && !s.ci.IgnoreErrors {
|
||||
fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
|
||||
return fs.ErrorNotDeletingDirs
|
||||
}
|
||||
@ -729,14 +732,14 @@ func (s *syncCopyMove) makeRenameMap() {
|
||||
}
|
||||
|
||||
// pump all the dstFiles into in
|
||||
in := make(chan fs.Object, fs.Config.Checkers)
|
||||
in := make(chan fs.Object, s.ci.Checkers)
|
||||
go s.pumpMapToChan(s.dstFiles, in)
|
||||
|
||||
// now make a map of size,hash for all dstFiles
|
||||
s.renameMap = make(map[string][]fs.Object)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(fs.Config.Transfers)
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
wg.Add(s.ci.Transfers)
|
||||
for i := 0; i < s.ci.Transfers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for obj := range in {
|
||||
@ -829,7 +832,7 @@ func (s *syncCopyMove) run() error {
|
||||
NoCheckDest: s.noCheckDest,
|
||||
NoUnicodeNormalization: s.noUnicodeNormalization,
|
||||
}
|
||||
s.processError(m.Run())
|
||||
s.processError(m.Run(s.ctx))
|
||||
|
||||
s.stopTrackRenames()
|
||||
if s.trackRenames {
|
||||
@ -860,7 +863,7 @@ func (s *syncCopyMove) run() error {
|
||||
|
||||
// Delete files after
|
||||
if s.deleteMode == fs.DeleteModeAfter {
|
||||
if s.currentError() != nil && !fs.Config.IgnoreErrors {
|
||||
if s.currentError() != nil && !s.ci.IgnoreErrors {
|
||||
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
|
||||
} else {
|
||||
s.processError(s.deleteFiles(false))
|
||||
@ -869,10 +872,10 @@ func (s *syncCopyMove) run() error {
|
||||
|
||||
// Prune empty directories
|
||||
if s.deleteMode != fs.DeleteModeOff {
|
||||
if s.currentError() != nil && !fs.Config.IgnoreErrors {
|
||||
if s.currentError() != nil && !s.ci.IgnoreErrors {
|
||||
fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
|
||||
} else {
|
||||
s.processError(deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
|
||||
s.processError(s.deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
|
||||
}
|
||||
}
|
||||
|
||||
@ -880,7 +883,7 @@ func (s *syncCopyMove) run() error {
|
||||
// if DoMove and --delete-empty-src-dirs flag is set
|
||||
if s.DoMove && s.deleteEmptySrcDirs {
|
||||
//delete empty subdirectories that were part of the move
|
||||
s.processError(deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs))
|
||||
s.processError(s.deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs))
|
||||
}
|
||||
|
||||
// Read the error out of the context if there is one
|
||||
@ -1038,12 +1041,13 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
|
||||
//
|
||||
// dir is the start directory, "" for root
|
||||
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
ci := fs.GetConfig(ctx)
|
||||
if deleteMode != fs.DeleteModeOff && DoMove {
|
||||
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
|
||||
}
|
||||
// Run an extra pass to delete only
|
||||
if deleteMode == fs.DeleteModeBefore {
|
||||
if fs.Config.TrackRenames {
|
||||
if ci.TrackRenames {
|
||||
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
|
||||
}
|
||||
// only delete stuff during in this pass
|
||||
@ -1067,7 +1071,8 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
|
||||
// Sync fsrc into fdst
|
||||
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
|
||||
ci := fs.GetConfig(ctx)
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
|
||||
}
|
||||
|
||||
// CopyDir copies fsrc into fdst
|
||||
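
Every hunk above makes the same move: capture the config once at the top of the function with ci := fs.GetConfig(ctx) (or keep it on the struct as s.ci) and replace each read of the old fs.Config global with that local copy. For readers unfamiliar with the technique, here is a minimal sketch of how a context-scoped config of this kind can be wired up. The names (fsconfig, configKey, globalConfig) are illustrative only, not the real rclone fs internals:

    package fsconfig // sketch only; rclone's real version lives in the fs package

    import "context"

    // ConfigInfo stands in for fs.ConfigInfo; only a few fields shown.
    type ConfigInfo struct {
        Checkers  int
        Transfers int
        DryRun    bool
    }

    // configKey is a private key type so no other package can collide
    // with our context value.
    type configKey struct{}

    // globalConfig plays the role of the old mutable global.
    var globalConfig = &ConfigInfo{Checkers: 8, Transfers: 4}

    // GetConfig returns the config attached to ctx, or the process-wide
    // default when none has been attached yet.
    func GetConfig(ctx context.Context) *ConfigInfo {
        if ci, ok := ctx.Value(configKey{}).(*ConfigInfo); ok {
            return ci
        }
        return globalConfig
    }

    // AddConfig attaches a fresh mutable copy of the current config to a
    // derived context, so a caller can override options without racing
    // other users of the original context.
    func AddConfig(ctx context.Context) (context.Context, *ConfigInfo) {
        ci := *GetConfig(ctx) // shallow copy
        return context.WithValue(ctx, configKey{}, &ci), &ci
    }

With this shape, a caller that wants a dry run for a single operation can take a private copy, flip the flag, and pass the derived context down, while concurrent operations on other contexts keep their own settings.
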

@ -39,14 +39,15 @@ func TestMain(m *testing.M) {
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(ctx, r.Fremote)

fs.Config.DryRun = true
ci.DryRun = true
err := CopyDir(ctx, r.Fremote, r.Flocal, false)
fs.Config.DryRun = false
ci.DryRun = false
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file1)
@ -86,11 +87,12 @@ func TestCopyMissingDirectory(t *testing.T) {
// Now with --no-traverse
func TestCopyNoTraverse(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.NoTraverse = true
defer func() { fs.Config.NoTraverse = false }()
ci.NoTraverse = true
defer func() { ci.NoTraverse = false }()

file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

@ -104,11 +106,12 @@ func TestCopyNoTraverse(t *testing.T) {
// Now with --check-first
func TestCopyCheckFirst(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.CheckFirst = true
defer func() { fs.Config.CheckFirst = false }()
ci.CheckFirst = true
defer func() { ci.CheckFirst = false }()

file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

@ -122,11 +125,12 @@ func TestCopyCheckFirst(t *testing.T) {
// Now with --no-traverse
func TestSyncNoTraverse(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.NoTraverse = true
defer func() { fs.Config.NoTraverse = false }()
ci.NoTraverse = true
defer func() { ci.NoTraverse = false }()

file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

@ -141,14 +145,15 @@ func TestSyncNoTraverse(t *testing.T) {
// Test copy with depth
func TestCopyWithDepth(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
file2 := r.WriteFile("hello world2", "hello world2", t2)

// Check the MaxDepth too
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()
ci.MaxDepth = 1
defer func() { ci.MaxDepth = -1 }()

err := CopyDir(ctx, r.Fremote, r.Flocal, false)
require.NoError(t, err)
@ -160,6 +165,7 @@ func TestCopyWithDepth(t *testing.T) {
// Test copy with files from
func testCopyWithFilesFrom(t *testing.T, noTraverse bool) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("potato2", "hello world", t1)
@ -173,12 +179,12 @@ func testCopyWithFilesFrom(t *testing.T, noTraverse bool) {

// Monkey patch the active filter
oldFilter := filter.Active
oldNoTraverse := fs.Config.NoTraverse
oldNoTraverse := ci.NoTraverse
filter.Active = f
fs.Config.NoTraverse = noTraverse
ci.NoTraverse = noTraverse
unpatch := func() {
filter.Active = oldFilter
fs.Config.NoTraverse = oldNoTraverse
ci.NoTraverse = oldNoTraverse
}
defer unpatch()

@ -332,10 +338,11 @@ func TestCopyRedownload(t *testing.T) {
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.CheckSum = true
defer func() { fs.Config.CheckSum = false }()
ci.CheckSum = true
defer func() { ci.CheckSum = false }()

file1 := r.WriteFile("check sum", "-", t1)
fstest.CheckItems(t, r.Flocal, file1)
@ -367,10 +374,11 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
// only, we expect nothing to to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }()
ci.SizeOnly = true
defer func() { ci.SizeOnly = false }()

file1 := r.WriteFile("sizeonly", "potato", t1)
fstest.CheckItems(t, r.Flocal, file1)
@ -402,10 +410,11 @@ func TestSyncSizeOnly(t *testing.T) {
// transferred on the second sync.
func TestSyncIgnoreSize(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.IgnoreSize = true
defer func() { fs.Config.IgnoreSize = false }()
ci.IgnoreSize = true
defer func() { ci.IgnoreSize = false }()

file1 := r.WriteFile("ignore-size", "contents", t1)
fstest.CheckItems(t, r.Flocal, file1)
@ -434,6 +443,7 @@ func TestSyncIgnoreSize(t *testing.T) {

func TestSyncIgnoreTimes(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteBoth(ctx, "existing", "potato", t1)
@ -447,8 +457,8 @@ func TestSyncIgnoreTimes(t *testing.T) {
// files were identical.
assert.Equal(t, int64(0), accounting.GlobalStats().GetTransfers())

fs.Config.IgnoreTimes = true
defer func() { fs.Config.IgnoreTimes = false }()
ci.IgnoreTimes = true
defer func() { ci.IgnoreTimes = false }()

accounting.GlobalStats().ResetCounters()
err = Sync(ctx, r.Fremote, r.Flocal, false)
@ -464,12 +474,13 @@ func TestSyncIgnoreTimes(t *testing.T) {

func TestSyncIgnoreExisting(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("existing", "potato", t1)

fs.Config.IgnoreExisting = true
defer func() { fs.Config.IgnoreExisting = false }()
ci.IgnoreExisting = true
defer func() { ci.IgnoreExisting = false }()

accounting.GlobalStats().ResetCounters()
err := Sync(ctx, r.Fremote, r.Flocal, false)
@ -488,10 +499,11 @@ func TestSyncIgnoreExisting(t *testing.T) {

func TestSyncIgnoreErrors(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
fs.Config.IgnoreErrors = true
ci.IgnoreErrors = true
defer func() {
fs.Config.IgnoreErrors = false
ci.IgnoreErrors = false
r.Finalise()
}()
file1 := r.WriteFile("a/potato2", "------------------------------------------------------------", t1)
@ -561,6 +573,7 @@ func TestSyncIgnoreErrors(t *testing.T) {

func TestSyncAfterChangingModtimeOnly(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("empty space", "-", t2)
@ -569,8 +582,8 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)

fs.Config.DryRun = true
defer func() { fs.Config.DryRun = false }()
ci.DryRun = true
defer func() { ci.DryRun = false }()

accounting.GlobalStats().ResetCounters()
err := Sync(ctx, r.Fremote, r.Flocal, false)
@ -579,7 +592,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file2)

fs.Config.DryRun = false
ci.DryRun = false

accounting.GlobalStats().ResetCounters()
err = Sync(ctx, r.Fremote, r.Flocal, false)
@ -591,6 +604,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {

func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -599,9 +613,9 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
return
}

fs.Config.NoUpdateModTime = true
ci.NoUpdateModTime = true
defer func() {
fs.Config.NoUpdateModTime = false
ci.NoUpdateModTime = false
}()

file1 := r.WriteFile("empty space", "-", t2)
@ -703,16 +717,17 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
// Sync after removing a file and adding a file --dry-run
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
file2 := r.WriteObject(ctx, "potato", "SMALLER BUT SAME DATE", t2)
file3 := r.WriteBoth(ctx, "empty space", "-", t2)

fs.Config.DryRun = true
ci.DryRun = true
accounting.GlobalStats().ResetCounters()
err := Sync(ctx, r.Fremote, r.Flocal, false)
fs.Config.DryRun = false
ci.DryRun = false
require.NoError(t, err)

fstest.CheckItems(t, r.Flocal, file3, file1)
@ -885,16 +900,20 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {

// Sync test delete after
func TestSyncDeleteAfter(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
// This is the default so we've checked this already
// check it is the default
require.Equal(t, fs.Config.DeleteMode, fs.DeleteModeAfter, "Didn't default to --delete-after")
require.Equal(t, ci.DeleteMode, fs.DeleteModeAfter, "Didn't default to --delete-after")
}

// Sync test delete during
func TestSyncDeleteDuring(t *testing.T) {
fs.Config.DeleteMode = fs.DeleteModeDuring
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci.DeleteMode = fs.DeleteModeDuring
defer func() {
fs.Config.DeleteMode = fs.DeleteModeDefault
ci.DeleteMode = fs.DeleteModeDefault
}()

TestSyncAfterRemovingAFileAndAddingAFile(t)
@ -902,9 +921,11 @@ func TestSyncDeleteDuring(t *testing.T) {

// Sync test delete before
func TestSyncDeleteBefore(t *testing.T) {
fs.Config.DeleteMode = fs.DeleteModeBefore
ctx := context.Background()
ci := fs.GetConfig(ctx)
ci.DeleteMode = fs.DeleteModeBefore
defer func() {
fs.Config.DeleteMode = fs.DeleteModeDefault
ci.DeleteMode = fs.DeleteModeDefault
}()

TestSyncAfterRemovingAFileAndAddingAFile(t)
@ -913,12 +934,13 @@ func TestSyncDeleteBefore(t *testing.T) {
// Copy test delete before - shouldn't delete anything
func TestCopyDeleteBefore(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.DeleteMode = fs.DeleteModeBefore
ci.DeleteMode = fs.DeleteModeBefore
defer func() {
fs.Config.DeleteMode = fs.DeleteModeDefault
ci.DeleteMode = fs.DeleteModeDefault
}()

file1 := r.WriteObject(ctx, "potato", "hopefully not deleted", t1)
@ -997,6 +1019,7 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
// Test with UpdateOlder set
func TestSyncWithUpdateOlder(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()
if fs.GetModifyWindow(ctx, r.Fremote) == fs.ModTimeNotSupported {
@ -1016,12 +1039,12 @@ func TestSyncWithUpdateOlder(t *testing.T) {
fourO := r.WriteObject(ctx, "four", "FOURFOUR", t2minus)
fstest.CheckItems(t, r.Fremote, oneO, twoO, threeO, fourO)

fs.Config.UpdateOlder = true
oldModifyWindow := fs.Config.ModifyWindow
fs.Config.ModifyWindow = fs.ModTimeNotSupported
ci.UpdateOlder = true
oldModifyWindow := ci.ModifyWindow
ci.ModifyWindow = fs.ModTimeNotSupported
defer func() {
fs.Config.UpdateOlder = false
fs.Config.ModifyWindow = oldModifyWindow
ci.UpdateOlder = false
ci.ModifyWindow = oldModifyWindow
}()

err := Sync(ctx, r.Fremote, r.Flocal, false)
@ -1034,8 +1057,8 @@ func TestSyncWithUpdateOlder(t *testing.T) {
}

// now enable checksum
fs.Config.CheckSum = true
defer func() { fs.Config.CheckSum = false }()
ci.CheckSum = true
defer func() { ci.CheckSum = false }()

err = Sync(ctx, r.Fremote, r.Flocal, false)
require.NoError(t, err)
@ -1045,6 +1068,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
// Test with a max transfer duration
func TestSyncWithMaxDuration(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
if *fstest.RemoteName != "" {
t.Skip("Skipping test on non local remote")
}
@ -1052,14 +1076,14 @@ func TestSyncWithMaxDuration(t *testing.T) {
defer r.Finalise()

maxDuration := 250 * time.Millisecond
fs.Config.MaxDuration = maxDuration
ci.MaxDuration = maxDuration
bytesPerSecond := 300
accounting.SetBwLimit(fs.SizeSuffix(bytesPerSecond))
oldTransfers := fs.Config.Transfers
fs.Config.Transfers = 1
oldTransfers := ci.Transfers
ci.Transfers = 1
defer func() {
fs.Config.MaxDuration = 0 // reset back to default
fs.Config.Transfers = oldTransfers
ci.MaxDuration = 0 // reset back to default
ci.Transfers = oldTransfers
accounting.SetBwLimit(fs.SizeSuffix(0))
}()

@ -1089,12 +1113,13 @@ func TestSyncWithMaxDuration(t *testing.T) {
// Test with TrackRenames set
func TestSyncWithTrackRenames(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.TrackRenames = true
ci.TrackRenames = true
defer func() {
fs.Config.TrackRenames = false
ci.TrackRenames = false
}()

haveHash := r.Fremote.Hashes().Overlap(r.Flocal.Hashes()).GetOne() != hash.None
@ -1160,14 +1185,15 @@ func TestRenamesStrategyModtime(t *testing.T) {

func TestSyncWithTrackRenamesStrategyModtime(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.TrackRenames = true
fs.Config.TrackRenamesStrategy = "modtime"
ci.TrackRenames = true
ci.TrackRenamesStrategy = "modtime"
defer func() {
fs.Config.TrackRenames = false
fs.Config.TrackRenamesStrategy = "hash"
ci.TrackRenames = false
ci.TrackRenamesStrategy = "hash"
}()

canTrackRenames := operations.CanServerSideMove(r.Fremote) && r.Fremote.Precision() != fs.ModTimeNotSupported
@ -1199,14 +1225,15 @@ func TestSyncWithTrackRenamesStrategyModtime(t *testing.T) {

func TestSyncWithTrackRenamesStrategyLeaf(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.TrackRenames = true
fs.Config.TrackRenamesStrategy = "leaf"
ci.TrackRenames = true
ci.TrackRenamesStrategy = "leaf"
defer func() {
fs.Config.TrackRenames = false
fs.Config.TrackRenamesStrategy = "hash"
ci.TrackRenames = false
ci.TrackRenamesStrategy = "hash"
}()

canTrackRenames := operations.CanServerSideMove(r.Fremote) && r.Fremote.Precision() != fs.ModTimeNotSupported
@ -1445,12 +1472,13 @@ func TestSyncOverlap(t *testing.T) {
// Test with CompareDest set
func TestSyncCompareDest(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.CompareDest = r.FremoteName + "/CompareDest"
ci.CompareDest = r.FremoteName + "/CompareDest"
defer func() {
fs.Config.CompareDest = ""
ci.CompareDest = ""
}()

fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
@ -1533,6 +1561,7 @@ func TestSyncCompareDest(t *testing.T) {
// Test with CopyDest set
func TestSyncCopyDest(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -1540,9 +1569,9 @@ func TestSyncCopyDest(t *testing.T) {
t.Skip("Skipping test as remote does not support server-side copy")
}

fs.Config.CopyDest = r.FremoteName + "/CopyDest"
ci.CopyDest = r.FremoteName + "/CopyDest"
defer func() {
fs.Config.CopyDest = ""
ci.CopyDest = ""
}()

fdst, err := fs.NewFs(ctx, r.FremoteName+"/dst")
@ -1577,7 +1606,7 @@ func TestSyncCopyDest(t *testing.T) {

// check old dest, new copy, backup-dir

fs.Config.BackupDir = r.FremoteName + "/BackupDir"
ci.BackupDir = r.FremoteName + "/BackupDir"

file3 := r.WriteObject(ctx, "dst/one", "one", t1)
file2 := r.WriteObject(ctx, "CopyDest/one", "onet2", t2)
@ -1594,7 +1623,7 @@ func TestSyncCopyDest(t *testing.T) {
file3.Path = "BackupDir/one"

fstest.CheckItems(t, r.Fremote, file2, file2dst, file3)
fs.Config.BackupDir = ""
ci.BackupDir = ""

// check empty dest, new copy
file4 := r.WriteObject(ctx, "CopyDest/two", "two", t2)
@ -1637,6 +1666,7 @@ func TestSyncCopyDest(t *testing.T) {
// Test with BackupDir set
func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeepExtension bool) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -1646,10 +1676,10 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
r.Mkdir(ctx, r.Fremote)

if backupDir != "" {
fs.Config.BackupDir = r.FremoteName + "/" + backupDir
ci.BackupDir = r.FremoteName + "/" + backupDir
backupDir += "/"
} else {
fs.Config.BackupDir = ""
ci.BackupDir = ""
backupDir = "dst/"
// Exclude the suffix from the sync otherwise the sync
// deletes the old backup files
@ -1662,12 +1692,12 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
filter.Active = oldFlt
}()
}
fs.Config.Suffix = suffix
fs.Config.SuffixKeepExtension = suffixKeepExtension
ci.Suffix = suffix
ci.SuffixKeepExtension = suffixKeepExtension
defer func() {
fs.Config.BackupDir = ""
fs.Config.Suffix = ""
fs.Config.SuffixKeepExtension = false
ci.BackupDir = ""
ci.Suffix = ""
ci.SuffixKeepExtension = false
}()

// Make the setup so we have one, two, three in the dest
@ -1742,6 +1772,7 @@ func TestSyncBackupDirSuffixOnly(t *testing.T) {
// Test with Suffix set
func testSyncSuffix(t *testing.T, suffix string, suffixKeepExtension bool) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -1750,12 +1781,12 @@ func testSyncSuffix(t *testing.T, suffix string, suffixKeepExtension bool) {
}
r.Mkdir(ctx, r.Fremote)

fs.Config.Suffix = suffix
fs.Config.SuffixKeepExtension = suffixKeepExtension
ci.Suffix = suffix
ci.SuffixKeepExtension = suffixKeepExtension
defer func() {
fs.Config.BackupDir = ""
fs.Config.Suffix = ""
fs.Config.SuffixKeepExtension = false
ci.BackupDir = ""
ci.Suffix = ""
ci.SuffixKeepExtension = false
}()

// Make the setup so we have one, two, three in the dest
@ -1865,11 +1896,12 @@ func TestSyncUTFNorm(t *testing.T) {
// Test --immutable
func TestSyncImmutable(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

fs.Config.Immutable = true
defer func() { fs.Config.Immutable = false }()
ci.Immutable = true
defer func() { ci.Immutable = false }()

// Create file on source
file1 := r.WriteFile("existing", "potato", t1)
@ -1899,6 +1931,7 @@ func TestSyncImmutable(t *testing.T) {
// Test --ignore-case-sync
func TestSyncIgnoreCase(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
r := fstest.NewRun(t)
defer r.Finalise()

@ -1907,8 +1940,8 @@ func TestSyncIgnoreCase(t *testing.T) {
t.Skip("Skipping test as local or remote are case-insensitive")
}

fs.Config.IgnoreCaseSync = true
defer func() { fs.Config.IgnoreCaseSync = false }()
ci.IgnoreCaseSync = true
defer func() { ci.IgnoreCaseSync = false }()

// Create files with different filename casing
file1 := r.WriteFile("existing", "potato", t1)
@ -1927,25 +1960,26 @@ func TestSyncIgnoreCase(t *testing.T) {
// Test that aborting on --max-transfer works
func TestMaxTransfer(t *testing.T) {
ctx := context.Background()
oldMaxTransfer := fs.Config.MaxTransfer
oldTransfers := fs.Config.Transfers
oldCheckers := fs.Config.Checkers
oldCutoff := fs.Config.CutoffMode
fs.Config.MaxTransfer = 3 * 1024
fs.Config.Transfers = 1
fs.Config.Checkers = 1
fs.Config.CutoffMode = fs.CutoffModeHard
ci := fs.GetConfig(ctx)
oldMaxTransfer := ci.MaxTransfer
oldTransfers := ci.Transfers
oldCheckers := ci.Checkers
oldCutoff := ci.CutoffMode
ci.MaxTransfer = 3 * 1024
ci.Transfers = 1
ci.Checkers = 1
ci.CutoffMode = fs.CutoffModeHard
defer func() {
fs.Config.MaxTransfer = oldMaxTransfer
fs.Config.Transfers = oldTransfers
fs.Config.Checkers = oldCheckers
fs.Config.CutoffMode = oldCutoff
ci.MaxTransfer = oldMaxTransfer
ci.Transfers = oldTransfers
ci.Checkers = oldCheckers
ci.CutoffMode = oldCutoff
}()

test := func(t *testing.T, cutoff fs.CutoffMode) {
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.CutoffMode = cutoff
ci.CutoffMode = cutoff

if r.Fremote.Name() != "local" {
t.Skip("This test only runs on local")
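
The tests above all use the same idiom: get the config for a background context, flip one option, restore it in a defer. Note the subtlety here: since nothing has attached a config to context.Background(), fs.GetConfig still returns the shared process-wide config at this stage of the refactor, so the mutation is visible to everything in the process, which is exactly why the save-and-restore dance is still needed. A condensed sketch of the idiom follows; the test name and the choice of DryRun are arbitrary:

    package sync_test

    import (
        "context"
        "testing"

        "github.com/rclone/rclone/fs"
    )

    // TestExampleDryRunOverride is a made-up example, not one of the
    // real rclone tests.
    func TestExampleDryRunOverride(t *testing.T) {
        ctx := context.Background()
        ci := fs.GetConfig(ctx) // nothing attached to ctx yet, so this is the shared config

        oldDryRun := ci.DryRun                   // save the current value
        ci.DryRun = true                         // override for this test only
        defer func() { ci.DryRun = oldDryRun }() // restore so later tests see the default

        // ... exercise the code under test with ctx here; anything that
        // calls fs.GetConfig(ctx) internally will observe DryRun == true ...
    }

Once callers routinely attach private configs to their contexts, the defer-restore boilerplate can go away, because each test's mutation is scoped to its own context.
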
@ -59,11 +59,12 @@ type Func func(path string, entries fs.DirEntries, err error) error
//
// NB (f, path) to be replaced by fs.Dir at some point
func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
if fs.Config.NoTraverse && filter.Active.HaveFilesFrom() {
ci := fs.GetConfig(ctx)
if ci.NoTraverse && filter.Active.HaveFilesFrom() {
return walkR(ctx, f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(ctx, f.NewObject))
}
// FIXME should this just be maxLevel < 0 - why the maxLevel > 1
if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
if (maxLevel < 0 || maxLevel > 1) && ci.UseListR && f.Features().ListR != nil {
return walkListR(ctx, f, path, includeAll, maxLevel, fn)
}
return walkListDirSorted(ctx, f, path, includeAll, maxLevel, fn)
@ -353,10 +354,11 @@ type listDirFunc func(ctx context.Context, fs fs.Fs, includeAll bool, dir string

func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
var (
wg sync.WaitGroup // sync closing of go routines
traversing sync.WaitGroup // running directory traversals
doClose sync.Once // close the channel once
mu sync.Mutex // stop fn being called concurrently
wg sync.WaitGroup // sync closing of go routines
traversing sync.WaitGroup // running directory traversals
doClose sync.Once // close the channel once
mu sync.Mutex // stop fn being called concurrently
ci = fs.GetConfig(ctx) // current config
)
// listJob describe a directory listing that needs to be done
type listJob struct {
@ -364,7 +366,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i
depth int
}

in := make(chan listJob, fs.Config.Checkers)
in := make(chan listJob, ci.Checkers)
errs := make(chan error, 1)
quit := make(chan struct{})
closeQuit := func() {
@ -377,7 +379,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i
}()
})
}
for i := 0; i < fs.Config.Checkers; i++ {
for i := 0; i < ci.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
@ -553,8 +555,9 @@ func walkNDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, ma
//
// NB (f, path) to be replaced by fs.Dir at some point
func NewDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (dirtree.DirTree, error) {
ci := fs.GetConfig(ctx)
// if --no-traverse and --files-from build DirTree just from files
if fs.Config.NoTraverse && filter.Active.HaveFilesFrom() {
if ci.NoTraverse && filter.Active.HaveFilesFrom() {
return walkRDirTree(ctx, f, path, includeAll, maxLevel, filter.Active.MakeListR(ctx, f.NewObject))
}
// if have ListR; and recursing; and not using --files-from; then build a DirTree with ListR
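
walk() above is a bounded worker pool: a buffered channel of listJob entries drained by ci.Checkers goroutines, with both the buffer size and the pool size now taken from the context's config rather than the global. Reduced to its concurrency skeleton it looks roughly like the sketch below. runJobs and the string job type are stand-ins for listJob and its handling, and the real walk() is self-feeding (workers enqueue subdirectories as they find them), which is what the quit channel and the traversing WaitGroup in the original are for:

    package walkexample

    import (
        "context"
        "sync"

        "github.com/rclone/rclone/fs"
    )

    // runJobs drains a fixed list of jobs with a pool sized from the
    // context config; an illustrative reduction, not rclone code.
    func runJobs(ctx context.Context, jobs []string) {
        ci := fs.GetConfig(ctx) // pool size comes from --checkers via the context
        in := make(chan string, ci.Checkers)
        var wg sync.WaitGroup
        wg.Add(ci.Checkers)
        for i := 0; i < ci.Checkers; i++ {
            go func() {
                defer wg.Done()
                for job := range in {
                    _ = job // the real walk() lists one directory here
                }
            }()
        }
        for _, job := range jobs {
            in <- job // blocks once ci.Checkers jobs are buffered, bounding memory
        }
        close(in) // lets the workers drain the channel and exit
        wg.Wait()
    }
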
@ -59,10 +59,11 @@ func init() {
// Initialise rclone for testing
func Initialise() {
ctx := context.Background()
ci := fs.GetConfig(ctx)
// Never ask for passwords, fail instead.
// If your local config is encrypted set environment variable
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
fs.Config.AskPassword = false
ci.AskPassword = false
// Override the config file from the environment - we don't
// parse the flags any more so this doesn't happen
// automatically
@ -71,16 +72,16 @@ func Initialise() {
}
config.LoadConfig(ctx)
if *Verbose {
fs.Config.LogLevel = fs.LogLevelDebug
ci.LogLevel = fs.LogLevelDebug
}
if *DumpHeaders {
fs.Config.Dump |= fs.DumpHeaders
ci.Dump |= fs.DumpHeaders
}
if *DumpBodies {
fs.Config.Dump |= fs.DumpBodies
ci.Dump |= fs.DumpBodies
}
fs.Config.LowLevelRetries = *LowLevelRetries
fs.Config.UseListR = *UseListR
ci.LowLevelRetries = *LowLevelRetries
ci.UseListR = *UseListR
}

// Item represents an item for checking

@ -295,6 +295,7 @@ func Run(t *testing.T, opt *Opt) {
isLocalRemote bool
purged bool // whether the dir has been purged or not
ctx = context.Background()
ci = fs.GetConfig(ctx)
unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
)

@ -316,10 +317,10 @@ func Run(t *testing.T, opt *Opt) {
if remote.Features().ListR == nil {
t.Skip("FS has no ListR interface")
}
previous := fs.Config.UseListR
fs.Config.UseListR = true
previous := ci.UseListR
ci.UseListR = true
return func() {
fs.Config.UseListR = previous
ci.UseListR = previous
}
}


@ -4,6 +4,7 @@ package main

import (
"bytes"
"context"
"fmt"
"go/build"
"io"
@ -345,9 +346,10 @@ func (r *Run) Init() {
r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries))
}
r.Try = 1
ci := fs.GetConfig(context.Background())
if *verbose {
r.CmdLine = append(r.CmdLine, "-verbose")
fs.Config.LogLevel = fs.LogLevelDebug
ci.LogLevel = fs.LogLevelDebug
}
if *runOnly != "" {
r.CmdLine = append(r.CmdLine, prefix+"run", *runOnly)

@ -353,7 +353,7 @@ func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mappe
// NewClient gets a token from the config file and configures a Client
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(fs.Config))
return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(fs.GetConfig(ctx)))
}

// AuthResult is returned from the web server after authorization
@ -526,7 +526,7 @@ version recommended):
}

// Exchange the code for a token
ctx = Context(ctx, fshttp.NewClient(fs.Config))
ctx = Context(ctx, fshttp.NewClient(fs.GetConfig(ctx)))
token, err := oauthConfig.Exchange(ctx, auth.Code)
if err != nil {
return errors.Wrap(err, "failed to get token")
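
The oauthutil changes show the same deglobalisation applied to HTTP clients: fshttp.NewClient takes the config explicitly, so transport behaviour now follows whatever config rides on the context instead of the global. A helper in the new style is a one-liner; newAPIClient is a hypothetical name for illustration, not part of rclone:

    package clientexample

    import (
        "context"
        "net/http"

        "github.com/rclone/rclone/fs"
        "github.com/rclone/rclone/fs/fshttp"
    )

    // newAPIClient builds an HTTP client from the config attached to
    // ctx, falling back to the defaults when none is attached.
    func newAPIClient(ctx context.Context) *http.Client {
        return fshttp.NewClient(fs.GetConfig(ctx))
    }
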
@ -276,6 +276,7 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
retries := 0
reqSize := len(p)
doReopen := false
lowLevelRetries := fs.GetConfig(context.TODO()).LowLevelRetries
for {
if doSeek {
// Are we attempting to seek beyond the end of the
@ -312,11 +313,11 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
break
}
}
if retries >= fs.Config.LowLevelRetries {
if retries >= lowLevelRetries {
break
}
retries++
fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
fs.Errorf(fh.remote, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, lowLevelRetries, err)
doSeek = true
doReopen = true
}
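
One detail worth noting in the readAt hunk above: the retry limit is read into lowLevelRetries once, before the loop, rather than calling fs.GetConfig on every iteration and in every log line. Hoisting the lookup keeps the hot loop free of repeated context reads and pins the limit for the duration of one read. The reduced shape, with doRead standing in for the actual read attempt:

    package readexample

    import (
        "context"

        "github.com/rclone/rclone/fs"
    )

    // readWithRetries is an illustrative reduction of the loop above,
    // not the real VFS code.
    func readWithRetries(doRead func() error) (err error) {
        // Read the limit once, outside the loop, as readAt now does.
        lowLevelRetries := fs.GetConfig(context.TODO()).LowLevelRetries
        for retries := 0; retries < lowLevelRetries; retries++ {
            if err = doRead(); err == nil {
                return nil
            }
        }
        return err // give up after --low-level-retries attempts
    }
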
@ -95,7 +95,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVir
return nil, errors.Wrap(err, "failed to create cache meta remote")
}

hashType, hashOption := operations.CommonHash(fcache, fremote)
hashType, hashOption := operations.CommonHash(ctx, fcache, fremote)

c := &Cache{
fremote: fremote,

@ -283,7 +283,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)

// The window includes potentially unread data in the buffer
window := int64(fs.Config.BufferSize)
window := int64(fs.GetConfig(context.TODO()).BufferSize)

// Increase the read range by the read ahead if set
if dls.opt.ReadAhead > 0 {
@ -521,7 +521,7 @@ func (dl *downloader) open(offset int64) (err error) {
// if offset > 0 {
// rangeOption = &fs.RangeOption{Start: offset, End: size - 1}
// }
// in0, err := operations.NewReOpen(dl.dls.ctx, dl.dls.src, fs.Config.LowLevelRetries, dl.dls.item.c.hashOption, rangeOption)
// in0, err := operations.NewReOpen(dl.dls.ctx, dl.dls.src, ci.LowLevelRetries, dl.dls.item.c.hashOption, rangeOption)

in0 := chunkedreader.New(context.TODO(), dl.dls.src, int64(dl.dls.opt.ChunkSize), int64(dl.dls.opt.ChunkSizeLimit))
_, err = in0.Seek(offset, 0)

@ -491,7 +491,7 @@ func (item *Item) _createFile(osPath string) (err error) {
// Open the local file from the object passed in. Wraps open()
// to provide recovery from out of space error.
func (item *Item) Open(o fs.Object) (err error) {
for retries := 0; retries < fs.Config.LowLevelRetries; retries++ {
for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
item.preAccess()
err = item.open(o)
item.postAccess()
@ -1190,7 +1190,7 @@ func (item *Item) setModTime(modTime time.Time) {
func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
n = 0
var expBackOff int
for retries := 0; retries < fs.Config.LowLevelRetries; retries++ {
for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
item.preAccess()
n, err = item.readAt(b, off)
item.postAccess()

@ -416,7 +416,7 @@ func (wb *WriteBack) processItems(ctx context.Context) {
resetTimer := true
for wbItem := wb._peekItem(); wbItem != nil && time.Until(wbItem.expiry) <= 0; wbItem = wb._peekItem() {
// If reached transfer limit don't restart the timer
if wb.uploads >= fs.Config.Transfers {
if wb.uploads >= fs.GetConfig(context.TODO()).Transfers {
fs.Debugf(wbItem.name, "vfs cache: delaying writeback as --transfers exceeded")
resetTimer = false
break

@ -493,10 +493,12 @@ func TestWriteBackGetStats(t *testing.T) {

// Test queuing more than fs.Config.Transfers
func TestWriteBackMaxQueue(t *testing.T) {
ctx := context.Background()
ci := fs.GetConfig(ctx)
wb, cancel := newTestWriteBack(t)
defer cancel()

maxTransfers := fs.Config.Transfers
maxTransfers := ci.Transfers
toTransfer := maxTransfers + 2

// put toTransfer things in the queue
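
Finally, the vfs cache call sites that have no context parameter of their own yet (Item.Open, Item.ReadAt, the writeback timer) read their limits through fs.GetConfig(context.TODO()). context.TODO() is the conventional marker for pending context plumbing: it carries no attached config, so these sites get the process-wide defaults for now, and once a real ctx is threaded through only the argument needs to change. A sketch of what the writeback throttle above amounts to; canStartUpload is an illustrative name:

    package writebackexample

    import (
        "context"

        "github.com/rclone/rclone/fs"
    )

    // canStartUpload reports whether another upload may start without
    // exceeding the --transfers limit. context.TODO() means no per-call
    // config is attached, so this reads the process-wide defaults; when
    // a real ctx is plumbed into the writeback timer later, only this
    // argument changes.
    func canStartUpload(uploads int) bool {
        return uploads < fs.GetConfig(context.TODO()).Transfers
    }
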