From 5470d34740d03e1599bb4e349fe155c0acaff8fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aleksandar=20Jankovi=C4=87?=
Date: Mon, 24 Feb 2020 16:43:45 +0100
Subject: [PATCH] backend/s3: use low-level-retries as the number of SDK
 retries

Amazon S3 is built to handle different kinds of workloads. In rare
cases where S3 is not able to scale, users will see status 500 errors.
The main mechanism for handling these errors is retries, and the number
of retries needed varies between use cases.

This change makes the number of retries the s3 backend performs
configurable via the --low-level-retries option.
---
 backend/s3/s3.go | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index c76c21f89..fe53d1479 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -850,7 +850,6 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
 			Default:  memoryPoolFlushTime,
 			Advanced: true,
 			Help: `How often internal memory buffer pools will be flushed.
-
 Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
 This option controls how often unused buffers will be removed from the pool.`,
 		}, {
@@ -866,7 +865,6 @@ This option controls how often unused buffers will be removed from the pool.`,
 const (
 	metaMtime   = "Mtime"     // the meta key to store mtime in - eg X-Amz-Meta-Mtime
 	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
-	maxRetries     = 10                     // number of retries to make of operations
 	maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
 	maxUploadParts = 10000                  // maximum allowed number of parts in a multi-part upload
 	minChunkSize   = fs.SizeSuffix(1024 * 1024 * 5)
@@ -1095,7 +1093,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 		opt.ForcePathStyle = false
 	}
 	awsConfig := aws.NewConfig().
-		WithMaxRetries(maxRetries).
+		WithMaxRetries(fs.Config.LowLevelRetries).
 		WithCredentials(cred).
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1202,12 +1200,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
+	pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
+	// Rely on the SDK retry mechanism rather than the pacer's: for the
+	// pacer, SetRetries(1) means a single attempt, i.e. no retries.
+	pc.SetRetries(1)
+
 	f := &Fs{
 		name:  name,
 		opt:   *opt,
 		c:     c,
 		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer: pc,
 		cache: bucket.NewCache(),
 		srv:   fshttp.NewClient(fs.Config),
 		pools: make(map[int64]*pool.Pool),
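
A minimal sketch of the SDK knob this patch wires up, standalone rather
than rclone code, with a local variable standing in for
fs.Config.LowLevelRetries (the value behind --low-level-retries,
default 10). aws-sdk-go's default retryer repeats throttled and HTTP
5xx requests up to MaxRetries times before surfacing the error:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Stand-in for fs.Config.LowLevelRetries; rclone sets this
        // from the --low-level-retries flag (default 10).
        lowLevelRetries := 10

        // The SDK's default retryer retries throttling errors and
        // HTTP 5xx responses up to MaxRetries times.
        cfg := aws.NewConfig().WithMaxRetries(lowLevelRetries)

        fmt.Println("SDK max retries:", aws.IntValue(cfg.MaxRetries))
    }

With the patch applied, running for example

    rclone copy /src remote:bucket --low-level-retries 20

gives the SDK 20 retries for each S3 call instead of the previously
hard-coded 10, while pc.SetRetries(1) stops the pacer from multiplying
those retries with its own.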