
googlecloudstorage: use lib/encoder

Co-authored-by: Nick Craig-Wood <nick@craig-wood.com>
Author: Fabian Möller, 2019-05-19 17:54:46 +02:00 (committed by Nick Craig-Wood)
Parent: f55a99218c
Commit: 3304bb7a56
3 changed files with 25 additions and 6 deletions

backend/googlecloudstorage/googlecloudstorage.go

@@ -32,6 +32,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -68,6 +69,8 @@ var (
 	}
 )
 
+const enc = encodings.GoogleCloudStorage
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -349,7 +352,8 @@ func parsePath(path string) (root string)
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	return bucket.Split(path.Join(f.root, rootRelativePath))
+	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
 }
 
 // split returns bucket and bucketPath from the object
@@ -438,8 +442,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the object exists
+		encodedDirectory := enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Context(ctx).Do()
+			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err == nil {
@@ -522,6 +527,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 				if !strings.HasSuffix(remote, "/") {
 					continue
 				}
+				remote = enc.ToStandardPath(remote)
 				if !strings.HasPrefix(remote, prefix) {
 					fs.Logf(f, "Odd name received %q", remote)
 					continue
@@ -537,11 +543,12 @@
 			}
 		}
 		for _, object := range objects.Items {
-			if !strings.HasPrefix(object.Name, prefix) {
+			remote := enc.ToStandardPath(object.Name)
+			if !strings.HasPrefix(remote, prefix) {
 				fs.Logf(f, "Odd name received %q", object.Name)
 				continue
 			}
-			remote := object.Name[len(prefix):]
+			remote = remote[len(prefix):]
 			isDirectory := strings.HasSuffix(remote, "/")
 			if addBucket {
 				remote = path.Join(bucket, remote)
@@ -613,7 +620,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 			return nil, err
 		}
 		for _, bucket := range buckets.Items {
-			d := fs.NewDir(bucket.Name, time.Time{})
+			d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
 			entries = append(entries, d)
 		}
 		if buckets.NextPageToken == "" {

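Note: taken together, these hunks encode names just before they are handed to the GCS API (split, NewFs) and decode names coming back from it (list, listBuckets). Below is a minimal sketch of that round trip, assuming the fs/encodings package imported above is available as in this commit; the bucket and object names are invented for illustration and are not from the patch.

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/encodings"
)

func main() {
	enc := encodings.GoogleCloudStorage

	// Outgoing direction, as in split() and NewFs(): rclone's standard names
	// are encoded before they are sent to the GCS API.
	apiBucket := enc.FromStandardName("my-bucket")         // hypothetical bucket name
	apiPath := enc.FromStandardPath("dir/bad\r\nname.txt") // hypothetical object path
	fmt.Printf("bucket=%q object=%q\n", apiBucket, apiPath)

	// Incoming direction, as in list() and listBuckets(): names returned by
	// the API are decoded back to rclone's standard form.
	fmt.Println(enc.ToStandardName(apiBucket) == "my-bucket")
	fmt.Println(enc.ToStandardPath(apiPath) == "dir/bad\r\nname.txt")
}
```

FromStandardName/FromStandardPath produce what the API sees; ToStandardName/ToStandardPath restore the standard form, so names containing the restricted characters documented below still round-trip.
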
docs/content/googlecloudstorage.md

@@ -221,6 +221,18 @@ Google google cloud storage stores md5sums natively and rclone stores
 modification times as metadata on the object, under the "mtime" key in
 RFC3339 format accurate to 1ns.
 
+#### Restricted filename characters
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| NUL       | 0x00  | ␀           |
+| LF        | 0x0A  | ␊           |
+| CR        | 0x0D  | ␍           |
+| /         | 0x2F  | ／          |
+
+Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
+as they can't be used in JSON strings.
+
 <!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/googlecloudstorage/googlecloudstorage.go then run make backenddocs -->
 ### Standard Options

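The replacements in this table can be exercised against the encoder itself. A minimal sketch, again assuming the fs/encodings package from this commit; the object name is invented and contains NUL, LF, CR and an invalid UTF-8 byte.

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/encodings"
)

func main() {
	enc := encodings.GoogleCloudStorage

	// Hypothetical object name containing NUL, LF, CR and an invalid UTF-8 byte.
	name := "report\x00one\ntwo\rthree\xfe.txt"

	stored := enc.FromStandardName(name) // form sent to GCS, with the substitutes from the table
	fmt.Printf("stored as: %q\n", stored)

	// Decoding restores the original name exactly.
	fmt.Println("round-trips:", enc.ToStandardName(stored) == name)
}
```
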
fs/encodings/encodings.go

@@ -112,7 +112,7 @@ const Dropbox = encoder.MultiEncoder(
 // GoogleCloudStorage is the encoding used by the googlecloudstorage backend
 const GoogleCloudStorage = encoder.MultiEncoder(
 	uint(Base) |
-		//encoder.EncodeCrLF |
+		encoder.EncodeCrLf |
 		encoder.EncodeInvalidUtf8)
 
 // JottaCloud is the encoding used by the jottacloud backend
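For comparison, a minimal sketch that builds a reduced constant directly from the two flags visible in this hunk; Base is defined elsewhere in this file and is left out here, and the lib/encoder import path is assumed to match the module path used in the other imports of this commit.

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

// crLfOnly combines only the two flags visible in the hunk above; the real
// GoogleCloudStorage constant also includes the shared Base flags.
const crLfOnly = encoder.MultiEncoder(
	encoder.EncodeCrLf |
		encoder.EncodeInvalidUtf8)

func main() {
	in := "line one\r\nline two" // hypothetical object name containing CR and LF
	out := crLfOnly.FromStandardName(in)
	fmt.Printf("%q -> %q\n", in, out)
	fmt.Println("round-trips:", crLfOnly.ToStandardName(out) == in)
}
```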