From 40af98b0b3f22b0c16ecef64ba30e0b008c74a03 Mon Sep 17 00:00:00 2001
From: remusb <remus.bunduc@gmail.com>
Date: Tue, 30 Jan 2018 00:05:04 +0200
Subject: [PATCH] cache: offline uploading

---
 backend/cache/cache.go                    |  646 ++++++--
 backend/cache/cache_internal_test.go      | 1741 +++++++++++++++------
 backend/cache/cache_mount_unix_test.go    |   78 +
 backend/cache/cache_mount_windows_test.go |  124 ++
 backend/cache/directory.go                |   10 +-
 backend/cache/handle.go                   |  188 ++-
 backend/cache/object.go                   |  203 +--
 backend/cache/plex.go                     |   39 +-
 backend/cache/storage_memory.go           |    2 -
 backend/cache/storage_persistent.go       |  383 ++++-
 docs/content/cache.md                     |   58 +-
 11 files changed, 2690 insertions(+), 782 deletions(-)
 create mode 100644 backend/cache/cache_mount_unix_test.go
 create mode 100644 backend/cache/cache_mount_windows_test.go

diff --git a/backend/cache/cache.go b/backend/cache/cache.go
index 9863f92a4..6a08dba5f 100644
--- a/backend/cache/cache.go
+++ b/backend/cache/cache.go
@@ -47,6 +47,8 @@ const (
 	DefCacheRps = -1
 	// DefCacheWrites will cache file data on writes through the cache
 	DefCacheWrites = false
+	// DefCacheTmpWaitTime says how long files should be stored in the local cache before being uploaded
+	DefCacheTmpWaitTime = "15m"
 )
 
 // Globals
@@ -64,6 +66,8 @@ var (
 	cacheChunkNoMemory      = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
 	cacheRps                = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
 	cacheStoreWrites        = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
+	cacheTempWritePath      = flags.StringP("cache-tmp-upload-path", "", "", "Directory to keep temporary files until they are uploaded to the cloud storage")
+	cacheTempWaitTime       = flags.StringP("cache-tmp-wait-time", "", DefCacheTmpWaitTime, "How long files should be stored in the local cache before being uploaded")
 )
 
 // Register with Fs
@@ -140,66 +144,6 @@ func init() {
 	})
 }
 
-// ChunkStorage is a storage type that supports only chunk operations (i.e in RAM)
-type ChunkStorage interface {
-	// will check if the chunk is in storage. should be fast and not read the chunk itself if possible
-	HasChunk(cachedObject *Object, offset int64) bool
-
-	// returns the chunk in storage. return an error if it's not
-	GetChunk(cachedObject *Object, offset int64) ([]byte, error)
-
-	// add a new chunk
-	AddChunk(fp string, data []byte, offset int64) error
-
-	// if the storage can cleanup on a cron basis
-	// otherwise it can do a noop operation
-	CleanChunksByAge(chunkAge time.Duration)
-
-	// if the storage can cleanup chunks after we no longer need them
-	// otherwise it can do a noop operation
-	CleanChunksByNeed(offset int64)
-
-	// if the storage can cleanup chunks after the total size passes a certain point
-	// otherwise it can do a noop operation
-	CleanChunksBySize(maxSize int64)
-}
-
-// Storage is a storage type (Bolt) which needs to support both chunk and file based operations
-type Storage interface {
-	ChunkStorage
-
-	// will update/create a directory or an error if it's not found
-	AddDir(cachedDir *Directory) error
-
-	// will return a directory with all the entries in it or an error if it's not found
-	GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
-
-	// remove a directory and all the objects and chunks in it
-	RemoveDir(fp string) error
-
-	// remove a directory and all the objects and chunks in it
-	ExpireDir(cd *Directory) error
-
-	// will return an object (file) or error if it doesn't find it
-	GetObject(cachedObject *Object) (err error)
-
-	// add a new object to its parent directory
-	// the directory structure (all the parents of this object) is created if its not found
-	AddObject(cachedObject *Object) error
-
-	// remove an object and all its chunks
-	RemoveObject(fp string) error
-
-	// Stats returns stats about the cache storage
-	Stats() (map[string]map[string]interface{}, error)
-
-	// Purge will flush the entire cache
-	Purge()
-
-	// Close should be called when the program ends gracefully
-	Close()
-}
-
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 	fs.Fs
@@ -208,7 +152,7 @@ type Fs struct {
 	name     string
 	root     string
 	features *fs.Features // optional features
-	cache    Storage
+	cache    *Persistent
 
 	fileAge            time.Duration
 	chunkSize          int64
@@ -219,27 +163,36 @@ type Fs struct {
 	totalMaxWorkers    int
 	chunkMemory        bool
 	cacheWrites        bool
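+	// offline upload support: new files are written to tempFs under
+	// tempWritePath first and uploaded in the background after resting
+	// for tempWriteWait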
+	tempWritePath      string
+	tempWriteWait      time.Duration
+	tempFs             fs.Fs
 
 	lastChunkCleanup time.Time
 	cleanupMu        sync.Mutex
 	rateLimiter      *rate.Limiter
 	plexConnector    *plexConnector
+	backgroundRunner *backgroundWriter
+	cleanupChan      chan bool
 }
 
-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, rpath string) (fs.Fs, error) {
 	remote := config.FileGet(name, "remote")
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
 	}
-	// Look for a file first
+	rpath = strings.Trim(rpath, "/")
 	remotePath := path.Join(remote, rpath)
 	wrappedFs, wrapErr := fs.NewFs(remotePath)
-	if wrapErr != fs.ErrorIsFile && wrapErr != nil {
+	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
 		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
 	}
+	var fsErr error
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
-
+	if wrapErr == fs.ErrorIsFile {
+		fsErr = fs.ErrorIsFile
+		rpath = cleanPath(path.Dir(rpath))
+	}
 	plexURL := config.FileGet(name, "plex_url")
 	plexToken := config.FileGet(name, "plex_token")
 	var chunkSize fs.SizeSuffix
@@ -249,7 +202,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	}
 	err := chunkSize.Set(chunkSizeString)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand chunk size", chunkSizeString)
+		return nil, errors.Wrapf(err, "failed to understand chunk size %v", chunkSizeString)
 	}
 	var chunkTotalSize fs.SizeSuffix
 	chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
@@ -258,7 +211,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	}
 	err = chunkTotalSize.Set(chunkTotalSizeString)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand chunk total size", chunkTotalSizeString)
+		return nil, errors.Wrapf(err, "failed to understand chunk total size %v", chunkTotalSizeString)
 	}
 	chunkCleanIntervalStr := *cacheChunkCleanInterval
 	chunkCleanInterval, err := time.ParseDuration(chunkCleanIntervalStr)
@@ -271,7 +224,11 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	}
 	infoDuration, err := time.ParseDuration(infoAge)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand duration", infoAge)
+		return nil, errors.Wrapf(err, "failed to understand duration %v", infoAge)
+	}
+	waitTime, err := time.ParseDuration(*cacheTempWaitTime)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to understand duration %v", *cacheTempWaitTime)
 	}
 	// configure cache backend
 	if *cacheDbPurge {
@@ -291,6 +248,9 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 		chunkMemory:        !*cacheChunkNoMemory,
 		cacheWrites:        *cacheStoreWrites,
 		lastChunkCleanup:   time.Now().Truncate(time.Hour * 24 * 30),
+		tempWritePath:      *cacheTempWritePath,
+		tempWriteWait:      waitTime,
+		cleanupChan:        make(chan bool, 1),
 	}
 	if f.chunkTotalSize < (f.chunkSize * int64(f.totalWorkers)) {
 		return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
@@ -361,7 +321,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 			s := <-c
 			if s == syscall.SIGINT || s == syscall.SIGTERM {
 				fs.Debugf(f, "Got signal: %v", s)
-				f.cache.Close()
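+				// stops the periodic cleanup goroutine and the background
+				// uploader (if configured) and closes the cache db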
+				f.StopBackgroundRunners()
 			} else if s == syscall.SIGHUP {
 				fs.Infof(f, "Clearing cache from signal")
 				f.DirCacheFlush()
@@ -375,9 +335,39 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	fs.Infof(name, "Chunk Clean Interval: %v", f.chunkCleanInterval.String())
 	fs.Infof(name, "Workers: %v", f.totalWorkers)
 	fs.Infof(name, "File Age: %v", f.fileAge.String())
-	fs.Infof(name, "Cache Writes: %v", f.cacheWrites)
+	if f.cacheWrites {
+		fs.Infof(name, "Cache Writes: enabled")
+	}
 
-	go f.CleanUpCache(false)
+	if f.tempWritePath != "" {
+		err = os.MkdirAll(f.tempWritePath, os.ModePerm)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.tempWritePath)
+		}
+		f.tempWritePath = filepath.ToSlash(f.tempWritePath)
+		f.tempFs, err = fs.NewFs(f.tempWritePath)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
+		}
+		fs.Infof(name, "Upload Temp Rest Time: %v", f.tempWriteWait.String())
+		fs.Infof(name, "Upload Temp FS: %v", f.tempWritePath)
+		f.backgroundRunner, _ = initBackgroundUploader(f)
+		go f.backgroundRunner.run()
+	}
+
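+	// periodically clean the chunk cache: wake every chunkCleanInterval
+	// and run CleanUpCache until StopBackgroundRunners signals cleanupChan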
+	go func() {
+		for {
+			time.Sleep(f.chunkCleanInterval)
+			select {
+			case <-f.cleanupChan:
+				fs.Infof(f, "stopping cleanup")
+				return
+			default:
+				fs.Debugf(f, "starting cleanup")
+				f.CleanUpCache(false)
+			}
+		}
+	}()
 
 	// TODO: Explore something here but now it's not something we want
 	// when writing from cache, source FS will send a notification and clear it out immediately
@@ -394,21 +384,30 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		DuplicateFiles:          false, // storage doesn't permit this
-		Purge:                   f.Purge,
-		Copy:                    f.Copy,
-		Move:                    f.Move,
-		DirMove:                 f.DirMove,
 		DirChangeNotify:         nil,
-		PutUnchecked:            f.PutUnchecked,
-		PutStream:               f.PutStream,
-		CleanUp:                 f.CleanUp,
-		UnWrap:                  f.UnWrap,
-		WrapFs:                  f.WrapFs,
-		SetWrapper:              f.SetWrapper,
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+	// override only those features that use the temp fs when it doesn't support them
+	if f.tempWritePath != "" {
+		if f.tempFs.Features().Copy == nil {
+			f.features.Copy = nil
+		}
+		if f.tempFs.Features().Move == nil {
+			f.features.Move = nil
+		}
+		if f.tempFs.Features().DirMove == nil {
+			f.features.DirMove = nil
+		}
+		if f.tempFs.Features().MergeDirs == nil {
+			f.features.MergeDirs = nil
+		}
+	}
+	// even if the wrapped fs doesn't support it, we still want it
 	f.features.DirCacheFlush = f.DirCacheFlush
 
-	return f, wrapErr
+	return f, fsErr
 }
 
 // Name of the remote (as passed into NewFs)
@@ -428,7 +427,7 @@ func (f *Fs) Features() *fs.Features {
 
 // String returns a description of the FS
 func (f *Fs) String() string {
-	return fmt.Sprintf("%s:%s", f.name, f.root)
+	return fmt.Sprintf("Cache remote %s:%s", f.name, f.root)
 }
 
 // ChunkSize returns the configured chunk size
@@ -436,33 +435,67 @@ func (f *Fs) ChunkSize() int64 {
 	return f.chunkSize
 }
 
+// InfoAge returns the configured file age
+func (f *Fs) InfoAge() time.Duration {
+	return f.fileAge
+}
+
+// TempUploadWaitTime returns the configured temp file upload wait time
+func (f *Fs) TempUploadWaitTime() time.Duration {
+	return f.tempWriteWait
+}
+
 // NewObject finds the Object at remote.
 func (f *Fs) NewObject(remote string) (fs.Object, error) {
+	var err error
+
+	fs.Debugf(f, "new object '%s'", remote)
 	co := NewObject(f, remote)
-	err := f.cache.GetObject(co)
+	// search for entry in cache and validate it
+	err = f.cache.GetObject(co)
 	if err != nil {
 		fs.Debugf(remote, "find: error: %v", err)
 	} else if time.Now().After(co.CacheTs.Add(f.fileAge)) {
-		fs.Debugf(remote, "find: cold object ts: %v", co.CacheTs)
+		fs.Debugf(co, "find: cold object: %+v", co)
 	} else {
-		fs.Debugf(remote, "find: warm object ts: %v", co.CacheTs)
+		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(f.fileAge))
 		return co, nil
 	}
-	obj, err := f.Fs.NewObject(remote)
+
+	// search for entry in source or temp fs
+	var obj fs.Object
+	err = nil
+	if f.tempWritePath != "" {
+		obj, err = f.tempFs.NewObject(remote)
+		// not found in temp fs
+		if err != nil {
+			fs.Debugf(remote, "find: not found in local cache fs")
+			obj, err = f.Fs.NewObject(remote)
+		} else {
+			fs.Debugf(obj, "find: found in local cache fs")
+		}
+	} else {
+		obj, err = f.Fs.NewObject(remote)
+	}
+
+	// not found in either fs
 	if err != nil {
+		fs.Debugf(obj, "find failed: not found in either local or remote fs")
 		return nil, err
 	}
-	co = ObjectFromOriginal(f, obj)
-	co.persist()
+
+	// cache the new entry
+	co = ObjectFromOriginal(f, obj).persist()
+	fs.Debugf(co, "find: cached object")
 	return co, nil
 }
 
 // List the objects and directories in dir into entries
 func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	// clean cache
-	go f.CleanUpCache(false)
-
+	fs.Debugf(f, "list '%s'", dir)
 	cd := ShallowDirectory(f, dir)
+
+	// search for cached dir entries and validate them
 	entries, err = f.cache.GetDirEntries(cd)
 	if err != nil {
 		fs.Debugf(dir, "list: error: %v", err)
@@ -472,40 +505,83 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 		// TODO: read empty dirs from source?
 		fs.Debugf(dir, "list: empty listing")
 	} else {
-		fs.Debugf(dir, "list: warm %v from cache for: %v, ts: %v", len(entries), cd.abs(), cd.CacheTs)
+		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(f.fileAge))
+		fs.Debugf(dir, "list: cached entries: %v", entries)
 		return entries, nil
 	}
+	// FIXME need to clean existing cached listing
 
+	// we first search for any temporary files stored locally
+	var cachedEntries fs.DirEntries
+	if f.tempWritePath != "" {
+		queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
+		if err != nil {
+			fs.Errorf(dir, "list: error getting pending uploads: %v", err)
+		} else {
+			fs.Debugf(dir, "list: read %v from temp fs", len(queuedEntries))
+			fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
+
+			for _, queuedRemote := range queuedEntries {
+				queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
+				if err != nil {
+					fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
+					continue
+				}
+				co := ObjectFromOriginal(f, queuedEntry).persist()
+				fs.Debugf(co, "list: cached temp object")
+				cachedEntries = append(cachedEntries, co)
+			}
+		}
+	}
+
+	// search from the source
 	entries, err = f.Fs.List(dir)
 	if err != nil {
 		return nil, err
 	}
 	fs.Debugf(dir, "list: read %v from source", len(entries))
+	fs.Debugf(dir, "list: source entries: %v", entries)
 
-	var cachedEntries fs.DirEntries
+	// and then iterate over the ones from source (temp Objects will override source ones)
 	for _, entry := range entries {
 		switch o := entry.(type) {
 		case fs.Object:
-			co := ObjectFromOriginal(f, o)
-			co.persist()
+			// skip over temporary objects (might be uploading)
+			found := false
+			for _, t := range cachedEntries {
+				if t.Remote() == o.Remote() {
+					found = true
+					break
+				}
+			}
+			if found {
+				continue
+			}
+			co := ObjectFromOriginal(f, o).persist()
 			cachedEntries = append(cachedEntries, co)
+			fs.Debugf(dir, "list: cached object: %v", co)
 		case fs.Directory:
-			cd := DirectoryFromOriginal(f, o)
-			err = f.cache.AddDir(cd)
-			cachedEntries = append(cachedEntries, cd)
+			cdd := DirectoryFromOriginal(f, o)
+			err := f.cache.AddDir(cdd)
+			if err != nil {
+				fs.Errorf(dir, "list: error caching dir from listing %v", o)
+			} else {
+				fs.Debugf(dir, "list: cached dir: %v", cdd)
+			}
+			cachedEntries = append(cachedEntries, cdd)
 		default:
-			err = errors.Errorf("Unknown object type %T", entry)
+			fs.Debugf(entry, "list: Unknown object type %T", entry)
 		}
 	}
+
+	// cache dir meta
+	t := time.Now()
+	cd.CacheTs = &t
+	err = f.cache.AddDir(cd)
 	if err != nil {
-		fs.Errorf(dir, "err caching listing: %v", err)
+		fs.Errorf(cd, "list: save error: '%v'", err)
 	} else {
-		t := time.Now()
-		cd.CacheTs = &t
-		err := f.cache.AddDir(cd)
-		if err != nil {
-			fs.Errorf(cd, "list: save error: %v", err)
-		}
+		fs.Debugf(dir, "list: cached dir: '%v', cache ts: %v", cd.abs(), cd.CacheTs)
 	}
 
 	return cachedEntries, nil
@@ -574,62 +650,105 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 
 // Mkdir makes the directory (container, bucket)
 func (f *Fs) Mkdir(dir string) error {
+	fs.Debugf(f, "mkdir '%s'", dir)
 	err := f.Fs.Mkdir(dir)
 	if err != nil {
 		return err
 	}
-	if dir == "" && f.Root() == "" { // creating the root is possible but we don't need that cached as we have it already
-		fs.Debugf(dir, "skipping empty dir in cache")
-		return nil
-	}
-	fs.Infof(f, "create dir '%s'", dir)
+	fs.Debugf(dir, "mkdir: created dir in source fs")
 
-	// expire parent of new dir
 	cd := NewDirectory(f, cleanPath(dir))
 	err = f.cache.AddDir(cd)
 	if err != nil {
 		fs.Errorf(dir, "mkdir: add error: %v", err)
+	} else {
+		fs.Debugf(cd, "mkdir: added to cache")
 	}
+	// expire parent of new dir
 	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
 	err = f.cache.ExpireDir(parentCd)
 	if err != nil {
-		fs.Errorf(dir, "mkdir: expire error: %v", err)
+		fs.Errorf(parentCd, "mkdir: cache expire error: %v", err)
+	} else {
+		fs.Infof(parentCd, "mkdir: cache expired")
 	}
 
-	// clean cache
-	go f.CleanUpCache(false)
 	return nil
 }
 
 // Rmdir removes the directory (container, bucket) if empty
 func (f *Fs) Rmdir(dir string) error {
-	err := f.Fs.Rmdir(dir)
-	if err != nil {
-		return err
+	fs.Debugf(f, "rmdir '%s'", dir)
+
+	if f.tempWritePath != "" {
+		// pause background uploads
+		f.backgroundRunner.pause()
+		defer f.backgroundRunner.play()
+
+		// if the directory also exists on the remote we remove it there too
+		// otherwise, we skip this step
+		_, err := f.UnWrap().List(dir)
+		if err == nil {
+			err := f.Fs.Rmdir(dir)
+			if err != nil {
+				return err
+			}
+			fs.Debugf(dir, "rmdir: removed dir in source fs")
+		}
+
+		var queuedEntries []*Object
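+		// walk the temp fs for uploads still queued under this dir;
+		// if any are found the directory is not really empty yet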
+		err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
+			for _, o := range entries {
+				if oo, ok := o.(fs.Object); ok {
+					co := ObjectFromOriginal(f, oo)
+					queuedEntries = append(queuedEntries, co)
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			fs.Errorf(dir, "rmdir: error getting pending uploads: %v", err)
+		} else {
+			fs.Debugf(dir, "rmdir: read %v from temp fs", len(queuedEntries))
+			fs.Debugf(dir, "rmdir: temp fs entries: %v", queuedEntries)
+			if len(queuedEntries) > 0 {
+				fs.Errorf(dir, "rmdir: temporary dir not empty")
+				return fs.ErrorDirectoryNotEmpty
+			}
+		}
+	} else {
+		err := f.Fs.Rmdir(dir)
+		if err != nil {
+			return err
+		}
+		fs.Debugf(dir, "rmdir: removed dir in source fs")
 	}
-	fs.Infof(f, "rm dir '%s'", dir)
 
 	// remove dir data
 	d := NewDirectory(f, dir)
-	err = f.cache.RemoveDir(d.abs())
+	err := f.cache.RemoveDir(d.abs())
 	if err != nil {
 		fs.Errorf(dir, "rmdir: remove error: %v", err)
+	} else {
+		fs.Debugf(d, "rmdir: removed from cache")
 	}
 	// expire parent
 	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
 	err = f.cache.ExpireDir(parentCd)
 	if err != nil {
-		fs.Errorf(dir, "rmdir: expire error: %v", err)
+		fs.Errorf(dir, "rmdir: cache expire error: %v", err)
+	} else {
+		fs.Infof(parentCd, "rmdir: cache expired")
 	}
 
-	// clean cache
-	go f.CleanUpCache(false)
 	return nil
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
 // using server side move operations.
 func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
+
 	do := f.Fs.Features().DirMove
 	if do == nil {
 		return fs.ErrorCantDirMove
@@ -643,36 +762,91 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 		fs.Errorf(srcFs, "can't move directory - not wrapping same remotes")
 		return fs.ErrorCantDirMove
 	}
-	fs.Infof(f, "move dir '%s'/'%s' -> '%s'", srcRemote, srcFs.Root(), dstRemote)
 
-	err := do(src.Features().UnWrap(), srcRemote, dstRemote)
-	if err != nil {
-		return err
+	if f.tempWritePath != "" {
+		// pause background uploads
+		f.backgroundRunner.pause()
+		defer f.backgroundRunner.play()
+
+		// if the source dir also exists on the remote we move it there too
+		// otherwise, we skip this step
+		_, err := srcFs.UnWrap().List(srcRemote)
+		if err == nil {
+			err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+			if err != nil {
+				return err
+			}
+			fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
+		}
+
+		var queuedEntries []*Object
+		err = walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
+			for _, o := range entries {
+				if oo, ok := o.(fs.Object); ok {
+					co := ObjectFromOriginal(f, oo)
+					queuedEntries = append(queuedEntries, co)
+					if co.tempFileStartedUpload() {
+						fs.Errorf(co, "can't move - upload has already started. need to finish that")
+						return fs.ErrorCantDirMove
+					}
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+		fs.Debugf(srcRemote, "dirmove: read %v from temp fs", len(queuedEntries))
+		fs.Debugf(srcRemote, "dirmove: temp fs entries: %v", queuedEntries)
+
+		do := f.tempFs.Features().DirMove
+		if do == nil {
+			fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
+			return fs.ErrorCantDirMove
+		}
+		err = do(f.tempFs, srcRemote, dstRemote)
+		if err != nil {
+			return err
+		}
+		err = f.cache.ReconcileTempUploads(f)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+		if err != nil {
+			return err
+		}
+		fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
 	}
 
 	// delete src dir from cache along with all chunks
 	srcDir := NewDirectory(srcFs, srcRemote)
-	err = f.cache.RemoveDir(srcDir.abs())
+	err := f.cache.RemoveDir(srcDir.abs())
 	if err != nil {
-		fs.Errorf(srcRemote, "dirmove: remove error: %v", err)
+		fs.Errorf(srcDir, "dirmove: remove error: %v", err)
+	} else {
+		fs.Debugf(srcDir, "dirmove: removed cached dir")
 	}
 	// expire src parent
 	srcParent := NewDirectory(f, cleanPath(path.Dir(srcRemote)))
 	err = f.cache.ExpireDir(srcParent)
 	if err != nil {
-		fs.Errorf(srcRemote, "dirmove: expire error: %v", err)
+		fs.Errorf(srcParent, "dirmove: cache expire error: %v", err)
+	} else {
+		fs.Debugf(srcParent, "dirmove: cache expired")
 	}
 
 	// expire parent dir at the destination path
 	dstParent := NewDirectory(f, cleanPath(path.Dir(dstRemote)))
 	err = f.cache.ExpireDir(dstParent)
 	if err != nil {
-		fs.Errorf(dstRemote, "dirmove: expire error: %v", err)
+		fs.Errorf(dstParent, "dirmove: cache expire error: %v", err)
+	} else {
+		fs.Debugf(dstParent, "dirmove: cache expired")
 	}
 	// TODO: precache dst dir and save the chunks
 
-	// clean cache
-	go f.CleanUpCache(false)
 	return nil
 }
 
@@ -746,32 +920,60 @@ type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.O
 func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
 	var err error
 	var obj fs.Object
-	if f.cacheWrites {
+
+	// queue for upload and store in temp fs if configured
+	if f.tempWritePath != "" {
+		obj, err = f.tempFs.Put(in, src, options...)
+		if err != nil {
+			fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
+			return nil, err
+		}
+		fs.Infof(obj, "put: uploaded in temp fs")
+		err = f.cache.addPendingUpload(path.Join(f.Root(), src.Remote()), false)
+		if err != nil {
+			fs.Errorf(obj, "put: failed to queue for upload: %v", err)
+			return nil, err
+		}
+		fs.Infof(obj, "put: queued for upload")
+		// if cache writes are enabled, write it through the cache first
+	} else if f.cacheWrites {
 		f.cacheReader(in, src, func(inn io.Reader) {
 			obj, err = put(inn, src, options...)
 		})
+		if err == nil {
+			fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
+		}
+		// last option: save it directly in remote fs
 	} else {
 		obj, err = put(in, src, options...)
+		if err == nil {
+			fs.Debugf(obj, "put: uploaded to remote fs")
+		}
 	}
+	// validate and stop if errors are found
 	if err != nil {
-		fs.Errorf(src, "error saving in cache: %v", err)
+		fs.Errorf(src, "put: error uploading: %v", err)
 		return nil, err
 	}
+
+	// cache the new file
 	cachedObj := ObjectFromOriginal(f, obj).persist()
+	fs.Debugf(cachedObj, "put: added to cache")
 	// expire parent
-	err = f.cache.ExpireDir(cachedObj.parentDir())
+	parentCd := NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
+	err = f.cache.ExpireDir(parentCd)
 	if err != nil {
-		fs.Errorf(cachedObj, "put: expire error: %v", err)
+		fs.Errorf(cachedObj, "put: cache expire error: %v", err)
+	} else {
+		fs.Infof(parentCd, "put: cache expired")
 	}
 
-	// clean cache
-	go f.CleanUpCache(false)
 	return cachedObj, nil
 }
 
 // Put in to the remote path with the modTime given of the given size
 func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	fs.Infof(f, "put data at '%s'", src.Remote())
+	fs.Debugf(f, "put data at '%s'", src.Remote())
 	return f.put(in, src, options, f.Fs.Put)
 }
 
@@ -781,7 +983,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 	if do == nil {
 		return nil, errors.New("can't PutUnchecked")
 	}
-	fs.Infof(f, "put data unchecked in '%s'", src.Remote())
+	fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
 	return f.put(in, src, options, do)
 }
 
@@ -791,111 +993,172 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
 	if do == nil {
 		return nil, errors.New("can't PutStream")
 	}
-	fs.Infof(f, "put data streaming in '%s'", src.Remote())
+	fs.Debugf(f, "put data streaming in '%s'", src.Remote())
 	return f.put(in, src, options, do)
 }
 
 // Copy src to this remote using server side copy operations.
 func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
+
 	do := f.Fs.Features().Copy
 	if do == nil {
 		fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
 		return nil, fs.ErrorCantCopy
 	}
-
+	// the source must be a cached object or we abort
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Errorf(srcObj, "can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
+	// both the source cache fs and this cache fs need to wrap the same remote
 	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
-		fs.Errorf(srcObj, "can't copy - not wrapping same remote types")
+		fs.Errorf(srcObj, "can't copy - not wrapping same remotes")
+		return nil, fs.ErrorCantCopy
+	}
+	// refresh from source or abort
+	if err := srcObj.refreshFromSource(false); err != nil {
+		fs.Errorf(f, "can't copy %v - %v", src, err)
 		return nil, fs.ErrorCantCopy
 	}
-	fs.Infof(f, "copy obj '%s' -> '%s'", srcObj.abs(), remote)
 
-	// store in cache
-	if err := srcObj.refreshFromSource(); err != nil {
-		fs.Errorf(f, "can't move %v - %v", src, err)
-		return nil, fs.ErrorCantCopy
+	if srcObj.isTempFile() {
+		// we check if the feature is still active
+		if f.tempWritePath == "" {
+			fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
+			return nil, fs.ErrorCantCopy
+		}
+
+		do = srcObj.ParentFs.Features().Copy
+		if do == nil {
+			fs.Errorf(src, "parent remote (%v) doesn't support Copy", srcObj.ParentFs)
+			return nil, fs.ErrorCantCopy
+		}
 	}
+
 	obj, err := do(srcObj.Object, remote)
 	if err != nil {
 		fs.Errorf(srcObj, "error moving in cache: %v", err)
 		return nil, err
 	}
+	fs.Debugf(obj, "copy: file copied")
 
 	// persist new
 	co := ObjectFromOriginal(f, obj).persist()
+	fs.Debugf(co, "copy: added to cache")
 	// expire the destination path
-	err = f.cache.ExpireDir(co.parentDir())
+	parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
+	err = f.cache.ExpireDir(parentCd)
 	if err != nil {
-		fs.Errorf(co, "copy: expire error: %v", err)
+		fs.Errorf(parentCd, "copy: cache expire error: %v", err)
+	} else {
+		fs.Infof(parentCd, "copy: cache expired")
 	}
-
 	// expire src parent
 	srcParent := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
 	err = f.cache.ExpireDir(srcParent)
 	if err != nil {
-		fs.Errorf(src, "copy: expire error: %v", err)
+		fs.Errorf(srcParent, "copy: cache expire error: %v", err)
+	} else {
+		fs.Infof(srcParent, "copy: cache expired")
 	}
 
-	// clean cache
-	go f.CleanUpCache(false)
 	return co, nil
 }
 
 // Move src to this remote using server side move operations.
 func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+	fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
+
+	// if source fs doesn't support move abort
 	do := f.Fs.Features().Move
 	if do == nil {
 		fs.Errorf(src, "source remote (%v) doesn't support Move", src.Fs())
 		return nil, fs.ErrorCantMove
 	}
-
+	// the source must be a cached object or we abort
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Errorf(srcObj, "can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
+	// both the source cache fs and this cache fs need to wrap the same remote
 	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
 		fs.Errorf(srcObj, "can't move - not wrapping same remote types")
 		return nil, fs.ErrorCantMove
 	}
-	fs.Infof(f, "moving obj '%s' -> %s", srcObj.abs(), remote)
-
-	// save in cache
-	if err := srcObj.refreshFromSource(); err != nil {
+	// refresh from source or abort
+	if err := srcObj.refreshFromSource(false); err != nil {
 		fs.Errorf(f, "can't move %v - %v", src, err)
 		return nil, fs.ErrorCantMove
 	}
+
+	// if this is a temp object then we perform the changes locally
+	if srcObj.isTempFile() {
+		// we check if the feature is still active
+		if f.tempWritePath == "" {
+			fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
+			return nil, fs.ErrorCantMove
+		}
+		// pause background uploads
+		f.backgroundRunner.pause()
+		defer f.backgroundRunner.play()
+
+		// started uploads can't be moved until they complete
+		if srcObj.tempFileStartedUpload() {
+			fs.Errorf(srcObj, "can't move - upload has already started. need to finish that")
+			return nil, fs.ErrorCantMove
+		}
+		do = f.tempFs.Features().Move
+
+		// we must also update the pending queue
+		err := f.cache.updatePendingUpload(srcObj.abs(), func(item *tempUploadInfo) error {
+			item.DestPath = path.Join(f.Root(), remote)
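+			// resetting AddedOn restarts the rest period so the moved file
+			// waits the full tempWriteWait again before uploading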
+			item.AddedOn = time.Now()
+			return nil
+		})
+		if err != nil {
+			fs.Errorf(srcObj, "failed to rename queued file for upload: %v", err)
+			return nil, fs.ErrorCantMove
+		}
+		fs.Debugf(srcObj, "move: queued file moved to %v", remote)
+	}
+
 	obj, err := do(srcObj.Object, remote)
 	if err != nil {
-		fs.Errorf(srcObj, "error moving in cache: %v", err)
+		fs.Errorf(srcObj, "error moving: %v", err)
 		return nil, err
 	}
+	fs.Debugf(obj, "move: file moved")
 
 	// remove old
 	err = f.cache.RemoveObject(srcObj.abs())
 	if err != nil {
 		fs.Errorf(srcObj, "move: remove error: %v", err)
+	} else {
+		fs.Debugf(srcObj, "move: removed from cache")
 	}
 	// expire old parent
-	err = f.cache.ExpireDir(srcObj.parentDir())
+	parentCd := NewDirectory(f, cleanPath(path.Dir(srcObj.Remote())))
+	err = f.cache.ExpireDir(parentCd)
 	if err != nil {
-		fs.Errorf(srcObj, "move: expire error: %v", err)
+		fs.Errorf(parentCd, "move: parent cache expire error: %v", err)
+	} else {
+		fs.Infof(parentCd, "move: cache expired")
 	}
-
 	// persist new
 	cachedObj := ObjectFromOriginal(f, obj).persist()
+	fs.Debugf(cachedObj, "move: added to cache")
 	// expire new parent
-	err = f.cache.ExpireDir(cachedObj.parentDir())
+	parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
+	err = f.cache.ExpireDir(parentCd)
 	if err != nil {
-		fs.Errorf(cachedObj, "move: expire error: %v", err)
+		fs.Errorf(parentCd, "move: expire error: %v", err)
+	} else {
+		fs.Infof(parentCd, "move: cache expired")
 	}
 
-	// clean cache
-	go f.CleanUpCache(false)
 	return cachedObj, nil
 }
 
@@ -939,8 +1202,8 @@ func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
 	return f.cache.Stats()
 }
 
-// OpenRateLimited will execute a closure under a rate limiter watch
-func (f *Fs) OpenRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, error) {
+// openRateLimited will execute a closure under a rate limiter watch
+func (f *Fs) openRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, error) {
 	var err error
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()
@@ -968,6 +1231,17 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 	}
 }
 
+// StopBackgroundRunners will signal all the runners to stop their work
+// can be triggered from a terminate signal or from testing between runs
+func (f *Fs) StopBackgroundRunners() {
+	f.cleanupChan <- false
+	if f.tempWritePath != "" {
+		f.backgroundRunner.close()
+	}
+	f.cache.Close()
+	fs.Debugf(f, "Services stopped")
+}
+
 // UnWrap returns the Fs that this Fs is wrapping
 func (f *Fs) UnWrap() fs.Fs {
 	return f.Fs
@@ -983,7 +1257,7 @@ func (f *Fs) SetWrapper(wrapper fs.Fs) {
 	f.wrapper = wrapper
 }
 
-// Wrap returns the Fs that is wrapping this Fs
+// isWrappedByCrypt checks if this is wrapped by a crypt remote
 func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
 	if f.wrapper == nil {
 		return nil, false
@@ -992,11 +1266,39 @@ func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
 	return c, ok
 }
 
+// cleanRootFromPath trims the root of the current fs from a path
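+// (e.g. with Root() == "test", "test/one/file.bin" becomes "one/file.bin")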
+func (f *Fs) cleanRootFromPath(p string) string {
+	if f.Root() != "" {
+		p = p[len(f.Root()):] // trim out root
+		if len(p) > 0 {       // remove first separator
+			p = p[1:]
+		}
+	}
+
+	return p
+}
+
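+// isRootInPath reports whether p is inside the root of this fs
+// (e.g. Root() == "test" matches "test/one" but not "testing/one")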
+func (f *Fs) isRootInPath(p string) bool {
+	if f.Root() == "" {
+		return true
+	}
+	return strings.HasPrefix(p, f.Root()+"/")
+}
+
 // DirCacheFlush flushes the dir cache
 func (f *Fs) DirCacheFlush() {
 	_ = f.cache.RemoveDir("")
 }
 
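+// A minimal consumer sketch (assumes only that BackgroundUploadState, defined
+// elsewhere in this patch, can be printed):
+//
+//	if ch := f.GetBackgroundUploadChannel(); ch != nil {
+//		for state := range ch {
+//			fs.Debugf(f, "background upload state: %v", state)
+//		}
+//	}
+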
+// GetBackgroundUploadChannel returns a channel that can be watched for
+// upload activity that happens in the background
+func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
+	if f.tempWritePath != "" {
+		return f.backgroundRunner.notifyCh
+	}
+	return nil
+}
+
 func cleanPath(p string) string {
 	p = path.Clean(p)
 	if p == "." || p == "/" {
diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go
index f9523b62e..cb7ca5d03 100644
--- a/backend/cache/cache_internal_test.go
+++ b/backend/cache/cache_internal_test.go
@@ -4,188 +4,267 @@ package cache_test
 
 import (
 	"bytes"
-	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"math/rand"
+	"os"
 	"path"
 	"path/filepath"
 	"runtime"
 	"strconv"
+	"strings"
 	"testing"
 	"time"
 
-	//"os"
-	"os/exec"
-	//"strings"
+	"github.com/pkg/errors"
+
+	"encoding/base64"
+	goflag "flag"
+	"fmt"
+	"runtime/debug"
 
 	"github.com/ncw/rclone/backend/cache"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/object"
-	//"github.com/ncw/rclone/cmd/mount"
-	//_ "github.com/ncw/rclone/cmd/cmount"
-	//"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/backend/crypt"
 	_ "github.com/ncw/rclone/backend/drive"
 	"github.com/ncw/rclone/backend/local"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/object"
 	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/vfs"
+	"github.com/ncw/rclone/vfs/vfsflags"
 	flag "github.com/spf13/pflag"
 	"github.com/stretchr/testify/require"
 )
 
-var (
-	infoAge    = time.Second * 10
-	chunkClean = time.Second
-	okDiff     = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
-	workers    = 2
+const (
+	// these two passwords are random test data
+	cryptPassword1     = "3XcvMMdsV3d-HGAReTMdNH-5FcX5q32_lUeA"                                                     // oGJdUbQc7s8
+	cryptPassword2     = "NlgTBEIe-qibA7v-FoMfuX6Cw8KlLai_aMvV"                                                     // mv4mZW572HM
+	cryptedTextBase64  = "UkNMT05FAAC320i2xIee0BiNyknSPBn+Qcw3q9FhIFp3tvq6qlqvbsno3PnxmEFeJG3jDBnR/wku2gHWeQ=="     // one content
+	cryptedText2Base64 = "UkNMT05FAAATcQkVsgjBh8KafCKcr0wdTa1fMmV0U8hsCLGFoqcvxKVmvv7wx3Hf5EXxFcki2FFV4sdpmSrb9Q==" // updated content
 )
 
+var (
+	remoteName                  string
+	mountDir                    string
+	uploadDir                   string
+	useMount                    bool
+	runInstance                 *run
+	errNotSupported             = errors.New("not supported")
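+	// plaintext path -> path as encrypted by the test crypt remote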
+	decryptedToEncryptedRemotes = map[string]string{
+		"one":               "lm4u7jjt3c85bf56vjqgeenuno",
+		"second":            "qvt1ochrkcfbptp5mu9ugb2l14",
+		"test":              "jn4tegjtpqro30t3o11thb4b5s",
+		"test2":             "qakvqnh8ttei89e0gc76crpql4",
+		"data.bin":          "0q2847tfko6mhj3dag3r809qbc",
+		"ticw/data.bin":     "5mv97b0ule6pht33srae5pice8/0q2847tfko6mhj3dag3r809qbc",
+		"tiutfo/test/one":   "legd371aa8ol36tjfklt347qnc/jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno",
+		"tiuufo/test/one":   "vi6u1olqhirqv14cd8qlej1mgo/jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno",
+		"tiutfo/second/one": "legd371aa8ol36tjfklt347qnc/qvt1ochrkcfbptp5mu9ugb2l14/lm4u7jjt3c85bf56vjqgeenuno",
+		"second/one":        "qvt1ochrkcfbptp5mu9ugb2l14/lm4u7jjt3c85bf56vjqgeenuno",
+		"test/one":          "jn4tegjtpqro30t3o11thb4b5s/lm4u7jjt3c85bf56vjqgeenuno",
+		"test/second":       "jn4tegjtpqro30t3o11thb4b5s/qvt1ochrkcfbptp5mu9ugb2l14",
+		"test/third":        "jn4tegjtpqro30t3o11thb4b5s/2nd7fjiop5h3ihfj1vl953aa5g",
+		"test/0.bin":        "jn4tegjtpqro30t3o11thb4b5s/e6frddt058b6kvbpmlstlndmtk",
+		"test/1.bin":        "jn4tegjtpqro30t3o11thb4b5s/kck472nt1k7qbmob0mt1p1crgc",
+		"test/2.bin":        "jn4tegjtpqro30t3o11thb4b5s/744oe9ven2rmak4u27if51qk24",
+		"test/3.bin":        "jn4tegjtpqro30t3o11thb4b5s/2bjd8kef0u5lmsu6qhqll34bcs",
+		"test/4.bin":        "jn4tegjtpqro30t3o11thb4b5s/cvjs73iv0a82v0c7r67avllh7s",
+		"test/5.bin":        "jn4tegjtpqro30t3o11thb4b5s/0plkdo790b6bnmt33qsdqmhv9c",
+		"test/6.bin":        "jn4tegjtpqro30t3o11thb4b5s/s5r633srnjtbh83893jovjt5d0",
+		"test/7.bin":        "jn4tegjtpqro30t3o11thb4b5s/6rq45tr9bjsammku622flmqsu4",
+		"test/8.bin":        "jn4tegjtpqro30t3o11thb4b5s/37bc6tcl3e31qb8cadvjb749vk",
+		"test/9.bin":        "jn4tegjtpqro30t3o11thb4b5s/t4pr35hnls32789o8fk0chk1ec",
+	}
+)
+
+func init() {
+	goflag.StringVar(&remoteName, "remote-internal", "TestCache", "Remote to test with, defaults to local filesystem")
+	goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
+	goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
+	goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
+}
+
+// TestMain drives the tests
+func TestMain(m *testing.M) {
+	goflag.Parse()
+	var rc int
+
+	runInstance = newRun()
+	rc = m.Run()
+	os.Exit(rc)
+}
+
 func TestInternalListRootAndInnerRemotes(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "tilrair-local", "tilrair-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("tilrair%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	// Instantiate inner fs
 	innerFolder := "inner"
-	err := rootFs.Mkdir(innerFolder)
-	require.NoError(t, err)
-	innerFs, err := fs.NewFs("tilrair-cache:" + innerFolder)
+	runInstance.mkdir(t, rootFs, innerFolder)
+	rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs2, boltDb2)
+
+	runInstance.writeObjectString(t, rootFs2, "one", "content")
+	listRoot := runInstance.list(t, rootFs, "")
+	listRootInner := runInstance.list(t, rootFs, innerFolder)
+	listInner, err := rootFs2.List("")
 	require.NoError(t, err)
 
-	obj := writeObjectString(t, innerFs, "one", "content")
-
-	listRoot, err := rootFs.List("")
-	require.NoError(t, err)
-	listRootInner, err := rootFs.List(innerFolder)
-	require.NoError(t, err)
-	listInner, err := innerFs.List("")
-	require.NoError(t, err)
-
-	require.Lenf(t, listRoot, 1, "remote %v should have 1 entry", rootFs.Root())
-	require.Lenf(t, listRootInner, 1, "remote %v should have 1 entry in %v", rootFs.Root(), innerFolder)
-	require.Lenf(t, listInner, 1, "remote %v should have 1 entry", innerFs.Root())
-
-	err = obj.Remove()
-	require.NoError(t, err)
+	require.Len(t, listRoot, 1)
+	require.Len(t, listRootInner, 1)
+	require.Len(t, listInner, 1)
 }
 
 func TestInternalObjWrapFsFound(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "tiowff-local", "tiowff-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("tiowff%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	wrappedFs := cfs.UnWrap()
-	data := "content"
-	writeObjectString(t, wrappedFs, "second", data)
 
-	listRoot, err := rootFs.List("")
-	require.NoError(t, err)
-	require.Lenf(t, listRoot, 1, "remote %v should have 1 entry", rootFs.Root())
+	var testData []byte
+	if runInstance.rootIsCrypt {
+		testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
+		require.NoError(t, err)
+	} else {
+		testData = []byte("test content")
+	}
 
-	co, err := rootFs.NewObject("second")
-	require.NoError(t, err)
-	r, err := co.Open()
-	require.NoError(t, err)
-	cachedData, err := ioutil.ReadAll(r)
-	require.NoError(t, err)
-	err = r.Close()
-	require.NoError(t, err)
+	runInstance.writeObjectBytes(t, wrappedFs, runInstance.encryptRemoteIfNeeded(t, "test"), testData)
+	listRoot := runInstance.list(t, rootFs, "")
+	require.Len(t, listRoot, 1)
 
-	strCached := string(cachedData)
-	require.Equal(t, data, strCached)
+	cachedData := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
+	require.Equal(t, "test content", string(cachedData))
 
-	err = co.Remove()
+	err = runInstance.rm(t, rootFs, "test")
 	require.NoError(t, err)
-
-	listRoot, err = wrappedFs.List("")
-	require.NoError(t, err)
-	require.Lenf(t, listRoot, 0, "remote %v should have 0 entries: %v", wrappedFs.Root(), listRoot)
+	listRoot = runInstance.list(t, rootFs, "")
+	require.Len(t, listRoot, 0)
 }
 
 func TestInternalObjNotFound(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "tionf-local", "tionf-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("tionf%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	obj, err := rootFs.NewObject("404")
 	require.Error(t, err)
 	require.Nil(t, obj)
 }
 
-func TestInternalCachedWrittenContentMatches(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "ticwcm-local", "ticwcm-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
+	if !runInstance.useMount {
+		t.Skip("test needs mount mode")
+	}
+	id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
+	require.NoError(t, err)
+
+	var testData []byte
+	if runInstance.rootIsCrypt {
+		testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
+		require.NoError(t, err)
+	} else {
+		testData = []byte("test content")
+	}
+
+	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
+	data := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
+	require.Equal(t, "test content", string(data))
+}
+
+func TestInternalCachedWrittenContentMatches(t *testing.T) {
+	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 
 	// create some rand test data
-	testData := make([]byte, (chunkSize*4 + chunkSize/2))
-	testSize, err := rand.Read(testData)
-	require.Equal(t, len(testData), testSize, "data size doesn't match")
-	require.NoError(t, err)
+	testData := runInstance.randomBytes(t, chunkSize*4+chunkSize/2)
 
 	// write the object
-	o := writeObjectBytes(t, rootFs, "data.bin", testData)
-	require.Equal(t, o.Size(), int64(testSize))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
 	// check sample of data from in-file
 	sampleStart := chunkSize / 2
 	sampleEnd := chunkSize
 	testSample := testData[sampleStart:sampleEnd]
-	checkSample := readDataFromObj(t, o, sampleStart, sampleEnd, false)
+	checkSample := runInstance.readDataFromRemote(t, rootFs, "data.bin", sampleStart, sampleEnd, false)
 	require.Equal(t, int64(len(checkSample)), sampleEnd-sampleStart)
 	require.Equal(t, checkSample, testSample)
 }
 
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "ticucm-local", "ticucm-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("ticucm%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	var err error
 
 	// create some rand test data
-	testData1 := []byte(fstest.RandomString(100))
-	testData2 := []byte(fstest.RandomString(200))
+	var testData1 []byte
+	var testData2 []byte
+	if runInstance.rootIsCrypt {
+		testData1, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
+		require.NoError(t, err)
+		testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
+		require.NoError(t, err)
+	} else {
+		testData1 = []byte(fstest.RandomString(100))
+		testData2 = []byte(fstest.RandomString(200))
+	}
 
 	// write the object
-	o := updateObjectBytes(t, rootFs, "data.bin", testData1, testData2)
+	o := runInstance.updateObjectRemote(t, rootFs, "data.bin", testData1, testData2)
 	require.Equal(t, o.Size(), int64(len(testData2)))
 
 	// check data from in-file
-	reader, err := o.Open()
-	require.NoError(t, err)
-	checkSample, err := ioutil.ReadAll(reader)
-	_ = reader.Close()
-	require.NoError(t, err)
+	checkSample := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(len(testData2)), false)
 	require.Equal(t, checkSample, testData2)
 }
 
 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "tiwwcm-local", "tiwwcm-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
+	vfsflags.Opt.DirCacheTime = time.Second
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	if runInstance.rootIsCrypt {
+		t.Skip("test skipped with crypt remote")
+	}
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 
 	// create some rand test data
-	testData := make([]byte, (chunkSize*4 + chunkSize/2))
-	testSize, err := rand.Read(testData)
-	require.Equal(t, len(testData), testSize)
-	require.NoError(t, err)
+	testSize := chunkSize*4 + chunkSize/2
+	testData := runInstance.randomBytes(t, testSize)
 
 	// write the object
-	o := writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
+	o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
 	require.Equal(t, o.Size(), int64(testSize))
+	time.Sleep(time.Second * 3)
 
-	o2, err := rootFs.NewObject("data.bin")
-	require.NoError(t, err)
-	require.Equal(t, o2.Size(), o.Size())
+	data2 := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
+	require.Equal(t, int64(len(data2)), o.Size())
 
 	// check sample of data from in-file
 	sampleStart := chunkSize / 2
 	sampleEnd := chunkSize
 	testSample := testData[sampleStart:sampleEnd]
-	checkSample := readDataFromObj(t, o2, sampleStart, sampleEnd, false)
+	checkSample := runInstance.readDataFromRemote(t, rootFs, "data.bin", sampleStart, sampleEnd, false)
 	require.Equal(t, len(checkSample), len(testSample))
 
 	for i := 0; i < len(checkSample); i++ {
@@ -194,177 +273,130 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 }
 
 func TestInternalLargeWrittenContentMatches(t *testing.T) {
-	t.Skip("FIXME disabled because it is unreliable")
-	rootFs, boltDb := newLocalCacheFs(t, "tilwcm-local", "tilwcm-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
-
-	cfs, err := getCacheFs(rootFs)
-	require.NoError(t, err)
-	chunkSize := cfs.ChunkSize()
-
-	// create some rand test data
-	testData := make([]byte, (chunkSize*10 + chunkSize/2))
-	testSize, err := rand.Read(testData)
-	require.Equal(t, len(testData), testSize)
-	require.NoError(t, err)
-
-	// write the object
-	o := writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
-	require.Equal(t, o.Size(), int64(testSize))
-
-	o2, err := rootFs.NewObject("data.bin")
-	require.NoError(t, err)
-	require.Equal(t, o2.Size(), o.Size())
-
-	// check data from in-file
-	checkSample := readDataFromObj(t, o2, int64(0), int64(testSize), false)
-	require.Equal(t, len(checkSample), len(testData))
-
-	for i := 0; i < len(checkSample); i++ {
-		require.Equal(t, testData[i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
+	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
+	vfsflags.Opt.DirCacheTime = time.Second
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	if runInstance.rootIsCrypt {
+		t.Skip("test skipped with crypt remote")
 	}
-}
 
-func TestInternalLargeWrittenContentMatches2(t *testing.T) {
-	cryptFs, boltDb := newLocalCacheCryptFs(t, "tilwcm2-local", "tilwcm2-cache", "tilwcm2-crypt", true, nil)
-	defer cleanupFs(t, cryptFs, boltDb)
-
-	cfs, err := getCacheFs(cryptFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
-	fileSize := 87197196
-	readOffset := 87195648
 
 	// create some rand test data
-	testData := make([]byte, fileSize)
-	testSize, err := rand.Read(testData)
-	require.Equal(t, len(testData), testSize)
-	require.NoError(t, err)
+	testSize := chunkSize*10 + chunkSize/2
+	testData := runInstance.randomBytes(t, testSize)
 
 	// write the object
-	o := writeObjectBytes(t, cryptFs, "data.bin", testData)
-	require.Equal(t, o.Size(), int64(testSize))
+	runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
+	time.Sleep(time.Second * 3)
 
-	o2, err := cryptFs.NewObject("data.bin")
-	require.NoError(t, err)
-	require.Equal(t, o2.Size(), o.Size())
-
-	// check data from in-file
-	reader, err := o2.Open(&fs.SeekOption{Offset: int64(readOffset)})
-	require.NoError(t, err)
-	rs, ok := reader.(io.Seeker)
-	require.True(t, ok)
-	checkOffset, err := rs.Seek(int64(readOffset), 0)
-	require.NoError(t, err)
-	require.Equal(t, checkOffset, int64(readOffset))
-	checkSample, err := ioutil.ReadAll(reader)
-	require.NoError(t, err)
-	_ = reader.Close()
-
-	require.Equal(t, len(checkSample), fileSize-readOffset)
-	for i := 0; i < fileSize-readOffset; i++ {
-		require.Equal(t, testData[readOffset+i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
+	readData := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
+	for i := 0; i < len(readData); i++ {
+		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 	}
 }
 
 func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "tiwfcns-local", "tiwfcns-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 
 	// create some rand test data
-	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
+	testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
 	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(co.Remote())
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
-	err = o.SetModTime(co.ModTime().Truncate(time.Hour))
+	wrappedTime := time.Now().Add(time.Hour * -1)
+	err = o.SetModTime(wrappedTime)
 	require.NoError(t, err)
 
 	// get a new instance from the cache
-	co2, err := rootFs.NewObject(o.Remote())
+	co, err := rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-
-	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
-	require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
-	require.Equal(t, co.ModTime().String(), co2.ModTime().String())
+	require.NotEqual(t, co.ModTime().String(), o.ModTime().String())
 }
 
 func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "ticsadcf-local", "ticsadcf-cache", nil)
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 
 	// create some rand test data
-	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
+	testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
 	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(co.Remote())
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
-	err = o.SetModTime(co.ModTime().Add(-1 * time.Hour))
+	wrappedTime := time.Now().Add(-1 * time.Hour)
+	err = o.SetModTime(wrappedTime)
 	require.NoError(t, err)
 
 	// get a new instance from the cache
-	co2, err := rootFs.NewObject(o.Remote())
+	co, err := rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-
 	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
-	require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
-	require.Equal(t, co.ModTime().String(), co2.ModTime().String())
 
 	cfs.DirCacheFlush() // flush the cache
 
-	l, err := cfs.UnWrap().List("")
-	require.NoError(t, err)
-	require.Len(t, l, 1)
-	o2 := l[0]
-
 	// get a new instance from the cache
-	co, err = rootFs.NewObject(o.Remote())
+	co, err = rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.Equal(t, o2.ModTime().String(), co.ModTime().String())
+	require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
 }
 
 func TestInternalCacheWrites(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "ticw-local", "ticw-cache", map[string]string{"cache-writes": "true"})
-	defer cleanupFs(t, rootFs, boltDb)
+	id := "ticw"
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 
 	// create some rand test data
-	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
+	earliestTime := time.Now()
+	testData := runInstance.randomBytes(t, (chunkSize*4 + chunkSize/2))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 	expectedTs := time.Now()
-	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), co.Remote()), 0)
+	ts, err := boltDb.GetChunkTs(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "data.bin")), 0)
 	require.NoError(t, err)
-	require.WithinDuration(t, expectedTs, ts, okDiff)
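+	// the chunk timestamp must fall in the window between the start of the write and now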
+	require.WithinDuration(t, expectedTs, ts, expectedTs.Sub(earliestTime))
 }
 
 func TestInternalMaxChunkSizeRespected(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "timcsr-local", "timcsr-cache", map[string]string{"cache-workers": "1"})
-	defer cleanupFs(t, rootFs, boltDb)
+	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	cfs, err := getCacheFs(rootFs)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 	totalChunks := 20
 
 	// create some rand test data
-	obj := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
-	o, err := rootFs.NewObject(obj.Remote())
+	testData := runInstance.randomBytes(t, (int64(totalChunks-1)*chunkSize + chunkSize/2))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
+	o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)
 
 	for i := 0; i < 4; i++ { // read first 4
-		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
+		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)
 	// the last 2 **must** be in the cache
@@ -372,7 +404,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	require.True(t, boltDb.HasChunk(co, chunkSize*3))
 
 	for i := 4; i < 6; i++ { // read next 2
-		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
+		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)
 	// the last 2 **must** be in the cache
@@ -381,36 +413,382 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 }
 
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
-	rootFs, boltDb := newLocalCacheFs(t, "tieer-local", "tieer-cache", map[string]string{"info_age": "5s"})
-	defer cleanupFs(t, rootFs, boltDb)
-
-	cfs, err := getCacheFs(rootFs)
+	id := fmt.Sprintf("tieer%v", time.Now().Unix())
+	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined info_age (5s)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 
 	// create some rand test data
-	_ = writeObjectString(t, cfs, "one", "one content")
-	err = cfs.Mkdir("test")
-	require.NoError(t, err)
-	_ = writeObjectString(t, cfs, "test/second", "second content")
+	runInstance.writeRemoteString(t, rootFs, "one", "one content")
+	runInstance.mkdir(t, rootFs, "test")
+	runInstance.writeRemoteString(t, rootFs, "test/second", "second content")
 
-	l, err := cfs.List("test")
-	require.NoError(t, err)
+	l := runInstance.list(t, rootFs, "test")
 	require.Len(t, l, 1)
 
-	err = cfs.UnWrap().Mkdir("test/test2")
+	err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
 	require.NoError(t, err)
 
-	l, err = cfs.List("test")
-	require.NoError(t, err)
+	l = runInstance.list(t, rootFs, "test")
 	require.Len(t, l, 1)
 
-	waitTime := time.Second * 5
-	t.Logf("Waiting %v seconds for cache to expire\n", waitTime)
-	time.Sleep(waitTime)
-
-	l, err = cfs.List("test")
+	err = runInstance.retryBlock(func() error {
+		l = runInstance.list(t, rootFs, "test")
+		if len(l) != 2 {
+			return errors.New("list is not 2")
+		}
+		return nil
+	}, 10, time.Second)
 	require.NoError(t, err)
-	require.Len(t, l, 2)
+}
+
+func TestInternalUploadTempDirCreated(t *testing.T) {
+	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
+	require.NoError(t, err)
+}
+
+func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
+	// create some rand test data
+	testSize := int64(524288000)
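+	// 500 MiB of random data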
+	testReader := runInstance.randomReader(t, testSize)
+	bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
+	runInstance.writeRemoteReader(t, rootFs, "one", testReader)
+	// validate that it exists in temp fs
+	ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
+	require.NoError(t, err)
+
+	if runInstance.rootIsCrypt {
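+		// expected crypt size: a 32 byte file header plus 16 bytes of overhead
+		// per 64 KiB block (524288000 + 32 + 8000*16 = 524416032)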
+		require.Equal(t, int64(524416032), ti.Size())
+	} else {
+		require.Equal(t, testSize, ti.Size())
+	}
+	de1 := runInstance.list(t, rootFs, "")
+	require.Len(t, de1, 1)
+
+	runInstance.completeBackgroundUpload(t, "one", bu)
+	// check if it was removed from temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
+	require.True(t, os.IsNotExist(err))
+
+	// check if it can be read
+	data2 := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
+	require.Len(t, data2, 1024)
+}
+
+func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
+	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
+}
+
+func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
+	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
+}
+
+func TestInternalUploadQueueMoreFiles(t *testing.T) {
+	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	err := rootFs.Mkdir("test")
+	require.NoError(t, err)
+	minSize := 5242880
+	maxSize := 10485760
+	totalFiles := 10
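+	// each file gets a random size between 5 MiB and 10 MiB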
+	rand.Seed(time.Now().Unix())
+
+	lastFile := ""
+	for i := 0; i < totalFiles; i++ {
+		size := int64(rand.Intn(maxSize-minSize) + minSize)
+		testReader := runInstance.randomReader(t, size)
+		remote := "test/" + strconv.Itoa(i) + ".bin"
+		runInstance.writeRemoteReader(t, rootFs, remote, testReader)
+
+		// validate that it exists in temp fs
+		ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
+		require.NoError(t, err)
+		require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
+
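+		// space out the writes when the wrapped remote is external,
+		// presumably so its slower uploads can keep up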
+		if runInstance.wrappedIsExternal && i < totalFiles-1 {
+			time.Sleep(time.Second * 3)
+		}
+		lastFile = remote
+	}
+
+	// check that the cache lists all files even though the temp uploads likely haven't finished yet
+	de1 := runInstance.list(t, rootFs, "test")
+	require.Len(t, de1, totalFiles)
+
+	// wait for background uploader to do its thing
+	runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
+
+	// check that no temp files are left behind after the uploads complete
+	tf, err := ioutil.ReadDir(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
+	require.NoError(t, err)
+	require.Len(t, tf, 0)
+
+	// check if cache lists all files
+	de1 = runInstance.list(t, rootFs, "test")
+	require.Len(t, de1, totalFiles)
+}
+
+func TestInternalUploadTempFileOperations(t *testing.T) {
+	id := "tiutfo"
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	boltDb.PurgeTempUploads()
+
+	// create some rand test data
+	runInstance.mkdir(t, rootFs, "test")
+	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+
+	// check if it can be read
+	data1 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
+	require.Equal(t, []byte("one content"), data1)
+	// validate that it exists in temp fs
+	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+
+	// test DirMove - allowed
+	err = runInstance.dirMove(t, rootFs, "test", "second")
+	if err != errNotSupported {
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/one")
+		require.Error(t, err)
+		_, err = rootFs.NewObject("second/one")
+		require.NoError(t, err)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.Error(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
+		require.NoError(t, err)
+		started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
+		require.Error(t, err)
+		started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
+		require.NoError(t, err)
+		require.False(t, started)
+		runInstance.mkdir(t, rootFs, "test")
+		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+	}
+
+	// test Rmdir - allowed, but it fails here because the directory is not empty
+	err = runInstance.rm(t, rootFs, "test")
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "directory not empty")
+	_, err = rootFs.NewObject("test/one")
+	require.NoError(t, err)
+	// validate that it exists in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+	started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
+	require.False(t, started)
+	require.NoError(t, err)
+
+	// test Move/Rename -- allowed
+	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
+	if err != errNotSupported {
+		require.NoError(t, err)
+		// try to read from it
+		_, err = rootFs.NewObject("test/one")
+		require.Error(t, err)
+		_, err = rootFs.NewObject("test/second")
+		require.NoError(t, err)
+		data2 := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
+		require.Equal(t, []byte("one content"), data2)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.Error(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
+		require.NoError(t, err)
+		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+	}
+
+	// test Copy -- allowed
+	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
+	if err != errNotSupported {
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/one")
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/third")
+		require.NoError(t, err)
+		data2 := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
+		require.Equal(t, []byte("one content"), data2)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.NoError(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
+		require.NoError(t, err)
+	}
+
+	// test Remove -- allowed
+	err = runInstance.rm(t, rootFs, "test/one")
+	require.NoError(t, err)
+	_, err = rootFs.NewObject("test/one")
+	require.Error(t, err)
+	// validate that it doesn't exist in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.Error(t, err)
+	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+
+	// test Update -- allowed
+	firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
+	require.NoError(t, err)
+	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
+	require.NoError(t, err)
+	obj2, err := rootFs.NewObject("test/one")
+	require.NoError(t, err)
+	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
+	require.Equal(t, "one content updated", string(data2))
+	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+	if runInstance.rootIsCrypt {
+		require.Equal(t, int64(67), tmpInfo.Size())
+	} else {
+		require.Equal(t, int64(len(data2)), tmpInfo.Size())
+	}
+
+	// test SetModTime -- allowed
+	secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
+	require.NoError(t, err)
+	require.NotEqual(t, secondModTime, firstModTime)
+	require.NotEqual(t, time.Time{}, firstModTime)
+	require.NotEqual(t, time.Time{}, secondModTime)
+}
+
+func TestInternalUploadUploadingFileOperations(t *testing.T) {
+	id := "tiuufo"
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	boltDb.PurgeTempUploads()
+
+	// create some rand test data
+	runInstance.mkdir(t, rootFs, "test")
+	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+
+	// check if it can be read
+	data1 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
+	require.Equal(t, []byte("one content"), data1)
+	// validate that it exists in temp fs
+	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+
+	err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
+	require.NoError(t, err)
+
+	// test DirMove
+	err = runInstance.dirMove(t, rootFs, "test", "second")
+	if err != errNotSupported {
+		require.Error(t, err)
+		_, err = rootFs.NewObject("test/one")
+		require.NoError(t, err)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.NoError(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
+		require.Error(t, err)
+	}
+
+	// test Rmdir
+	err = runInstance.rm(t, rootFs, "test")
+	require.Error(t, err)
+	_, err = rootFs.NewObject("test/one")
+	require.NoError(t, err)
+	// validate that it still exists in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+
+	// test Move/Rename
+	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
+	if err != errNotSupported {
+		require.Error(t, err)
+		// try to read from it
+		_, err = rootFs.NewObject("test/one")
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/second")
+		require.Error(t, err)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.NoError(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
+		require.Error(t, err)
+	}
+
+	// test Copy -- allowed
+	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
+	if err != errNotSupported {
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/one")
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/third")
+		require.NoError(t, err)
+		data2 := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
+		require.Equal(t, []byte("one content"), data2)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.NoError(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
+		require.NoError(t, err)
+	}
+
+	// test Remove
+	err = runInstance.rm(t, rootFs, "test/one")
+	require.Error(t, err)
+	_, err = rootFs.NewObject("test/one")
+	require.NoError(t, err)
+	// validate that it still exists in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+
+	// test Update - this seems to work. Why? FIXME
+	//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
+	//require.NoError(t, err)
+	//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
+	//	data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
+	//	require.Equal(t, "one content", string(data2))
+	//
+	//	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	//	require.NoError(t, err)
+	//	if runInstance.rootIsCrypt {
+	//		require.Equal(t, int64(67), tmpInfo.Size())
+	//	} else {
+	//		require.Equal(t, int64(len(data2)), tmpInfo.Size())
+	//	}
+	//})
+	//require.Error(t, err)
+
+	// test SetModTime -- seems to work because of the previous update
+	//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
+	//require.NoError(t, err)
+	//require.Equal(t, secondModTime, firstModTime)
+	//require.NotEqual(t, time.Time{}, firstModTime)
+	//require.NotEqual(t, time.Time{}, secondModTime)
 }
 
 // FIXME, enable this when mount is sorted out
@@ -420,7 +798,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 //		t.Skip("Not yet")
 //	}
 //	id := "tifm1904"
-//	rootFs, _ := newLocalCacheCryptFs(t, "test-local", "test-cache", "test-crypt", false,
+//	rootFs, _ := newCacheFs(t, RemoteName, id, false,
 //		map[string]string{"chunk_size": "5M", "info_age": "1m", "chunk_total_size": "500M", "cache-writes": "true"})
 //	mntPoint := path.Join("/tmp", "tifm1904-mnt")
 //	testPoint := path.Join(mntPoint, id)
@@ -472,273 +850,764 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 //	}
 //}
 
-func writeObjectRandomBytes(t *testing.T, f fs.Fs, size int64) fs.Object {
-	remote := strconv.Itoa(rand.Int()) + ".bin"
-	// create some rand test data
-	testData := make([]byte, size)
-	testSize, err := rand.Read(testData)
-	require.Equal(t, size, int64(len(testData)))
-	require.Equal(t, size, int64(testSize))
-	require.NoError(t, err)
-
-	o := writeObjectBytes(t, f, remote, testData)
-	require.Equal(t, size, o.Size())
-
-	return o
+// run holds the remotes for a test run
+type run struct {
+	okDiff            time.Duration
+	allCfgMap         map[string]string
+	allFlagMap        map[string]string
+	runDefaultCfgMap  map[string]string
+	runDefaultFlagMap map[string]string
+	mntDir            string
+	tmpUploadDir      string
+	useMount          bool
+	isMounted         bool
+	rootIsCrypt       bool
+	wrappedIsExternal bool
+	unmountFn         func() error
+	unmountRes        chan error
+	vfs               *vfs.VFS
+	tempFiles         []*os.File
 }
 
-func writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object {
-	return writeObjectBytes(t, f, remote, []byte(content))
-}
-
-func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
-	in := bytes.NewReader(data)
-	modTime := time.Now()
-	objInfo := object.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
-
-	obj, err := f.Put(in, objInfo)
-	require.NoError(t, err)
-
-	return obj
-}
-
-func updateObjectBytes(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
-	in1 := bytes.NewReader(data1)
-	in2 := bytes.NewReader(data2)
-	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
-	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
-
-	obj, err := f.Put(in1, objInfo1)
-	require.NoError(t, err)
-	obj, err = f.NewObject(remote)
-	require.NoError(t, err)
-	err = obj.Update(in2, objInfo2)
-
-	return obj
-}
-
-func readDataFromObj(t *testing.T, co fs.Object, offset, end int64, useSeek bool) []byte {
-	var reader io.ReadCloser
+func newRun() *run {
 	var err error
-	size := end - offset
-	checkSample := make([]byte, size)
+	r := &run{
+		okDiff:    time.Second * 9, // a really big tolerance, but the build machines seem to be slow; this needs a different approach
+		useMount:  useMount,
+		isMounted: false,
+	}
 
-	reader, err = co.Open(&fs.SeekOption{Offset: offset})
-	require.NoError(t, err)
+	r.allCfgMap = map[string]string{
+		"plex_url":         "",
+		"plex_username":    "",
+		"plex_password":    "",
+		"chunk_size":       cache.DefCacheChunkSize,
+		"info_age":         cache.DefCacheInfoAge,
+		"chunk_total_size": cache.DefCacheTotalChunkSize,
+	}
+	r.allFlagMap = map[string]string{
+		"cache-db-path":              filepath.Join(config.CacheDir, "cache-backend"),
+		"cache-chunk-path":           filepath.Join(config.CacheDir, "cache-backend"),
+		"cache-db-purge":             "true",
+		"cache-chunk-size":           cache.DefCacheChunkSize,
+		"cache-total-chunk-size":     cache.DefCacheTotalChunkSize,
+		"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
+		"cache-info-age":             cache.DefCacheInfoAge,
+		"cache-read-retries":         strconv.Itoa(cache.DefCacheReadRetries),
+		"cache-workers":              strconv.Itoa(cache.DefCacheTotalWorkers),
+		"cache-chunk-no-memory":      "false",
+		"cache-rps":                  strconv.Itoa(cache.DefCacheRps),
+		"cache-writes":               "false",
+		"cache-tmp-upload-path":      "",
+		"cache-tmp-wait-time":        cache.DefCacheTmpWaitTime,
+	}
+	r.runDefaultCfgMap = make(map[string]string)
+	for key, value := range r.allCfgMap {
+		r.runDefaultCfgMap[key] = value
+	}
+	r.runDefaultFlagMap = make(map[string]string)
+	for key, value := range r.allFlagMap {
+		r.runDefaultFlagMap[key] = value
+	}
+	if mountDir == "" {
+		if runtime.GOOS != "windows" {
+			r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
+			if err != nil {
+				log.Fatalf("Failed to create mount dir: %v", err)
+				return nil
+			}
+		} else {
+			// Find a free drive letter
+			drive := ""
+			for letter := 'E'; letter <= 'Z'; letter++ {
+				drive = string(letter) + ":"
+				_, err := os.Stat(drive + "\\")
+				if os.IsNotExist(err) {
+					goto found
+				}
+			}
+			log.Print("Couldn't find free drive letter for test")
+		found:
+			r.mntDir = drive
+		}
+	} else {
+		r.mntDir = mountDir
+	}
+	log.Printf("Mount Dir: %v", r.mntDir)
 
-	totalRead, err := io.ReadFull(reader, checkSample)
-	require.NoError(t, err)
-	_ = reader.Close()
-	require.Equal(t, int64(totalRead), size, "wrong data read size from file")
+	if uploadDir == "" {
+		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
+		if err != nil {
+			log.Fatalf("Failed to create temp dir: %v", err)
+		}
+	} else {
+		r.tmpUploadDir = uploadDir
+	}
+	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
 
-	return checkSample
+	return r
 }
 
-func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
-	err := f.Features().Purge()
-	require.NoError(t, err)
-	b.Close()
+func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
+	if !runInstance.rootIsCrypt || len(decryptedToEncryptedRemotes) == 0 {
+		return remote
+	}
+
+	enc, ok := decryptedToEncryptedRemotes[remote]
+	if !ok {
+		t.Fatalf("Failed to find decrypted -> encrypted mapping for '%v'", remote)
+		return remote
+	}
+	return enc
 }
 
-func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
+func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
+	remoteExists := false
+	for _, s := range config.FileSections() {
+		if s == remote {
+			remoteExists = true
+		}
+	}
+	if !remoteExists && needRemote {
+		t.Skipf("Need remote (%v) to exist", remote)
+		return nil, nil
+	}
+
+	// if the remote doesn't exist, create a new cache remote backed by a local one for it;
+	// otherwise identify which section is the cache remote (it can be wrapped by a crypt too)
+	rootIsCrypt := false
+	cacheRemote := remote
+	if !remoteExists {
+		localRemote := remote + "-local"
+		config.FileSet(localRemote, "type", "local")
+		config.FileSet(localRemote, "nounc", "true")
+		config.FileSet(remote, "type", "cache")
+		config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
+	} else {
+		remoteType := fs.ConfigFileGet(remote, "type", "")
+		if remoteType == "" {
+			t.Skipf("skipped due to invalid remote type for %v", remote)
+			return nil, nil
+		}
+		if remoteType != "cache" {
+			if remoteType == "crypt" {
+				rootIsCrypt = true
+				config.FileSet(remote, "password", cryptPassword1)
+				config.FileSet(remote, "password2", cryptPassword2)
+			}
+			remoteRemote := fs.ConfigFileGet(remote, "remote", "")
+			if remoteRemote == "" {
+				t.Skipf("skipped due to invalid remote wrapper for %v", remote)
+				return nil, nil
+			}
+			remoteRemoteParts := strings.Split(remoteRemote, ":")
+			remoteWrapping := remoteRemoteParts[0]
+			remoteType := fs.ConfigFileGet(remoteWrapping, "type", "")
+			if remoteType != "cache" {
+				t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
+				return nil, nil
+			}
+			cacheRemote = remoteWrapping
+		}
+	}
+	runInstance.rootIsCrypt = rootIsCrypt
 	dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
 	chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
 	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 
-	localExists := false
-	cacheExists := false
-	cryptExists := false
-	for _, s := range config.FileSections() {
-		if s == localRemote {
-			localExists = true
-		}
-		if s == cacheRemote {
-			cacheExists = true
-		}
-		if s == cryptRemote {
-			cryptExists = true
+	for k, v := range r.runDefaultCfgMap {
+		if c, ok := cfg[k]; ok {
+			config.FileSet(cacheRemote, k, c)
+		} else {
+			config.FileSet(cacheRemote, k, v)
 		}
 	}
-
-	localRemoteWrap := ""
-	if !localExists {
-		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
-		config.FileSet(localRemote, "type", "local")
-		config.FileSet(localRemote, "nounc", "true")
-	}
-
-	if !cacheExists {
-		config.FileSet(cacheRemote, "type", "cache")
-		config.FileSet(cacheRemote, "remote", localRemoteWrap)
-	}
-	if c, ok := cfg["chunk_size"]; ok {
-		config.FileSet(cacheRemote, "chunk_size", c)
-	} else {
-		config.FileSet(cacheRemote, "chunk_size", "1m")
-	}
-	if c, ok := cfg["chunk_total_size"]; ok {
-		config.FileSet(cacheRemote, "chunk_total_size", c)
-	} else {
-		config.FileSet(cacheRemote, "chunk_total_size", "2m")
-	}
-	if c, ok := cfg["info_age"]; ok {
-		config.FileSet(cacheRemote, "info_age", c)
-	} else {
-		config.FileSet(cacheRemote, "info_age", infoAge.String())
-	}
-
-	if !cryptExists {
-		t.Skipf("Skipping due to missing crypt remote: %v", cryptRemote)
-	}
-
-	if c, ok := cfg["cache-chunk-no-memory"]; ok {
-		_ = flag.Set("cache-chunk-no-memory", c)
-	} else {
-		_ = flag.Set("cache-chunk-no-memory", "true")
-	}
-	if c, ok := cfg["cache-workers"]; ok {
-		_ = flag.Set("cache-workers", c)
-	} else {
-		_ = flag.Set("cache-workers", strconv.Itoa(workers))
-	}
-	if c, ok := cfg["cache-chunk-clean-interval"]; ok {
-		_ = flag.Set("cache-chunk-clean-interval", c)
-	} else {
-		_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
-	}
-	if c, ok := cfg["cache-writes"]; ok {
-		_ = flag.Set("cache-writes", c)
-	} else {
-		_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
+	for k, v := range r.runDefaultFlagMap {
+		if c, ok := flags[k]; ok {
+			_ = flag.Set(k, c)
+		} else {
+			_ = flag.Set(k, v)
+		}
 	}
+	fs.Config.LowLevelRetries = 1
 
 	// Instantiate root
-	f, err := fs.NewFs(cryptRemote + ":")
+	if purge {
+		boltDb.PurgeTempUploads()
+		_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
+	}
+	f, err := fs.NewFs(remote + ":" + id)
 	require.NoError(t, err)
+	cfs, err := r.getCacheFs(f)
+	require.NoError(t, err)
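+	// a wrapped cache, crypt or local remote runs locally; anything else is assumed to be an external (network) remote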
+	_, isCache := cfs.Features().UnWrap().(*cache.Fs)
+	_, isCrypt := cfs.Features().UnWrap().(*crypt.Fs)
+	_, isLocal := cfs.Features().UnWrap().(*local.Fs)
+	if isCache || isCrypt || isLocal {
+		r.wrappedIsExternal = false
+	} else {
+		r.wrappedIsExternal = true
+	}
+
 	if purge {
 		_ = f.Features().Purge()
 		require.NoError(t, err)
 	}
 	err = f.Mkdir("")
 	require.NoError(t, err)
+	if r.useMount && !r.isMounted {
+		r.mountFs(t, f)
+	}
 
 	return f, boltDb
 }
 
-func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
-	fstest.Initialise()
-	dbPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
-	chunkPath := filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
-	boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
+func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
+	if r.useMount && r.isMounted {
+		r.unmountFs(t, f)
+	}
+
+	err := f.Features().Purge()
+	require.NoError(t, err)
+	cfs, err := r.getCacheFs(f)
+	require.NoError(t, err)
+	cfs.StopBackgroundRunners()
+
+	if r.useMount && runtime.GOOS != "windows" {
+		err = os.RemoveAll(r.mntDir)
+		require.NoError(t, err)
+	}
+	err = os.RemoveAll(r.tmpUploadDir)
 	require.NoError(t, err)
 
-	localExists := false
-	cacheExists := false
-	for _, s := range config.FileSections() {
-		if s == localRemote {
-			localExists = true
-		}
-		if s == cacheRemote {
-			cacheExists = true
-		}
+	for _, f := range r.tempFiles {
+		_ = f.Close()
+		_ = os.Remove(f.Name())
 	}
-
-	localRemoteWrap := ""
-	if !localExists {
-		localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
-		config.FileSet(localRemote, "type", "local")
-		config.FileSet(localRemote, "nounc", "true")
+	r.tempFiles = nil
+	debug.FreeOSMemory()
+	for k, v := range r.runDefaultFlagMap {
+		_ = flag.Set(k, v)
 	}
-
-	if !cacheExists {
-		config.FileSet(cacheRemote, "type", "cache")
-		config.FileSet(cacheRemote, "remote", localRemoteWrap)
-	}
-	if c, ok := cfg["chunk_size"]; ok {
-		config.FileSet(cacheRemote, "chunk_size", c)
-	} else {
-		config.FileSet(cacheRemote, "chunk_size", "1m")
-	}
-	if c, ok := cfg["chunk_total_size"]; ok {
-		config.FileSet(cacheRemote, "chunk_total_size", c)
-	} else {
-		config.FileSet(cacheRemote, "chunk_total_size", "2m")
-	}
-	if c, ok := cfg["info_age"]; ok {
-		config.FileSet(cacheRemote, "info_age", c)
-	} else {
-		config.FileSet(cacheRemote, "info_age", infoAge.String())
-	}
-
-	if c, ok := cfg["cache-chunk-no-memory"]; ok {
-		_ = flag.Set("cache-chunk-no-memory", c)
-	} else {
-		_ = flag.Set("cache-chunk-no-memory", "true")
-	}
-	if c, ok := cfg["cache-workers"]; ok {
-		_ = flag.Set("cache-workers", c)
-	} else {
-		_ = flag.Set("cache-workers", strconv.Itoa(workers))
-	}
-	if c, ok := cfg["cache-chunk-clean-interval"]; ok {
-		_ = flag.Set("cache-chunk-clean-interval", c)
-	} else {
-		_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
-	}
-	if c, ok := cfg["cache-writes"]; ok {
-		_ = flag.Set("cache-writes", c)
-	} else {
-		_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
-	}
-
-	// Instantiate root
-	f, err := fs.NewFs(cacheRemote + ":")
-	require.NoError(t, err)
-	_ = f.Features().Purge()
-	require.NoError(t, err)
-	err = f.Mkdir("")
-	require.NoError(t, err)
-
-	return f, boltDb
 }
 
-//func mountFs(t *testing.T, f fs.Fs, mntPoint string) {
-//	if runtime.GOOS == "windows" {
-//		t.Skip("Skipping test cause on windows")
-//		return
-//	}
-//
-//	_ = flag.Set("debug-fuse", "false")
-//
-//	go func() {
-//		mountlib.DebugFUSE = false
-//		mountlib.AllowOther = true
-//		mount.Mount(f, mntPoint)
-//	}()
-//
-//	time.Sleep(time.Second * 3)
-//}
+func (r *run) randomBytes(t *testing.T, size int64) []byte {
+	testData := make([]byte, size)
+	testSize, err := rand.Read(testData)
+	require.Equal(t, size, int64(len(testData)))
+	require.Equal(t, size, int64(testSize))
+	require.NoError(t, err)
+	return testData
+}
 
-func unmountFs(t *testing.T, mntPoint string) {
-	var out []byte
+func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
+	chunk := int64(1024)
+	cnt := size / chunk
+	left := size % chunk
+	f, err := ioutil.TempFile("", "rclonecache-tempfile")
+	require.NoError(t, err)
+
+	for i := 0; i < int(cnt); i++ {
+		data := r.randomBytes(t, chunk)
+		_, _ = f.Write(data)
+	}
+	data := r.randomBytes(t, int64(left))
+	_, _ = f.Write(data)
+	_, _ = f.Seek(int64(0), 0)
+	r.tempFiles = append(r.tempFiles, f)
+
+	return f
+}
+
+func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
+	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
+	// create some rand test data
+	testData := r.randomBytes(t, size)
+
+	r.writeRemoteBytes(t, f, remote, testData)
+	return remote
+}
+
+func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
+	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
+	// create some rand test data
+	testData := r.randomBytes(t, size)
+
+	return r.writeObjectBytes(t, f, remote, testData)
+}
+
+func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
+	r.writeRemoteBytes(t, f, remote, []byte(content))
+}
+
+func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object {
+	return r.writeObjectBytes(t, f, remote, []byte(content))
+}
+
+func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
 	var err error
 
-	if runtime.GOOS == "windows" {
-		t.Skip("Skipping test cause on windows")
-		return
-	} else if runtime.GOOS == "linux" {
-		out, err = exec.Command("fusermount", "-u", mntPoint).Output()
-	} else if runtime.GOOS == "darwin" {
-		out, err = exec.Command("diskutil", "unmount", mntPoint).Output()
+	if r.useMount {
+		err = r.retryBlock(func() error {
+			return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
+		}, 3, time.Second*3)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		r.writeObjectBytes(t, f, remote, data)
+	}
+}
+
+func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
+	defer func() {
+		_ = in.Close()
+	}()
+
+	if r.useMount {
+		out, err := os.Create(path.Join(r.mntDir, remote))
+		require.NoError(t, err)
+		defer func() {
+			_ = out.Close()
+		}()
+
+		_, err = io.Copy(out, in)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		r.writeObjectReader(t, f, remote, in)
+	}
+}
+
+func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
+	in := bytes.NewReader(data)
+	_ = r.writeObjectReader(t, f, remote, in)
+	o, err := f.NewObject(remote)
+	require.NoError(t, err)
+	require.Equal(t, int64(len(data)), o.Size())
+	return o
+}
+
+func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
+	modTime := time.Now()
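+	// a size of -1 marks the length as unknown to the wrapped remote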
+	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
+	obj, err := f.Put(in, objInfo)
+	require.NoError(t, err)
+	if r.useMount {
+		r.vfs.WaitForWriters(10 * time.Second)
 	}
 
-	t.Logf("Unmount output: %v", string(out))
+	return obj
+}
+
+func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
+	var err error
+	var obj fs.Object
+
+	if r.useMount {
+		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+		obj, err = f.NewObject(remote)
+	} else {
+		in1 := bytes.NewReader(data1)
+		in2 := bytes.NewReader(data2)
+		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
+		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
+
+		obj, err = f.Put(in1, objInfo1)
+		require.NoError(t, err)
+		obj, err = f.NewObject(remote)
+		require.NoError(t, err)
+		err = obj.Update(in2, objInfo2)
+	}
+	require.NoError(t, err)
+
+	return obj
+}
+
+func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, end int64, noLengthCheck bool) []byte {
+	size := end - offset
+	checkSample := make([]byte, size)
+
+	if r.useMount {
+		f, err := os.Open(path.Join(r.mntDir, remote))
+		require.NoError(t, err)
+		defer func() {
+			_ = f.Close()
+		}()
+		_, _ = f.Seek(offset, 0)
+		totalRead, err := io.ReadFull(f, checkSample)
+		checkSample = checkSample[:totalRead]
+		if err == io.EOF || err == io.ErrUnexpectedEOF {
+			err = nil
+		}
+		require.NoError(t, err)
+		if !noLengthCheck {
+			require.Equal(t, size, int64(totalRead))
+		}
+	} else {
+		co, err := f.NewObject(remote)
+		require.NoError(t, err)
+		checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
+	}
+	if !noLengthCheck {
+		require.Equal(t, size, int64(len(checkSample)), "wrong data read size from file")
+	}
+	return checkSample
+}
+
+func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
+	size := end - offset
+	checkSample := make([]byte, size)
+	reader, err := o.Open(&fs.SeekOption{Offset: offset})
+	require.NoError(t, err)
+	totalRead, err := io.ReadFull(reader, checkSample)
+	if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
+		err = nil
+		checkSample = checkSample[:totalRead]
+	}
+	require.NoError(t, err)
+	_ = reader.Close()
+	return checkSample
+}
+
+func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
+	var err error
+	if r.useMount {
+		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
+	} else {
+		err = f.Mkdir(remote)
+	}
 	require.NoError(t, err)
 }
 
-func getCacheFs(f fs.Fs) (*cache.Fs, error) {
+func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
+	var err error
+
+	if r.useMount {
+		err = os.Remove(path.Join(r.mntDir, remote))
+	} else {
+		var obj fs.Object
+		obj, err = f.NewObject(remote)
+		if err != nil {
+			err = f.Rmdir(remote)
+		} else {
+			err = obj.Remove()
+		}
+	}
+
+	return err
+}
+
+func (r *run) list(t *testing.T, f fs.Fs, remote string) []interface{} {
+	var err error
+	var l []interface{}
+	if r.useMount {
+		var list []os.FileInfo
+		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
+		for _, ll := range list {
+			l = append(l, ll)
+		}
+	} else {
+		var list fs.DirEntries
+		list, err = f.List(remote)
+		for _, ll := range list {
+			l = append(l, ll)
+		}
+	}
+	require.NoError(t, err)
+	return l
+}
+
+func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
+	var err error
+	var l []string
+	if r.useMount {
+		var list []os.FileInfo
+		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
+		for _, ll := range list {
+			l = append(l, ll.Name())
+		}
+	} else {
+		var list fs.DirEntries
+		list, err = f.List(remote)
+		for _, ll := range list {
+			l = append(l, ll.Remote())
+		}
+	}
+	require.NoError(t, err)
+	return l
+}
+
+func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
+	in, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = in.Close()
+	}()
+
+	out, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = out.Close()
+	}()
+
+	_, err = io.Copy(out, in)
+	return err
+}
+
+func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
+	var err error
+
+	if runInstance.useMount {
+		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().DirMove != nil {
+		err = rootFs.Features().DirMove(rootFs, src, dst)
+		if err != nil {
+			return err
+		}
+	} else {
+		t.Logf("DirMove not supported by %v", rootFs)
+		return errNotSupported
+	}
+
+	return err
+}
+
+func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
+	var err error
+
+	if runInstance.useMount {
+		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().Move != nil {
+		obj1, err := rootFs.NewObject(src)
+		if err != nil {
+			return err
+		}
+		_, err = rootFs.Features().Move(obj1, dst)
+		if err != nil {
+			return err
+		}
+	} else {
+		t.Logf("Move not supported by %v", rootFs)
+		return errNotSupported
+	}
+
+	return err
+}
+
+func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
+	var err error
+
+	if r.useMount {
+		err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().Copy != nil {
+		obj, err := rootFs.NewObject(src)
+		if err != nil {
+			return err
+		}
+		_, err = rootFs.Features().Copy(obj, dst)
+		if err != nil {
+			return err
+		}
+	} else {
+		t.Logf("Copy not supported by %v", rootFs)
+		return errNotSupported
+	}
+
+	return err
+}
+
+func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
+	var err error
+
+	if r.useMount {
+		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
+		if err != nil {
+			return time.Time{}, err
+		}
+		return fi.ModTime(), nil
+	}
+	obj1, err := rootFs.NewObject(src)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return obj1.ModTime(), nil
+}
+
+func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, appendData string) error {
+	var err error
+
+	if r.useMount {
+		f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+		if err != nil {
+			return err
+		}
+		_, err = f.WriteString(data + appendData)
+		if err != nil {
+			_ = f.Close()
+			return err
+		}
+		err = f.Close()
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		obj1, err := rootFs.NewObject(src)
+		if err != nil {
+			return err
+		}
+		data1 := []byte(data + appendData)
+		in := bytes.NewReader(data1)
+		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
+		err = obj1.Update(in, objInfo1)
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+func (r *run) cleanSize(t *testing.T, size int64) int64 {
+	if r.rootIsCrypt {
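+		// reverse the crypt overhead: drop the 32 byte file header, then strip
+		// 16 bytes of overhead from each 64 KiB encrypted block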
+		denominator := int64(65536 + 16)
+		size = size - 32
+		quotient := size / denominator
+		remainder := size % denominator
+		return (quotient*65536 + remainder - 16)
+	}
+
+	return size
+}
+
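+// listenForBackgroundUpload returns a channel that receives the result of the background upload for remote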
+func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) chan error {
+	cfs, err := r.getCacheFs(f)
+	require.NoError(t, err)
+	buCh := cfs.GetBackgroundUploadChannel()
+	require.NotNil(t, buCh)
+	maxDuration := time.Minute * 3
+	if r.wrappedIsExternal {
+		maxDuration = time.Minute * 10
+	}
+
+	waitCh := make(chan error)
+	go func() {
+		var err error
+		var state cache.BackgroundUploadState
+
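+		// at most two notifications are expected: the started state followed by the final one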
+		for i := 0; i < 2; i++ {
+			select {
+			case state = <-buCh:
+				// continue
+			case <-time.After(maxDuration):
+				waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
+				return
+			}
+			checkRemote := state.Remote
+			if r.rootIsCrypt {
+				cryptFs := f.(*crypt.Fs)
+				checkRemote, err = cryptFs.DecryptFileName(checkRemote)
+				if err != nil {
+					waitCh <- err
+					return
+				}
+			}
+			if checkRemote == remote && cache.BackgroundUploadStarted != state.Status {
+				waitCh <- state.Error
+				return
+			}
+		}
+		waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
+	}()
+	return waitCh
+}
+
+func (r *run) completeBackgroundUpload(t *testing.T, remote string, waitCh chan error) {
+	var err error
+	maxDuration := time.Minute * 3
+	if r.wrappedIsExternal {
+		maxDuration = time.Minute * 10
+	}
+	select {
+	case err = <-waitCh:
+		// continue
+	case <-time.After(maxDuration):
+		t.Fatalf("Timed out waiting to complete the background upload %v", remote)
+		return
+	}
+	require.NoError(t, err)
+}
+
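+// completeAllBackgroundUploads drains upload notifications until lastRemote reports a completed upload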
+func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote string) {
+	var state cache.BackgroundUploadState
+	var err error
+
+	maxDuration := time.Minute * 5
+	if r.wrappedIsExternal {
+		maxDuration = time.Minute * 15
+	}
+	cfs, err := r.getCacheFs(f)
+	require.NoError(t, err)
+	buCh := cfs.GetBackgroundUploadChannel()
+	require.NotNil(t, buCh)
+
+	for {
+		select {
+		case state = <-buCh:
+			checkRemote := state.Remote
+			if r.rootIsCrypt {
+				cryptFs := f.(*crypt.Fs)
+				checkRemote, err = cryptFs.DecryptFileName(checkRemote)
+				require.NoError(t, err)
+			}
+			if checkRemote == lastRemote && cache.BackgroundUploadCompleted == state.Status {
+				require.NoError(t, state.Error)
+				return
+			}
+		case <-time.After(maxDuration):
+			t.Fatalf("Timed out waiting to complete the background upload %v", lastRemote)
+			return
+		}
+	}
+}
+
+func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
+	var err error
+	for i := 0; i < maxRetries; i++ {
+		err = block()
+		if err == nil {
+			return nil
+		}
+		time.Sleep(rate)
+	}
+	return err
+}
+
+func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
 	cfs, ok := f.(*cache.Fs)
 	if ok {
 		return cfs, nil
@@ -751,7 +1620,7 @@ func getCacheFs(f fs.Fs) (*cache.Fs, error) {
 		}
 	}
 
-	return nil, fmt.Errorf("didn't found a cache fs")
+	return nil, errors.New("didn't find a cache fs")
 }
 
 var (
diff --git a/backend/cache/cache_mount_unix_test.go b/backend/cache/cache_mount_unix_test.go
new file mode 100644
index 000000000..dfc478d52
--- /dev/null
+++ b/backend/cache/cache_mount_unix_test.go
@@ -0,0 +1,78 @@
+// +build !plan9,!windows,go1.7
+
+package cache_test
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"bazil.org/fuse"
+	fusefs "bazil.org/fuse/fs"
+	"github.com/ncw/rclone/cmd/mount"
+	"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/fs"
+	"github.com/stretchr/testify/require"
+)
+
+func (r *run) mountFs(t *testing.T, f fs.Fs) {
+	device := f.Name() + ":" + f.Root()
+	var options = []fuse.MountOption{
+		fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
+		fuse.Subtype("rclone"),
+		fuse.FSName(device), fuse.VolumeName(device),
+		fuse.NoAppleDouble(),
+		fuse.NoAppleXattr(),
+		fuse.AllowOther(),
+	}
+	err := os.MkdirAll(r.mntDir, os.ModePerm)
+	require.NoError(t, err)
+	c, err := fuse.Mount(r.mntDir, options...)
+	require.NoError(t, err)
+	filesys := mount.NewFS(f)
+	server := fusefs.New(c, nil)
+
+	// Serve the mount point in the background returning error to errChan
+	r.unmountRes = make(chan error, 1)
+	go func() {
+		err := server.Serve(filesys)
+		closeErr := c.Close()
+		if err == nil {
+			err = closeErr
+		}
+		r.unmountRes <- err
+	}()
+
+	// check if the mount process has an error to report
+	<-c.Ready
+	require.NoError(t, c.MountError)
+
+	r.unmountFn = func() error {
+		// Shutdown the VFS
+		filesys.VFS.Shutdown()
+		return fuse.Unmount(r.mntDir)
+	}
+
+	r.vfs = filesys.VFS
+	r.isMounted = true
+}
+
+func (r *run) unmountFs(t *testing.T, f fs.Fs) {
+	var err error
+
+	for i := 0; i < 4; i++ {
+		err = r.unmountFn()
+		if err != nil {
+			//log.Printf("signal to umount failed - retrying: %v", err)
+			time.Sleep(3 * time.Second)
+			continue
+		}
+		break
+	}
+	require.NoError(t, err)
+	err = <-r.unmountRes
+	require.NoError(t, err)
+	err = r.vfs.CleanUp()
+	require.NoError(t, err)
+	r.isMounted = false
+}
diff --git a/backend/cache/cache_mount_windows_test.go b/backend/cache/cache_mount_windows_test.go
new file mode 100644
index 000000000..b0f16e49e
--- /dev/null
+++ b/backend/cache/cache_mount_windows_test.go
@@ -0,0 +1,124 @@
+// +build windows,go1.7
+
+package cache_test
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/billziss-gh/cgofuse/fuse"
+	"github.com/ncw/rclone/cmd/cmount"
+	"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/fs"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+)
+
+// waitFor runs fn() until it returns true or the timeout expires
+func waitFor(fn func() bool) (ok bool) {
+	const totalWait = 10 * time.Second
+	const individualWait = 10 * time.Millisecond
+	for i := 0; i < int(totalWait/individualWait); i++ {
+		ok = fn()
+		if ok {
+			return ok
+		}
+		time.Sleep(individualWait)
+	}
+	return false
+}
+
+func (r *run) mountFs(t *testing.T, f fs.Fs) {
+	// FIXME implement cmount
+	t.Skip("windows not supported yet")
+
+	device := f.Name() + ":" + f.Root()
+	options := []string{
+		"-o", "fsname=" + device,
+		"-o", "subtype=rclone",
+		"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
+		"-o", "uid=-1",
+		"-o", "gid=-1",
+		"-o", "allow_other",
+		// This causes FUSE to supply O_TRUNC with the Open
+		// call which is more efficient for cmount.  However
+		// it does not work with cgofuse on Windows with
+		// WinFSP so cmount must work with or without it.
+		"-o", "atomic_o_trunc",
+		"--FileSystemName=rclone",
+	}
+
+	fsys := cmount.NewFS(f)
+	host := fuse.NewFileSystemHost(fsys)
+
+	// Serve the mount point in the background returning error to errChan
+	r.unmountRes = make(chan error, 1)
+	go func() {
+		var err error
+		ok := host.Mount(r.mntDir, options)
+		if !ok {
+			err = errors.New("mount failed")
+		}
+		r.unmountRes <- err
+	}()
+
+	// unmount
+	r.unmountFn = func() error {
+		// Shutdown the VFS
+		fsys.VFS.Shutdown()
+		if host.Unmount() {
+			if !waitFor(func() bool {
+				_, err := os.Stat(r.mntDir)
+				return err != nil
+			}) {
+				t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
+			}
+			return nil
+		}
+		return errors.New("host unmount failed")
+	}
+
+	// Wait for the filesystem to become ready, checking the file
+	// system didn't blow up before starting
+	select {
+	case err := <-r.unmountRes:
+		require.NoError(t, err)
+	case <-time.After(time.Second * 3):
+	}
+
+	// Wait for the mount point to be available on Windows
+	// On Windows the Init signal comes slightly before the mount is ready
+	if !waitFor(func() bool {
+		_, err := os.Stat(r.mntDir)
+		return err == nil
+	}) {
+		t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
+	}
+
+	r.vfs = fsys.VFS
+	r.isMounted = true
+}
+
+func (r *run) unmountFs(t *testing.T, f fs.Fs) {
+	// FIXME implement cmount
+	t.Skip("windows not supported yet")
+	var err error
+
+	for i := 0; i < 4; i++ {
+		err = r.unmountFn()
+		if err != nil {
+			//log.Printf("signal to umount failed - retrying: %v", err)
+			time.Sleep(3 * time.Second)
+			continue
+		}
+		break
+	}
+	require.NoError(t, err)
+	err = <-r.unmountRes
+	require.NoError(t, err)
+	err = r.vfs.CleanUp()
+	require.NoError(t, err)
+	r.isMounted = false
+}
diff --git a/backend/cache/directory.go b/backend/cache/directory.go
index 3457aa2ad..b429ae7a7 100644
--- a/backend/cache/directory.go
+++ b/backend/cache/directory.go
@@ -93,15 +93,7 @@ func (d *Directory) String() string {
 
 // Remote returns the remote path
 func (d *Directory) Remote() string {
-	p := cleanPath(path.Join(d.Dir, d.Name))
-	if d.CacheFs.Root() != "" {
-		p = p[len(d.CacheFs.Root()):] // trim out root
-		if len(p) > 0 {               // remove first separator
-			p = p[1:]
-		}
-	}
-
-	return p
+	return d.CacheFs.cleanRootFromPath(d.abs())
 }
 
 // abs returns the absolute path to the dir
diff --git a/backend/cache/handle.go b/backend/cache/handle.go
index f3200aa64..3fc84d070 100644
--- a/backend/cache/handle.go
+++ b/backend/cache/handle.go
@@ -9,14 +9,42 @@ import (
 	"sync"
 	"time"
 
+	"path"
+	"runtime"
+	"strings"
+
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
 )
 
+var uploaderMap = make(map[string]*backgroundWriter)
+var uploaderMapMx sync.Mutex
+
+// initBackgroundUploader returns a single background uploader per Fs, closing a running one first so it can be started again
+func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
+	// write lock to create one
+	uploaderMapMx.Lock()
+	defer uploaderMapMx.Unlock()
+	if b, ok := uploaderMap[fs.String()]; ok {
+		// if it was already started we close it so that it can be started again
+		if b.running {
+			b.close()
+		} else {
+			return b, nil
+		}
+	}
+
+	bb := newBackgroundWriter(fs)
+	uploaderMap[fs.String()] = bb
+	return uploaderMap[fs.String()], nil
+}
+
 // Handle is managing the read/write/seek operations on an open handle
 type Handle struct {
 	cachedObject   *Object
-	memory         ChunkStorage
+	cfs            *Fs
+	memory         *Memory
 	preloadQueue   chan int64
 	preloadOffset  int64
 	offset         int64
@@ -31,20 +59,21 @@ type Handle struct {
 }
 
 // NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(o *Object) *Handle {
+func NewObjectHandle(o *Object, cfs *Fs) *Handle {
 	r := &Handle{
 		cachedObject:  o,
+		cfs:           cfs,
 		offset:        0,
 		preloadOffset: -1, // -1 to trigger the first preload
 
-		UseMemory: o.CacheFs.chunkMemory,
+		UseMemory: cfs.chunkMemory,
 		reading:   false,
 	}
 	r.seenOffsets = make(map[int64]bool)
 	r.memory = NewMemory(-1)
 
 	// create a larger buffer to queue up requests
-	r.preloadQueue = make(chan int64, o.CacheFs.totalWorkers*10)
+	r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
 	r.confirmReading = make(chan bool)
 	r.startReadWorkers()
 	return r
@@ -52,11 +81,11 @@ func NewObjectHandle(o *Object) *Handle {
 
 // cacheFs is a convenience method to get the parent cache FS of the object's manager
 func (r *Handle) cacheFs() *Fs {
-	return r.cachedObject.CacheFs
+	return r.cfs
 }
 
 // storage is a convenience method to get the persistent storage of the object's manager
-func (r *Handle) storage() Storage {
+func (r *Handle) storage() *Persistent {
 	return r.cacheFs().cache
 }
 
@@ -76,7 +105,7 @@ func (r *Handle) startReadWorkers() {
 		if !r.cacheFs().plexConnector.isConnected() {
 			err := r.cacheFs().plexConnector.authenticate()
 			if err != nil {
-				fs.Infof(r, "failed to authenticate to Plex: %v", err)
+				fs.Errorf(r, "failed to authenticate to Plex: %v", err)
 			}
 		}
 		if r.cacheFs().plexConnector.isConnected() {
@@ -113,7 +142,7 @@ func (r *Handle) scaleWorkers(desired int) {
 	}
 	// ignore first scale out from 0
 	if current != 0 {
-		fs.Infof(r, "scale workers to %v", desired)
+		fs.Debugf(r, "scale workers to %v", desired)
 	}
 }
 
@@ -156,7 +185,6 @@ func (r *Handle) queueOffset(offset int64) {
 		if r.UseMemory {
 			go r.memory.CleanChunksByNeed(offset)
 		}
-		go r.cacheFs().CleanUpCache(false)
 		r.confirmExternalReading()
 		r.preloadOffset = offset
 
@@ -305,7 +333,6 @@ func (r *Handle) Close() error {
 		}
 	}
 
-	go r.cacheFs().CleanUpCache(false)
 	fs.Debugf(r, "cache reader closed %v", r.offset)
 	return nil
 }
@@ -357,11 +384,11 @@ func (w *worker) String() string {
 //   - if it supports seeking it will seek to the desired offset and return the same reader
 //   - if it doesn't support seeking it will close a possible existing one and open at the desired offset
 //   - if there's no reader associated with this worker, it will create one
-func (w *worker) reader(offset, end int64) (io.ReadCloser, error) {
+func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
 	var err error
 	r := w.rc
 	if w.rc == nil {
-		r, err = w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
+		r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
 			return w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
 		})
 		if err != nil {
@@ -370,14 +397,16 @@ func (w *worker) reader(offset, end int64) (io.ReadCloser, error) {
 		return r, nil
 	}
 
-	seekerObj, ok := r.(io.Seeker)
-	if ok {
-		_, err = seekerObj.Seek(offset, os.SEEK_SET)
-		return r, err
+	if !closeOpen {
+		seekerObj, ok := r.(io.Seeker)
+		if ok {
+			_, err = seekerObj.Seek(offset, os.SEEK_SET)
+			return r, err
+		}
 	}
 
 	_ = w.rc.Close()
-	return w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
+	return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
 		r, err = w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
 		if err != nil {
 			return nil, err
@@ -463,10 +492,18 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 		time.Sleep(time.Second * time.Duration(retry))
 	}
 
-	w.rc, err = w.reader(chunkStart, chunkEnd)
+	closeOpen := false
+	if retry > 0 {
+		closeOpen = true
+	}
+	w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen)
 	// we seem to be getting only errors so we abort
 	if err != nil {
 		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
+		err = w.r.cachedObject.refreshFromSource(true)
+		if err != nil {
+			fs.Errorf(w, "%v", err)
+		}
 		w.download(chunkStart, chunkEnd, retry+1)
 		return
 	}
@@ -476,6 +513,10 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	sourceRead, err = io.ReadFull(w.rc, data)
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
+		err = w.r.cachedObject.refreshFromSource(true)
+		if err != nil {
+			fs.Errorf(w, "%v", err)
+		}
 		w.download(chunkStart, chunkEnd, retry+1)
 		return
 	}
@@ -483,7 +524,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	if err == io.ErrUnexpectedEOF {
 		fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart))
 	} else {
-		fs.Debugf(w, "downloaded chunk %v", fs.SizeSuffix(chunkStart))
+		fs.Debugf(w, "downloaded chunk %v", chunkStart)
 	}
 
 	if w.r.UseMemory {
@@ -499,6 +540,115 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	}
 }
 
+const (
+	// BackgroundUploadStarted is a state for a temp file that has started upload
+	BackgroundUploadStarted = iota
+	// BackgroundUploadCompleted is a state for a temp file that has completed upload
+	BackgroundUploadCompleted
+	// BackgroundUploadError is a state for a temp file that failed to upload
+	BackgroundUploadError
+)
+
+// BackgroundUploadState is an entity that maps to an existing file stored on the temp fs
+type BackgroundUploadState struct {
+	Remote string
+	Status int
+	Error  error
+}
+
+type backgroundWriter struct {
+	fs       *Fs
+	stateCh  chan int
+	running  bool
+	notifyCh chan BackgroundUploadState
+}
+
+func newBackgroundWriter(f *Fs) *backgroundWriter {
+	b := &backgroundWriter{
+		fs:       f,
+		stateCh:  make(chan int),
+		notifyCh: make(chan BackgroundUploadState),
+	}
+
+	return b
+}
+
+// close stops the background writer for good
+func (b *backgroundWriter) close() {
+	b.stateCh <- 2
+}
+
+// pause suspends uploads until play is called
+func (b *backgroundWriter) pause() {
+	b.stateCh <- 1
+}
+
+// play resumes uploads after a pause
+func (b *backgroundWriter) play() {
+	b.stateCh <- 0
+}
+
+func (b *backgroundWriter) notify(remote string, status int, err error) {
+	state := BackgroundUploadState{
+		Remote: remote,
+		Status: status,
+		Error:  err,
+	}
+	select {
+	case b.notifyCh <- state:
+		fs.Debugf(remote, "notified background upload state: %v", state.Status)
+	default:
+	}
+}
+
+func (b *backgroundWriter) run() {
+	state := 0 // 0 = running, 1 = paused, 2 = closed
+	for {
+		b.running = true
+		select {
+		case s := <-b.stateCh:
+			state = s
+		default:
+			// no state change requested; keep the current state
+		}
+		switch state {
+		case 1: // paused
+			runtime.Gosched()
+			time.Sleep(time.Millisecond * 500)
+			continue
+		case 2: // closed
+			b.running = false
+			return
+		}
+
+		absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
+		if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
+			time.Sleep(time.Second)
+			continue
+		}
+
+		remote := b.fs.cleanRootFromPath(absPath)
+		b.notify(remote, BackgroundUploadStarted, nil)
+		fs.Infof(remote, "background upload: started upload")
+		err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
+		if err != nil {
+			b.notify(remote, BackgroundUploadError, err)
+			_ = b.fs.cache.rollbackPendingUpload(absPath)
+			fs.Errorf(remote, "background upload: %v", err)
+			continue
+		}
+		fs.Infof(remote, "background upload: uploaded entry")
+		err = b.fs.cache.removePendingUpload(absPath)
+		if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
+			fs.Errorf(remote, "background upload: %v", err)
+		}
+		parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote)))
+		err = b.fs.cache.ExpireDir(parentCd)
+		if err != nil {
+			fs.Errorf(parentCd, "background upload: cache expire error: %v", err)
+		}
+		fs.Infof(remote, "finished background upload")
+		b.notify(remote, BackgroundUploadCompleted, nil)
+	}
+}
+
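+// A consumer holding a reference to the backgroundWriter could watch upload
+// results on notifyCh. This is an illustrative sketch, not an API guarantee;
+// note that notify sends non-blocking, so a receiver must already be waiting:
+//
+//	go func() {
+//		for state := range b.notifyCh {
+//			if state.Status == BackgroundUploadError {
+//				fs.Errorf(state.Remote, "background upload failed: %v", state.Error)
+//			}
+//		}
+//	}()
+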
 // Check the interfaces are satisfied
 var (
 	_ io.ReadCloser = (*Handle)(nil)
diff --git a/backend/cache/object.go b/backend/cache/object.go
index c17b458fb..68c0becec 100644
--- a/backend/cache/object.go
+++ b/backend/cache/object.go
@@ -3,24 +3,28 @@
 package cache
 
 import (
-	"encoding/json"
 	"io"
 	"os"
 	"path"
 	"sync"
 	"time"
 
-	"strconv"
-
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/lib/readers"
+	"github.com/pkg/errors"
+)
+
+const (
+	objectInCache       = "Object"
+	objectPendingUpload = "TempObject"
 )
 
 // Object is a generic file like object that stores basic information about it
 type Object struct {
 	fs.Object `json:"-"`
 
+	ParentFs      fs.Fs                `json:"-"`        // parent fs
 	CacheFs       *Fs                  `json:"-"`        // cache fs
 	Name          string               `json:"name"`     // name of the directory
 	Dir           string               `json:"dir"`      // abs path of the object
@@ -29,79 +33,64 @@ type Object struct {
 	CacheStorable bool                 `json:"storable"` // says whether this object can be stored
 	CacheType     string               `json:"cacheType"`
 	CacheTs       time.Time            `json:"cacheTs"`
-	cacheHashes   map[hash.Type]string // all supported hashes cached
+	CacheHashes   map[hash.Type]string // all supported hashes cached
 
 	refreshMutex sync.Mutex
 }
 
 // NewObject builds one from a generic fs.Object
-func NewObject(f *Fs, remote string) *Object { //0745 379 768
+func NewObject(f *Fs, remote string) *Object {
 	fullRemote := path.Join(f.Root(), remote)
 	dir, name := path.Split(fullRemote)
 
+	cacheType := objectInCache
+	parentFs := f.UnWrap()
+	if f.tempWritePath != "" {
+		_, err := f.cache.SearchPendingUpload(fullRemote)
+		if err == nil { // queued for upload
+			cacheType = objectPendingUpload
+			parentFs = f.tempFs
+			fs.Debugf(fullRemote, "pending upload found")
+		}
+	}
+
 	co := &Object{
+		ParentFs:      parentFs,
 		CacheFs:       f,
 		Name:          cleanPath(name),
 		Dir:           cleanPath(dir),
 		CacheModTime:  time.Now().UnixNano(),
 		CacheSize:     0,
 		CacheStorable: false,
-		CacheType:     "Object",
+		CacheType:     cacheType,
 		CacheTs:       time.Now(),
 	}
 	return co
 }
 
-// MarshalJSON is needed to override the hashes map (needed to support older versions of Go)
-func (o *Object) MarshalJSON() ([]byte, error) {
-	hashes := make(map[string]string)
-	for k, v := range o.cacheHashes {
-		hashes[strconv.Itoa(int(k))] = v
-	}
-
-	type Alias Object
-	return json.Marshal(&struct {
-		Hashes map[string]string `json:"hashes"`
-		*Alias
-	}{
-		Alias:  (*Alias)(o),
-		Hashes: hashes,
-	})
-}
-
-// UnmarshalJSON is needed to override the CacheHashes map (needed to support older versions of Go)
-func (o *Object) UnmarshalJSON(b []byte) error {
-	type Alias Object
-	aux := &struct {
-		Hashes map[string]string `json:"hashes"`
-		*Alias
-	}{
-		Alias: (*Alias)(o),
-	}
-	if err := json.Unmarshal(b, &aux); err != nil {
-		return err
-	}
-
-	o.cacheHashes = make(map[hash.Type]string)
-	for k, v := range aux.Hashes {
-		ht, _ := strconv.Atoi(k)
-		o.cacheHashes[hash.Type(ht)] = v
-	}
-
-	return nil
-}
-
 // ObjectFromOriginal builds one from a generic fs.Object
 func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 	var co *Object
 	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
-
 	dir, name := path.Split(fullRemote)
+
+	cacheType := objectInCache
+	parentFs := f.UnWrap()
+	if f.tempWritePath != "" {
+		_, err := f.cache.SearchPendingUpload(fullRemote)
+		if err == nil { // queued for upload
+			cacheType = objectPendingUpload
+			parentFs = f.tempFs
+			fs.Debugf(fullRemote, "pending upload found")
+		}
+	}
+
 	co = &Object{
+		ParentFs:  parentFs,
 		CacheFs:   f,
 		Name:      cleanPath(name),
 		Dir:       cleanPath(dir),
-		CacheType: "Object",
+		CacheType: cacheType,
 		CacheTs:   time.Now(),
 	}
 	co.updateData(o)
@@ -114,7 +103,7 @@ func (o *Object) updateData(source fs.Object) {
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
-	o.cacheHashes = make(map[hash.Type]string)
+	o.CacheHashes = make(map[hash.Type]string)
 }
 
 // Fs returns its FS info
@@ -133,14 +122,7 @@ func (o *Object) String() string {
 // Remote returns the remote path
 func (o *Object) Remote() string {
 	p := path.Join(o.Dir, o.Name)
-	if o.CacheFs.Root() != "" {
-		p = p[len(o.CacheFs.Root()):] // trim out root
-		if len(p) > 0 {               // remove first separator
-			p = p[1:]
-		}
-	}
-
-	return p
+	return o.CacheFs.cleanRootFromPath(p)
 }
 
 // abs returns the absolute path to the object
@@ -148,17 +130,6 @@ func (o *Object) abs() string {
 	return path.Join(o.Dir, o.Name)
 }
 
-// parentRemote returns the absolute path parent remote
-func (o *Object) parentRemote() string {
-	absPath := o.abs()
-	return cleanPath(path.Dir(absPath))
-}
-
-// parentDir returns the absolute path parent remote
-func (o *Object) parentDir() *Directory {
-	return NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
-}
-
 // ModTime returns the cached ModTime
 func (o *Object) ModTime() time.Time {
 	return time.Unix(0, o.CacheModTime)
@@ -175,17 +146,24 @@ func (o *Object) Storable() bool {
 }
 
 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource() error {
+func (o *Object) refreshFromSource(force bool) error {
 	o.refreshMutex.Lock()
 	defer o.refreshMutex.Unlock()
+	var err error
+	var liveObject fs.Object
 
-	if o.Object != nil {
+	if o.Object != nil && !force {
 		return nil
 	}
-
-	liveObject, err := o.CacheFs.Fs.NewObject(o.Remote())
+	if o.isTempFile() {
+		liveObject, err = o.ParentFs.NewObject(o.Remote())
+		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
+	} else {
+		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
+		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
+	}
 	if err != nil {
-		fs.Errorf(o, "error refreshing object: %v", err)
+		fs.Errorf(o, "error refreshing object in : %v", err)
 		return err
 	}
 	o.updateData(liveObject)
@@ -196,7 +174,7 @@ func (o *Object) refreshFromSource() error {
 
 // SetModTime sets the ModTime of this object
 func (o *Object) SetModTime(t time.Time) error {
-	if err := o.refreshFromSource(); err != nil {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 
@@ -207,19 +185,19 @@ func (o *Object) SetModTime(t time.Time) error {
 
 	o.CacheModTime = t.UnixNano()
 	o.persist()
-	fs.Debugf(o.Fs(), "updated ModTime %v: %v", o, t)
+	fs.Debugf(o, "updated ModTime: %v", t)
 
 	return nil
 }
 
 // Open is used to request a specific part of the file using fs.RangeOption
 func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
-	if err := o.refreshFromSource(); err != nil {
+	if err := o.refreshFromSource(true); err != nil {
 		return nil, err
 	}
 
 	var err error
-	cacheReader := NewObjectHandle(o)
+	cacheReader := NewObjectHandle(o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -239,23 +217,34 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 
 // Update will change the object data
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.refreshFromSource(); err != nil {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
-	fs.Infof(o, "updating object contents with size %v", src.Size())
-
-	// deleting cached chunks and info to be replaced with new ones
-	_ = o.CacheFs.cache.RemoveObject(o.abs())
+	// pause background uploads if active
+	if o.CacheFs.tempWritePath != "" {
+		o.CacheFs.backgroundRunner.pause()
+		defer o.CacheFs.backgroundRunner.play()
+		// don't allow started uploads
+		if o.isTempFile() && o.tempFileStartedUpload() {
+			return errors.Errorf("%v is currently uploading, can't update", o)
+		}
+	}
+	fs.Debugf(o, "updating object contents with size %v", src.Size())
 
+	// FIXME use reliable upload
 	err := o.Object.Update(in, src, options...)
 	if err != nil {
 		fs.Errorf(o, "error updating source: %v", err)
 		return err
 	}
 
+	// deleting cached chunks and info to be replaced with new ones
+	_ = o.CacheFs.cache.RemoveObject(o.abs())
+
 	o.CacheModTime = src.ModTime().UnixNano()
 	o.CacheSize = src.Size()
-	o.cacheHashes = make(map[hash.Type]string)
+	o.CacheHashes = make(map[hash.Type]string)
+	o.CacheTs = time.Now()
 	o.persist()
 
 	return nil
@@ -263,41 +252,50 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 // Remove deletes the object from both the cache and the source
 func (o *Object) Remove() error {
-	if err := o.refreshFromSource(); err != nil {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
+	// pause background uploads if active
+	if o.CacheFs.tempWritePath != "" {
+		o.CacheFs.backgroundRunner.pause()
+		defer o.CacheFs.backgroundRunner.play()
+		// don't allow started uploads
+		if o.isTempFile() && o.tempFileStartedUpload() {
+			return errors.Errorf("%v is currently uploading, can't delete", o)
+		}
+	}
 	err := o.Object.Remove()
 	if err != nil {
 		return err
 	}
-	fs.Infof(o, "removing object")
 
+	fs.Debugf(o, "removing object")
 	_ = o.CacheFs.cache.RemoveObject(o.abs())
-	return err
+	_ = o.CacheFs.cache.removePendingUpload(o.abs())
+	_ = o.CacheFs.cache.ExpireDir(NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote()))))
+
+	return nil
 }
 
 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
 func (o *Object) Hash(ht hash.Type) (string, error) {
-	if o.cacheHashes == nil {
-		o.cacheHashes = make(map[hash.Type]string)
+	if o.CacheHashes == nil {
+		o.CacheHashes = make(map[hash.Type]string)
 	}
 
-	cachedHash, found := o.cacheHashes[ht]
+	cachedHash, found := o.CacheHashes[ht]
 	if found {
 		return cachedHash, nil
 	}
-
-	if err := o.refreshFromSource(); err != nil {
+	if err := o.refreshFromSource(false); err != nil {
 		return "", err
 	}
-
 	liveHash, err := o.Object.Hash(ht)
 	if err != nil {
 		return "", err
 	}
-
-	o.cacheHashes[ht] = liveHash
+	o.CacheHashes[ht] = liveHash
 
 	o.persist()
 	fs.Debugf(o, "object hash cached: %v", liveHash)
@@ -314,6 +312,25 @@ func (o *Object) persist() *Object {
 	return o
 }
 
+func (o *Object) isTempFile() bool {
+	_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
+	if err != nil {
+		o.CacheType = objectInCache
+		return false
+	}
+
+	o.CacheType = objectPendingUpload
+	return true
+}
+
+func (o *Object) tempFileStartedUpload() bool {
+	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
+	if err != nil {
+		return false
+	}
+	return started
+}
+
 var (
 	_ fs.Object = (*Object)(nil)
 )
diff --git a/backend/cache/plex.go b/backend/cache/plex.go
index 5b2b7f095..0bc40901a 100644
--- a/backend/cache/plex.go
+++ b/backend/cache/plex.go
@@ -12,6 +12,9 @@ import (
 
 	"sync"
 
+	"bytes"
+	"io/ioutil"
+
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
 )
@@ -127,6 +130,17 @@ func (p *plexConnector) isConfigured() bool {
 }
 
 func (p *plexConnector) isPlaying(co *Object) bool {
+	var err error
+
+	remote := co.Remote()
+	if cr, yes := p.f.isWrappedByCrypt(); yes {
+		remote, err = cr.DecryptFileName(co.Remote())
+		if err != nil {
+			fs.Errorf("plex", "can not decrypt wrapped file: %v", err)
+			return false
+		}
+	}
+
 	isPlaying := false
 	req, err := http.NewRequest("GET", fmt.Sprintf("%s/status/sessions", p.url.String()), nil)
 	if err != nil {
@@ -180,31 +194,12 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 		if err != nil {
 			return false
 		}
-		var data map[string]interface{}
-		err = json.NewDecoder(resp.Body).Decode(&data)
+		var data []byte
+		data, err = ioutil.ReadAll(resp.Body)
 		if err != nil {
 			return false
 		}
-
-		remote := co.Remote()
-		if cr, yes := co.CacheFs.isWrappedByCrypt(); yes {
-			remote, err = cr.DecryptFileName(co.Remote())
-			if err != nil {
-				fs.Errorf("plex", "can not decrypt wrapped file: %v", err)
-				continue
-			}
-		}
-		fpGen, ok := get(data, "MediaContainer", "Metadata", 0, "Media", 0, "Part", 0, "file")
-		if !ok {
-			fs.Errorf("plex", "failed to understand: %v", data)
-			continue
-		}
-		fp, ok := fpGen.(string)
-		if !ok {
-			fs.Errorf("plex", "failed to understand: %v", fp)
-			continue
-		}
-		if strings.Contains(fp, remote) {
+		if bytes.Contains(data, []byte(remote)) {
 			isPlaying = true
 			break
 		}
diff --git a/backend/cache/storage_memory.go b/backend/cache/storage_memory.go
index ddb7825f3..f66b405e3 100644
--- a/backend/cache/storage_memory.go
+++ b/backend/cache/storage_memory.go
@@ -14,8 +14,6 @@ import (
 
 // Memory is a wrapper of transient storage for a go-cache store
 type Memory struct {
-	ChunkStorage
-
 	db *cache.Cache
 }
 
diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go
index b997c813e..bbc77c2a4 100644
--- a/backend/cache/storage_persistent.go
+++ b/backend/cache/storage_persistent.go
@@ -16,8 +16,11 @@ import (
 
 	"io/ioutil"
 
+	"fmt"
+
 	bolt "github.com/coreos/bbolt"
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
 )
 
@@ -26,6 +29,7 @@ const (
 	RootBucket   = "root"
 	RootTsBucket = "rootTs"
 	DataTsBucket = "dataTs"
+	tempBucket   = "pending"
 )
 
 // Features flags for this storage type
@@ -42,6 +46,12 @@ func GetPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
 	boltMapMx.Lock()
 	defer boltMapMx.Unlock()
 	if b, ok := boltMap[dbPath]; ok {
+		if !b.open {
+			err := b.connect()
+			if err != nil {
+				return nil, err
+			}
+		}
 		return b, nil
 	}
 
@@ -59,15 +69,26 @@ type chunkInfo struct {
 	Size   int64
 }
 
+type tempUploadInfo struct {
+	DestPath string
+	AddedOn  time.Time
+	Started  bool
+}
+
+// String representation of a tempUploadInfo
+func (t *tempUploadInfo) String() string {
+	return fmt.Sprintf("%v - %v (%v)", t.DestPath, t.Started, t.AddedOn)
+}
+
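+// Each queue entry lives in the bolt "pending" bucket, keyed by its DestPath
+// and encoded as JSON, roughly like this (values are illustrative):
+//
+//	{"DestPath":"/local/path/file.bin","AddedOn":"2018-01-30T00:05:04Z","Started":false}
+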
 // Persistent is a wrapper of persistent storage for a bolt.DB file
 type Persistent struct {
-	Storage
-
-	dbPath     string
-	dataPath   string
-	db         *bolt.DB
-	cleanupMux sync.Mutex
-	features   *Features
+	dbPath       string
+	dataPath     string
+	open         bool
+	db           *bolt.DB
+	cleanupMux   sync.Mutex
+	tempQueueMux sync.Mutex
+	features     *Features
 }
 
 // newPersistent builds a new wrapper and connects to the bolt.DB file
@@ -78,7 +99,7 @@ func newPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
 		features: f,
 	}
 
-	err := b.Connect()
+	err := b.connect()
 	if err != nil {
 		fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? %v", err)
 		return nil, err
@@ -92,41 +113,32 @@ func (b *Persistent) String() string {
 	return "<Cache DB> " + b.dbPath
 }
 
-// Connect creates a connection to the configured file
+// connect creates a connection to the configured file
 // refreshDb will delete the file before to create an empty DB if it's set to true
-func (b *Persistent) Connect() error {
-	var db *bolt.DB
+func (b *Persistent) connect() error {
 	var err error
 
-	if b.features.PurgeDb {
-		err := os.Remove(b.dbPath)
-		if err != nil {
-			fs.Errorf(b, "failed to remove cache file: %v", err)
-		}
-		err = os.RemoveAll(b.dataPath)
-		if err != nil {
-			fs.Errorf(b, "failed to remove cache data: %v", err)
-		}
-	}
-
 	err = os.MkdirAll(b.dataPath, os.ModePerm)
 	if err != nil {
 		return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
 	}
-	db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: 1 * time.Second})
+	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: 1 * time.Second})
 	if err != nil {
 		return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
 	}
-
-	_ = db.Update(func(tx *bolt.Tx) error {
+	if b.features.PurgeDb {
+		b.Purge()
+	}
+	_ = b.db.Update(func(tx *bolt.Tx) error {
 		_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
 		_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
 		_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))
+		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
 
 		return nil
 	})
 
-	b.db = db
+	b.open = true
 	return nil
 }
 
@@ -136,7 +148,9 @@ func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *b
 	cleanPath(dir)
 
 	entries := strings.FieldsFunc(dir, func(c rune) bool {
-		return os.PathSeparator == c
+		// cover Windows where rclone still uses '/' as path separator
+		// this should be safe as '/' is not a valid character in Windows file names
+		return (os.PathSeparator == c || c == rune('/'))
 	})
 	bucket := tx.Bucket([]byte(RootBucket))
 
@@ -478,6 +492,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	b.cleanupMux.Lock()
 	defer b.cleanupMux.Unlock()
 	var cntChunks int
+	var roughlyCleaned fs.SizeSuffix
 
 	err := b.db.Update(func(tx *bolt.Tx) error {
 		dataTsBucket := tx.Bucket([]byte(DataTsBucket))
@@ -499,6 +514,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 
 		if totalSize > maxSize {
 			needToClean := totalSize - maxSize
+			roughlyCleaned = fs.SizeSuffix(needToClean)
 			for k, v := c.First(); k != nil; k, v = c.Next() {
 				var ci chunkInfo
 				err := json.Unmarshal(v, &ci)
@@ -521,7 +537,10 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 				}
 			}
 		}
-		fs.Infof("cache", "deleted (%v) chunks", cntChunks)
+		if cntChunks > 0 {
+			fs.Infof("cache-cleanup", "deleted %v chunks, est. size: %v", cntChunks, roughlyCleaned.String())
+		}
 		return nil
 	})
 
@@ -691,6 +710,313 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
 	return err
 }
 
+func (b *Persistent) dumpRoot() string {
+	var itBuckets func(buk *bolt.Bucket) map[string]interface{}
+
+	itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
+		m := make(map[string]interface{})
+		c := buk.Cursor()
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			if v == nil {
+				buk2 := buk.Bucket(k)
+				m[string(k)] = itBuckets(buk2)
+			} else {
+				m[string(k)] = "-"
+			}
+		}
+		return m
+	}
+	var mm map[string]interface{}
+	_ = b.db.View(func(tx *bolt.Tx) error {
+		mm = itBuckets(tx.Bucket([]byte(RootBucket)))
+		return nil
+	})
+	raw, _ := json.MarshalIndent(mm, "", "  ")
+	return string(raw)
+}
+
+// addPendingUpload adds a new file to the pending queue of uploads
+func (b *Persistent) addPendingUpload(destPath string, started bool) error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
+		if err != nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+		tempObj := &tempUploadInfo{
+			DestPath: destPath,
+			AddedOn:  time.Now(),
+			Started:  started,
+		}
+
+		// cache Object Info
+		encoded, err := json.Marshal(tempObj)
+		if err != nil {
+			return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
+		}
+		err = bucket.Put([]byte(destPath), []byte(encoded))
+		if err != nil {
+			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+		}
+
+		return nil
+	})
+}
+
+// getPendingUpload returns the next file from the pending queue of uploads
+func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (destPath string, err error) {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+
+	err = b.db.Update(func(tx *bolt.Tx) error {
+		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
+		if err != nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+
+		c := bucket.Cursor()
+		for k, v := c.Seek([]byte(inRoot)); k != nil && bytes.HasPrefix(k, []byte(inRoot)); k, v = c.Next() {
+			var tempObj = &tempUploadInfo{}
+			err = json.Unmarshal(v, tempObj)
+			if err != nil {
+				fs.Errorf(b, "failed to read pending upload: %v", err)
+				continue
+			}
+			// skip over started uploads
+			if tempObj.Started || time.Now().Before(tempObj.AddedOn.Add(waitTime)) {
+				continue
+			}
+
+			tempObj.Started = true
+			v2, err := json.Marshal(tempObj)
+			if err != nil {
+				fs.Errorf(b, "failed to update pending upload: %v", err)
+				continue
+			}
+			err = bucket.Put(k, v2)
+			if err != nil {
+				fs.Errorf(b, "failed to update pending upload: %v", err)
+				continue
+			}
+
+			destPath = tempObj.DestPath
+			return nil
+		}
+
+		return errors.Errorf("no pending upload found")
+	})
+
+	return destPath, err
+}
+
+// SearchPendingUpload returns whether a file in the pending queue of uploads has started
+func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error) {
+	err = b.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tempBucket))
+		if bucket == nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+
+		var tempObj = &tempUploadInfo{}
+		v := bucket.Get([]byte(remote))
+		err = json.Unmarshal(v, tempObj)
+		if err != nil {
+			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+		}
+
+		started = tempObj.Started
+		return nil
+	})
+
+	return started, err
+}
+
+// searchPendingUploadFromDir returns the files currently pending upload from a single dir
+func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, err error) {
+	err = b.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tempBucket))
+		if bucket == nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+
+		c := bucket.Cursor()
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			var tempObj = &tempUploadInfo{}
+			err = json.Unmarshal(v, tempObj)
+			if err != nil {
+				fs.Errorf(b, "failed to read pending upload: %v", err)
+				continue
+			}
+			parentDir := cleanPath(path.Dir(tempObj.DestPath))
+			if dir == parentDir {
+				remotes = append(remotes, tempObj.DestPath)
+			}
+		}
+
+		return nil
+	})
+
+	return remotes, err
+}
+
+func (b *Persistent) rollbackPendingUpload(remote string) error {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
+		if err != nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+		var tempObj = &tempUploadInfo{}
+		v := bucket.Get([]byte(remote))
+		err = json.Unmarshal(v, tempObj)
+		if err != nil {
+			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+		}
+		tempObj.Started = false
+		v2, err := json.Marshal(tempObj)
+		if err != nil {
+			return errors.Errorf("pending upload not updated %v", err)
+		}
+		err = bucket.Put([]byte(tempObj.DestPath), v2)
+		if err != nil {
+			return errors.Errorf("pending upload not updated %v", err)
+		}
+		return nil
+	})
+}
+
+func (b *Persistent) removePendingUpload(remote string) error {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
+		if err != nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+		return bucket.Delete([]byte(remote))
+	})
+}
+
+// updatePendingUpload allows updating an existing item in the queue while checking, in the same
+// transaction, that it hasn't started. If it has started, it will not allow the update
+func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUploadInfo) error) error {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
+		if err != nil {
+			return errors.Errorf("couldn't bucket for %v", tempBucket)
+		}
+
+		var tempObj = &tempUploadInfo{}
+		v := bucket.Get([]byte(remote))
+		err = json.Unmarshal(v, tempObj)
+		if err != nil {
+			return errors.Errorf("pending upload (%v) not found %v", remote, err)
+		}
+		if tempObj.Started {
+			return errors.Errorf("pending upload already started %v", remote)
+		}
+		err = fn(tempObj)
+		if err != nil {
+			return err
+		}
+		if remote != tempObj.DestPath {
+			err := bucket.Delete([]byte(remote))
+			if err != nil {
+				return err
+			}
+			// if the destination was emptied by fn, drop the entry entirely
+			if tempObj.DestPath == "" {
+				return nil
+			}
+		}
+		v2, err := json.Marshal(tempObj)
+		if err != nil {
+			return errors.Errorf("pending upload not updated %v", err)
+		}
+		err = bucket.Put([]byte(tempObj.DestPath), v2)
+		if err != nil {
+			return errors.Errorf("pending upload not updated %v", err)
+		}
+
+		return nil
+	})
+}
+
+// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
+// TO BE USED IN TESTING ONLY
+func (b *Persistent) SetPendingUploadToStarted(remote string) error {
+	return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
+		item.Started = true
+		return nil
+	})
+}
+
+// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
+func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		_ = tx.DeleteBucket([]byte(tempBucket))
+		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
+		if err != nil {
+			return err
+		}
+
+		var queuedEntries []fs.Object
+		err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
+			for _, o := range entries {
+				if oo, ok := o.(fs.Object); ok {
+					queuedEntries = append(queuedEntries, oo)
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+
+		fs.Debugf(cacheFs, "reconciling temporary uploads")
+		for _, queuedEntry := range queuedEntries {
+			destPath := path.Join(cacheFs.Root(), queuedEntry.Remote())
+			tempObj := &tempUploadInfo{
+				DestPath: destPath,
+				AddedOn:  time.Now(),
+				Started:  false,
+			}
+
+			// cache Object Info
+			encoded, err := json.Marshal(tempObj)
+			if err != nil {
+				return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+			}
+			err = bucket.Put([]byte(destPath), []byte(encoded))
+			if err != nil {
+				return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+			}
+			fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
+		}
+
+		return nil
+	})
+}
+
+// PurgeTempUploads will remove all the pending uploads from the queue
+// TO BE USED IN TESTING ONLY
+func (b *Persistent) PurgeTempUploads() {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+
+	_ = b.db.Update(func(tx *bolt.Tx) error {
+		_ = tx.DeleteBucket([]byte(tempBucket))
+		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
+		return nil
+	})
+}
+
 // Close should be called when the program ends gracefully
 func (b *Persistent) Close() {
 	b.cleanupMux.Lock()
@@ -700,6 +1026,7 @@ func (b *Persistent) Close() {
 	if err != nil {
 		fs.Errorf(b, "closing handle: %v", err)
 	}
+	b.open = false
 }
 
 // itob returns an 8-byte big endian representation of v.
diff --git a/docs/content/cache.md b/docs/content/cache.md
index 3501e26c9..49cfc27a2 100644
--- a/docs/content/cache.md
+++ b/docs/content/cache.md
@@ -109,12 +109,38 @@ To start a cached mount
 
     rclone mount --allow-other test-cache: /var/tmp/test-cache
 
+### Write Features ###
+
+### Offline uploading ###
+
+In an effort to make writing through cache more reliable, the backend
+now supports offline uploading, which can be activated by specifying a
+`cache-tmp-upload-path`.
+
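+For example, a cached mount with offline uploading enabled could be started
+like this (paths are illustrative):
+
+    rclone mount --allow-other --cache-tmp-upload-path=/var/tmp/cache-upload test-cache: /var/tmp/test-cache
+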
+A file goes through these states when using this feature:
+
+1. An upload is started (usually by copying a file on the cache remote)
+2. When the copy to the temporary location is complete, the file becomes part
+of the cached remote and looks and behaves like any other file (reading included)
+3. After `cache-tmp-wait-time` passes and the file is next in line, `rclone move`
+is used to move the file to the cloud provider
+4. Reading the file still works during the upload but most modifications to it will be prohibited
+5. Once the move is complete, the file is unlocked for modifications as it
+behaves like any other regular file
+6. If the file is being read through `cache` when it is deleted from
+the temporary path, `cache` will simply swap the source to the cloud
+provider without interrupting the reading (a small blip can happen though)
+
+Files are uploaded in sequence and only one file is uploaded at a time.
+Uploads are stored in a queue and processed in the order they were added.
+Both the queue and the temporary storage are persistent across restarts and even purges of the cache.
+
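+With verbose logging, the state transitions above surface roughly like this
+(the file name is illustrative):
+
+    INFO  : media/movie.mkv: background upload: started upload
+    INFO  : media/movie.mkv: background upload: uploaded entry
+    INFO  : media/movie.mkv: finished background upload
+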
 ### Write Support ###
 
 Writes are supported through `cache`.
 One caveat is that a mounted cache remote does not add any retry or fallback
 mechanism to the upload operation. This will depend on the implementation
-of the wrapped remote.
+of the wrapped remote. Consider using `Offline uploading` for reliable writes.
 
 One special case is covered with `cache-writes` which will cache the file
 data at the same time as the upload when it is enabled making it available
@@ -157,6 +183,16 @@ Affected settings:
 
 ### Known issues ###
 
+#### Mount and --dir-cache-time ####
+
+--dir-cache-time controls the first layer of directory caching, which works at the mount layer.
+Being a caching mechanism independent from the `cache` backend, it will manage its own entries
+based on the configured time.
+
+To avoid a scenario where the dir cache has obsolete data while `cache` holds the correct
+version, try to set `--dir-cache-time` to a lower value than `--cache-info-age`. Default values
+are already configured this way.
+
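+For example (values are illustrative), to keep the mount layer's directory
+cache shorter-lived than the `cache` backend's info age:
+
+    rclone mount --allow-other --dir-cache-time=60s --cache-info-age=5m test-cache: /var/tmp/test-cache
+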
 #### Windows support - Experimental ####
 
 There are a couple of issues with Windows `mount` functionality that still require some investigations.
@@ -341,3 +377,23 @@ you can enable this flag to have their data stored in the cache store at the
 same time during upload.
 
 **Default**: not set
+
+#### --cache-tmp-upload-path=PATH ####
+
+This is the path that `cache` will use as temporary storage for new files
+that need to be uploaded to the cloud provider.
+
+Specifying a value will enable this feature. Without it, the feature is completely
+disabled and files will be uploaded directly to the cloud provider.
+
+**Default**: empty
+
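+For example, to queue writes through the temporary path while copying into
+the cached remote (the path is illustrative):
+
+    rclone copy /home/user/photos test-cache:photos --cache-tmp-upload-path=/var/tmp/cache-upload
+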
+#### --cache-tmp-wait-time=DURATION ####
+
+This is the duration that a file must wait in the temporary location
+_cache-tmp-upload-path_ before it is selected for upload.
+
+Note that only one file is uploaded at a time and it can take longer for
+an upload to start if a queue has formed.
+
+**Default**: 15m
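+
+For example, to make each file wait an hour (an illustrative value) before it
+becomes eligible for upload:
+
+    rclone mount --cache-tmp-upload-path=/var/tmp/cache-upload --cache-tmp-wait-time=1h test-cache: /var/tmp/test-cache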