diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go
index c14eed789..7a7974ba4 100644
--- a/cmd/mountlib/mount.go
+++ b/cmd/mountlib/mount.go
@@ -403,27 +403,7 @@ func (m *MountPoint) Wait() error {
 	fnHandle := atexit.Register(finalise)
 	defer atexit.Unregister(fnHandle)
 
-	// Reload VFS cache on SIGHUP
-	sigHup := make(chan os.Signal, 1)
-	NotifyOnSigHup(sigHup)
-	var err error
-
-	waiting := true
-	for waiting {
-		select {
-		// umount triggered outside the app
-		case err = <-m.ErrChan:
-			waiting = false
-		// user sent SIGHUP to clear the cache
-		case <-sigHup:
-			root, err := m.VFS.Root()
-			if err != nil {
-				fs.Errorf(m.VFS.Fs(), "Error reading root: %v", err)
-			} else {
-				root.ForgetAll()
-			}
-		}
-	}
+	err := <-m.ErrChan
 
 	finalise()
diff --git a/cmd/serve/docker/driver.go b/cmd/serve/docker/driver.go
index 86885d336..804a8bc02 100644
--- a/cmd/serve/docker/driver.go
+++ b/cmd/serve/docker/driver.go
@@ -76,7 +76,6 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
 	// start mount monitoring
 	drv.hupChan = make(chan os.Signal, 1)
 	drv.monChan = make(chan bool, 1)
-	mountlib.NotifyOnSigHup(drv.hupChan)
 	go drv.monitor()
 
 	// unmount all volumes on exit
diff --git a/cmd/mountlib/sighup.go b/vfs/sighup.go
similarity index 93%
rename from cmd/mountlib/sighup.go
rename to vfs/sighup.go
index 2dfbf9621..fadcbcdda 100644
--- a/cmd/mountlib/sighup.go
+++ b/vfs/sighup.go
@@ -1,6 +1,6 @@
 //go:build !plan9 && !js
 
-package mountlib
+package vfs
 
 import (
 	"os"
diff --git a/cmd/mountlib/sighup_unsupported.go b/vfs/sighup_unsupported.go
similarity index 90%
rename from cmd/mountlib/sighup_unsupported.go
rename to vfs/sighup_unsupported.go
index 8e786f696..19e08a34b 100644
--- a/cmd/mountlib/sighup_unsupported.go
+++ b/vfs/sighup_unsupported.go
@@ -1,6 +1,6 @@
 //go:build plan9 || js
 
-package mountlib
+package vfs
 
 import (
 	"os"
diff --git a/vfs/vfs.go b/vfs/vfs.go
index ecde90b19..1d2f71dd1 100644
--- a/vfs/vfs.go
+++ b/vfs/vfs.go
@@ -179,6 +179,7 @@ type VFS struct {
 	root        *Dir
 	Opt         vfscommon.Options
 	cache       *vfscache.Cache
+	cancel      context.CancelFunc
 	cancelCache context.CancelFunc
 	usageMu     sync.Mutex
 	usageTime   time.Time
@@ -197,8 +198,10 @@ var (
 // DefaultOpt will be used
 func New(f fs.Fs, opt *vfscommon.Options) *VFS {
 	fsDir := fs.NewDir("", time.Now())
+	ctx, cancel := context.WithCancel(context.Background())
 	vfs := &VFS{
-		f: f,
+		f:      f,
+		cancel: cancel,
 	}
 	vfs.inUse.Store(1)
@@ -259,6 +262,9 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
 		go vfs.refresh()
 	}
 
+	// Handle supported signals
+	go vfs.signalHandler(ctx)
+
 	// This can take some time so do it after the Pin
 	vfs.SetCacheMode(vfs.Opt.CacheMode)
 
@@ -274,6 +280,27 @@ func (vfs *VFS) refresh() {
 	}
 }
 
+// Reload VFS cache on SIGHUP
+func (vfs *VFS) signalHandler(ctx context.Context) {
+	sigHup := make(chan os.Signal, 1)
+	NotifyOnSigHup(sigHup)
+
+	waiting := true
+	for waiting {
+		select {
+		case <-ctx.Done():
+			waiting = false
+		case <-sigHup:
+			root, err := vfs.Root()
+			if err != nil {
+				fs.Errorf(vfs.Fs(), "Error reading root: %v", err)
+			} else {
+				root.ForgetAll()
+			}
+		}
+	}
+}
+
 // Stats returns info about the VFS
 func (vfs *VFS) Stats() (out rc.Params) {
 	out = make(rc.Params)
@@ -372,6 +399,9 @@ func (vfs *VFS) Shutdown() {
 		close(vfs.pollChan)
 		vfs.pollChan = nil
 	}
+
+	// Cancel any background go routines
+	vfs.cancel()
 }
 
 // CleanUp deletes the contents of the on disk cache
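
For reference, a minimal self-contained sketch of the pattern the vfs.go change adopts: a SIGHUP listener goroutine that reacts to each signal and exits when its context is cancelled. The names signalHandler and onHup below are illustrative only, not rclone identifiers; in the patch the equivalent goroutine is started from New and stopped from VFS.Shutdown via vfs.cancel().

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// signalHandler waits for SIGHUP until ctx is cancelled, calling onHup for
// every signal received. This mirrors the shape of the handler the patch
// moves into the VFS.
func signalHandler(ctx context.Context, onHup func()) {
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)
	defer signal.Stop(sigHup)

	for {
		select {
		case <-ctx.Done():
			return
		case <-sigHup:
			onHup()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Start the handler in the background, as New does for the VFS.
	go signalHandler(ctx, func() {
		fmt.Println("SIGHUP received: directory cache would be dropped here")
	})

	// Keep running for a while (send SIGHUP to this process to see the
	// callback fire), then shut down, as Shutdown does via vfs.cancel().
	time.Sleep(30 * time.Second)
	cancel()
}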