switch to fork of go-git
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/config.go  (new file, generated, vendored, 48 lines)
@@ -0,0 +1,48 @@
package filesystem

import (
	"os"

	"github.com/jesseduffield/go-git/v5/config"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"
)

type ConfigStorage struct {
	dir *dotgit.DotGit
}

func (c *ConfigStorage) Config() (conf *config.Config, err error) {
	f, err := c.dir.Config()
	if err != nil {
		if os.IsNotExist(err) {
			return config.NewConfig(), nil
		}

		return nil, err
	}

	defer ioutil.CheckClose(f, &err)
	return config.ReadConfig(f)
}

func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) {
	if err = cfg.Validate(); err != nil {
		return err
	}

	f, err := c.dir.ConfigWriter()
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	b, err := cfg.Marshal()
	if err != nil {
		return err
	}

	_, err = f.Write(b)
	return err
}
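A short usage sketch (not part of the diff, and assuming the fork keeps upstream go-git's public storage API; the repository path is hypothetical): Config() falls back to a fresh default config when .git/config is absent, so callers never need to special-case a missing file.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem"
)

func main() {
	// Hypothetical repo path; the storage wraps its .git directory.
	fs := osfs.New("/tmp/repo/.git")
	st := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())

	// Returns config.NewConfig() instead of an error when the file is absent.
	cfg, err := st.Config()
	if err != nil {
		panic(err)
	}
	fmt.Println("bare:", cfg.Core.IsBare)
}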
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/deltaobject.go  (new file, generated, vendored, 37 lines)
@@ -0,0 +1,37 @@
package filesystem

import (
	"github.com/jesseduffield/go-git/v5/plumbing"
)

type deltaObject struct {
	plumbing.EncodedObject
	base plumbing.Hash
	hash plumbing.Hash
	size int64
}

func newDeltaObject(
	obj plumbing.EncodedObject,
	hash plumbing.Hash,
	base plumbing.Hash,
	size int64) plumbing.DeltaObject {
	return &deltaObject{
		EncodedObject: obj,
		hash:          hash,
		base:          base,
		size:          size,
	}
}

func (o *deltaObject) BaseHash() plumbing.Hash {
	return o.base
}

func (o *deltaObject) ActualSize() int64 {
	return o.size
}

func (o *deltaObject) ActualHash() plumbing.Hash {
	return o.hash
}
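For context, plumbing.DeltaObject extends plumbing.EncodedObject with the delta's base hash and its resolved ("actual") identity and size; consumers discover delta-ness with a type assertion. A minimal sketch (the helper name is ours, not the library's):

package filesystemsketch

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
)

// reportDelta prints delta metadata when obj is a delta object, e.g. one
// returned by ObjectStorage.DeltaObject (defined later in this diff).
func reportDelta(obj plumbing.EncodedObject) {
	if d, ok := obj.(plumbing.DeltaObject); ok {
		fmt.Printf("delta over base %s -> %s (%d bytes)\n",
			d.BaseHash(), d.ActualHash(), d.ActualSize())
	}
}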
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit.go  (new file, generated, vendored, 1188 lines)
(diff suppressed because the file is too large)
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go  (new file, generated, vendored, 81 lines)
@@ -0,0 +1,81 @@
package dotgit

import (
	"io"
	"os"
	"runtime"

	"github.com/go-git/go-billy/v5"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"
)

func (d *DotGit) openAndLockPackedRefsMode() int {
	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
		return os.O_RDWR
	}

	return os.O_RDONLY
}

func (d *DotGit) rewritePackedRefsWhileLocked(
	tmp billy.File, pr billy.File) error {
	// Try plain rename. If we aren't using the bare Windows filesystem as the
	// storage layer, we might be able to get away with a rename over a locked
	// file.
	err := d.fs.Rename(tmp.Name(), pr.Name())
	if err == nil {
		return nil
	}

	// If we are in a filesystem that does not support rename (e.g. sivafs)
	// a full copy is done.
	if err == billy.ErrNotSupported {
		return d.copyNewFile(tmp, pr)
	}

	if runtime.GOOS != "windows" {
		return err
	}

	// Otherwise, Windows doesn't let us rename over a locked file, so
	// we have to do a straight copy. Unfortunately this could result
	// in a partially-written file if the process fails before the
	// copy completes.
	return d.copyToExistingFile(tmp, pr)
}

func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
	_, err := pr.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	err = pr.Truncate(0)
	if err != nil {
		return err
	}
	_, err = tmp.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	_, err = io.Copy(pr, tmp)

	return err
}

func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
	prWrite, err := d.fs.Create(pr.Name())
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(prWrite, &err)

	_, err = tmp.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}

	_, err = io.Copy(prWrite, tmp)

	return err
}
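The "straight copy" fallback is worth seeing in isolation: truncate the destination, rewind both files, and stream one into the other. A self-contained sketch against an in-memory billy filesystem (the helper and file names are ours):

package main

import (
	"fmt"
	"io"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"
)

// copyContents mirrors copyToExistingFile: truncate dst, rewind both files,
// then stream src into dst.
func copyContents(dst, src billy.File) error {
	if _, err := dst.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if err := dst.Truncate(0); err != nil {
		return err
	}
	if _, err := src.Seek(0, io.SeekStart); err != nil {
		return err
	}
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	fs := memfs.New()
	src, _ := fs.Create("tmp_packed-refs")
	dst, _ := fs.Create("packed-refs")
	src.Write([]byte("ref data\n"))

	if err := copyContents(dst, src); err != nil {
		panic(err)
	}
	fmt.Println("copied")
}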
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go  (new file, generated, vendored, 90 lines)
@@ -0,0 +1,90 @@
package dotgit

import (
	"fmt"
	"os"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"

	"github.com/go-git/go-billy/v5"
)

func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
		return d.setRefRwfs(fileName, content, old)
	}

	return d.setRefNorwfs(fileName, content, old)
}

func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
	// If we are not checking an old ref, just truncate the file.
	mode := os.O_RDWR | os.O_CREATE
	if old == nil {
		mode |= os.O_TRUNC
	}

	f, err := d.fs.OpenFile(fileName, mode, 0666)
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	// Lock is unlocked by the deferred Close above. This is because Unlock
	// does not imply a fsync and thus there would be a race between
	// Unlock+Close and other concurrent writers. Adding Sync to go-billy
	// could work, but this is better (and avoids superfluous syncs).
	err = f.Lock()
	if err != nil {
		return err
	}

	// this is a no-op to call even when old is nil.
	err = d.checkReferenceAndTruncate(f, old)
	if err != nil {
		return err
	}

	_, err = f.Write([]byte(content))
	return err
}

// There are some filesystems that don't support opening files in RDWR mode.
// In these filesystems the standard SetRef function cannot be used as it
// reads the reference file to check that it's not modified before updating it.
//
// This version of the function writes the reference without extra checks,
// making it compatible with these simple filesystems. This is usually not
// a problem as they should be accessed by only one process at a time.
func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error {
	_, err := d.fs.Stat(fileName)
	if err == nil && old != nil {
		fRead, err := d.fs.Open(fileName)
		if err != nil {
			return err
		}

		ref, err := d.readReferenceFrom(fRead, old.Name().String())
		fRead.Close()

		if err != nil {
			return err
		}

		if ref.Hash() != old.Hash() {
			return fmt.Errorf("reference has changed concurrently")
		}
	}

	f, err := d.fs.Create(fileName)
	if err != nil {
		return err
	}

	defer f.Close()

	_, err = f.Write([]byte(content))
	return err
}
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go  (new file, generated, vendored, 111 lines)
@@ -0,0 +1,111 @@
package dotgit

import (
	"os"
	"path/filepath"
	"strings"

	"github.com/go-git/go-billy/v5"
)

// RepositoryFilesystem is a billy.Filesystem compatible object wrapper
// which handles dot-git filesystem operations and supports commondir according to git scm layout:
// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt
type RepositoryFilesystem struct {
	dotGitFs       billy.Filesystem
	commonDotGitFs billy.Filesystem
}

func NewRepositoryFilesystem(dotGitFs, commonDotGitFs billy.Filesystem) *RepositoryFilesystem {
	return &RepositoryFilesystem{
		dotGitFs:       dotGitFs,
		commonDotGitFs: commonDotGitFs,
	}
}

func (fs *RepositoryFilesystem) mapToRepositoryFsByPath(path string) billy.Filesystem {
	// Nothing to decide if commondir is not defined.
	if fs.commonDotGitFs == nil {
		return fs.dotGitFs
	}

	cleanPath := filepath.Clean(path)

	// Check exceptions for commondir (https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt)
	switch cleanPath {
	case fs.dotGitFs.Join(logsPath, "HEAD"):
		return fs.dotGitFs
	case fs.dotGitFs.Join(refsPath, "bisect"), fs.dotGitFs.Join(refsPath, "rewritten"), fs.dotGitFs.Join(refsPath, "worktree"):
		return fs.dotGitFs
	}

	// Determine the dot-git root by the first path element.
	// Some elements should always use commondir when commondir is defined;
	// the usual dot-git root is used for the rest of the files.
	switch strings.Split(cleanPath, string(filepath.Separator))[0] {
	case objectsPath, refsPath, packedRefsPath, configPath, branchesPath, hooksPath, infoPath, remotesPath, logsPath, shallowPath, worktreesPath:
		return fs.commonDotGitFs
	default:
		return fs.dotGitFs
	}
}

func (fs *RepositoryFilesystem) Create(filename string) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(filename).Create(filename)
}

func (fs *RepositoryFilesystem) Open(filename string) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(filename).Open(filename)
}

func (fs *RepositoryFilesystem) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(filename).OpenFile(filename, flag, perm)
}

func (fs *RepositoryFilesystem) Stat(filename string) (os.FileInfo, error) {
	return fs.mapToRepositoryFsByPath(filename).Stat(filename)
}

func (fs *RepositoryFilesystem) Rename(oldpath, newpath string) error {
	return fs.mapToRepositoryFsByPath(oldpath).Rename(oldpath, newpath)
}

func (fs *RepositoryFilesystem) Remove(filename string) error {
	return fs.mapToRepositoryFsByPath(filename).Remove(filename)
}

func (fs *RepositoryFilesystem) Join(elem ...string) string {
	return fs.dotGitFs.Join(elem...)
}

func (fs *RepositoryFilesystem) TempFile(dir, prefix string) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(dir).TempFile(dir, prefix)
}

func (fs *RepositoryFilesystem) ReadDir(path string) ([]os.FileInfo, error) {
	return fs.mapToRepositoryFsByPath(path).ReadDir(path)
}

func (fs *RepositoryFilesystem) MkdirAll(filename string, perm os.FileMode) error {
	return fs.mapToRepositoryFsByPath(filename).MkdirAll(filename, perm)
}

func (fs *RepositoryFilesystem) Lstat(filename string) (os.FileInfo, error) {
	return fs.mapToRepositoryFsByPath(filename).Lstat(filename)
}

func (fs *RepositoryFilesystem) Symlink(target, link string) error {
	return fs.mapToRepositoryFsByPath(target).Symlink(target, link)
}

func (fs *RepositoryFilesystem) Readlink(link string) (string, error) {
	return fs.mapToRepositoryFsByPath(link).Readlink(link)
}

func (fs *RepositoryFilesystem) Chroot(path string) (billy.Filesystem, error) {
	return fs.mapToRepositoryFsByPath(path).Chroot(path)
}

func (fs *RepositoryFilesystem) Root() string {
	return fs.dotGitFs.Root()
}
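A sketch of the commondir routing for a linked worktree (paths are hypothetical; only the exported constructor from this file is used): per-worktree entries such as HEAD and index hit the worktree's own .git directory via the switch's default branch, while shared state like objects/ and refs/ is routed to the main repository's .git directory.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
)

func main() {
	// Hypothetical linked-worktree layout.
	worktreeGit := osfs.New("/repo/.git/worktrees/feature")
	commonGit := osfs.New("/repo/.git")

	fs := dotgit.NewRepositoryFilesystem(worktreeGit, commonGit)

	fmt.Println(fs.Root()) // per-worktree root

	// "HEAD" stays per-worktree (default branch of the switch), while
	// "objects/..." is routed to the common dir.
	_, _ = fs.Stat("HEAD")
	_, _ = fs.Stat("objects/pack")
}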
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/writers.go  (new file, generated, vendored, 284 lines)
@@ -0,0 +1,284 @@
package dotgit

import (
	"fmt"
	"io"
	"sync/atomic"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/idxfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/objfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/packfile"

	"github.com/go-git/go-billy/v5"
)

// PackWriter is an io.Writer that generates the packfile index simultaneously:
// a packfile parser is used with a file reader to read the file being written,
// and this operation is synchronized with the write operations.
// The packfile is written to a temp file; when Close is called, this file
// is renamed/moved (depending on the Filesystem implementation) to its final
// location. If the PackWriter is not used, nothing is written.
type PackWriter struct {
	Notify func(plumbing.Hash, *idxfile.Writer)

	fs       billy.Filesystem
	fr, fw   billy.File
	synced   *syncedReader
	checksum plumbing.Hash
	parser   *packfile.Parser
	writer   *idxfile.Writer
	result   chan error
}

func newPackWrite(fs billy.Filesystem) (*PackWriter, error) {
	fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_")
	if err != nil {
		return nil, err
	}

	fr, err := fs.Open(fw.Name())
	if err != nil {
		return nil, err
	}

	writer := &PackWriter{
		fs:     fs,
		fw:     fw,
		fr:     fr,
		synced: newSyncedReader(fw, fr),
		result: make(chan error),
	}

	go writer.buildIndex()
	return writer, nil
}

func (w *PackWriter) buildIndex() {
	s := packfile.NewScanner(w.synced)
	w.writer = new(idxfile.Writer)
	var err error
	w.parser, err = packfile.NewParser(s, w.writer)
	if err != nil {
		w.result <- err
		return
	}

	checksum, err := w.parser.Parse()
	if err != nil {
		w.result <- err
		return
	}

	w.checksum = checksum
	w.result <- err
}

// waitBuildIndex waits until the buildIndex function finishes. It can
// terminate with a packfile.ErrEmptyPackfile, which means that nothing was
// written, so we ignore that error.
func (w *PackWriter) waitBuildIndex() error {
	err := <-w.result
	if err == packfile.ErrEmptyPackfile {
		return nil
	}

	return err
}

func (w *PackWriter) Write(p []byte) (int, error) {
	return w.synced.Write(p)
}

// Close closes all the file descriptors and saves the final packfile; if
// nothing was written, the tempfiles are deleted without writing a packfile.
func (w *PackWriter) Close() error {
	defer func() {
		if w.Notify != nil && w.writer != nil && w.writer.Finished() {
			w.Notify(w.checksum, w.writer)
		}

		close(w.result)
	}()

	if err := w.synced.Close(); err != nil {
		return err
	}

	if err := w.waitBuildIndex(); err != nil {
		return err
	}

	if err := w.fr.Close(); err != nil {
		return err
	}

	if err := w.fw.Close(); err != nil {
		return err
	}

	if w.writer == nil || !w.writer.Finished() {
		return w.clean()
	}

	return w.save()
}

func (w *PackWriter) clean() error {
	return w.fs.Remove(w.fw.Name())
}

func (w *PackWriter) save() error {
	base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum))
	idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base))
	if err != nil {
		return err
	}

	if err := w.encodeIdx(idx); err != nil {
		return err
	}

	if err := idx.Close(); err != nil {
		return err
	}

	return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base))
}

func (w *PackWriter) encodeIdx(writer io.Writer) error {
	idx, err := w.writer.Index()
	if err != nil {
		return err
	}

	e := idxfile.NewEncoder(writer)
	_, err = e.Encode(idx)
	return err
}

type syncedReader struct {
	w io.Writer
	r io.ReadSeeker

	blocked, done uint32
	written, read uint64
	news          chan bool
}

func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader {
	return &syncedReader{
		w:    w,
		r:    r,
		news: make(chan bool),
	}
}

func (s *syncedReader) Write(p []byte) (n int, err error) {
	defer func() {
		written := atomic.AddUint64(&s.written, uint64(n))
		read := atomic.LoadUint64(&s.read)
		if written > read {
			s.wake()
		}
	}()

	n, err = s.w.Write(p)
	return
}

func (s *syncedReader) Read(p []byte) (n int, err error) {
	defer func() { atomic.AddUint64(&s.read, uint64(n)) }()

	for {
		s.sleep()
		n, err = s.r.Read(p)
		if err == io.EOF && !s.isDone() && n == 0 {
			continue
		}

		break
	}

	return
}

func (s *syncedReader) isDone() bool {
	return atomic.LoadUint32(&s.done) == 1
}

func (s *syncedReader) isBlocked() bool {
	return atomic.LoadUint32(&s.blocked) == 1
}

func (s *syncedReader) wake() {
	if s.isBlocked() {
		atomic.StoreUint32(&s.blocked, 0)
		s.news <- true
	}
}

func (s *syncedReader) sleep() {
	read := atomic.LoadUint64(&s.read)
	written := atomic.LoadUint64(&s.written)
	if read >= written {
		atomic.StoreUint32(&s.blocked, 1)
		<-s.news
	}
}

func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekCurrent {
		return s.r.Seek(offset, whence)
	}

	p, err := s.r.Seek(offset, whence)
	atomic.StoreUint64(&s.read, uint64(p))

	return p, err
}

func (s *syncedReader) Close() error {
	atomic.StoreUint32(&s.done, 1)
	close(s.news)
	return nil
}

type ObjectWriter struct {
	objfile.Writer
	fs billy.Filesystem
	f  billy.File
}

func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) {
	f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_")
	if err != nil {
		return nil, err
	}

	return &ObjectWriter{
		Writer: (*objfile.NewWriter(f)),
		fs:     fs,
		f:      f,
	}, nil
}

func (w *ObjectWriter) Close() error {
	if err := w.Writer.Close(); err != nil {
		return err
	}

	if err := w.f.Close(); err != nil {
		return err
	}

	return w.save()
}

func (w *ObjectWriter) save() error {
	hash := w.Hash().String()
	file := w.fs.Join(objectsPath, hash[0:2], hash[2:40])

	return w.fs.Rename(w.f.Name(), file)
}
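ObjectWriter.save uses git's loose-object fan-out: the first two hex digits of the hash name a directory, the remaining 38 the file. A tiny sketch of that path computation, assuming only the plumbing package (the hash is arbitrary):

package main

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
)

func main() {
	h := plumbing.NewHash("89ab45cd0123ef4567890123456789abcdef0123")
	s := h.String()
	// e.g. objects/89/ab45cd0123ef4567890123456789abcdef0123
	fmt.Printf("objects/%s/%s\n", s[0:2], s[2:40])
}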
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/index.go  (new file, generated, vendored, 54 lines)
@@ -0,0 +1,54 @@
package filesystem

import (
	"bufio"
	"os"

	"github.com/jesseduffield/go-git/v5/plumbing/format/index"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"
)

type IndexStorage struct {
	dir *dotgit.DotGit
}

func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
	f, err := s.dir.IndexWriter()
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)
	bw := bufio.NewWriter(f)
	defer func() {
		if e := bw.Flush(); err == nil && e != nil {
			err = e
		}
	}()

	e := index.NewEncoder(bw)
	err = e.Encode(idx)
	return err
}

func (s *IndexStorage) Index() (i *index.Index, err error) {
	idx := &index.Index{
		Version: 2,
	}

	f, err := s.dir.Index()
	if err != nil {
		if os.IsNotExist(err) {
			return idx, nil
		}

		return nil, err
	}

	defer ioutil.CheckClose(f, &err)

	d := index.NewDecoder(bufio.NewReader(f))
	err = d.Decode(idx)
	return idx, err
}
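SetIndex's deferred Flush illustrates a subtle pattern: the flush can fail even when encoding succeeded, and its error must not overwrite an earlier one, hence the err == nil guard. The same pattern in isolation (helper name is ours):

package main

import (
	"bufio"
	"io"
	"os"
)

// writeBuffered runs encode against a buffered writer and merges the
// deferred Flush error without masking an earlier encode error.
func writeBuffered(w io.Writer, encode func(io.Writer) error) (err error) {
	bw := bufio.NewWriter(w)
	defer func() {
		if e := bw.Flush(); err == nil && e != nil {
			err = e
		}
	}()
	return encode(bw)
}

func main() {
	_ = writeBuffered(os.Stdout, func(w io.Writer) error {
		_, err := w.Write([]byte("hello\n"))
		return err
	})
}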
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/module.go  (new file, generated, vendored, 20 lines)
@@ -0,0 +1,20 @@
package filesystem

import (
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
)

type ModuleStorage struct {
	dir *dotgit.DotGit
}

func (s *ModuleStorage) Module(name string) (storage.Storer, error) {
	fs, err := s.dir.Module(name)
	if err != nil {
		return nil, err
	}

	return NewStorage(fs, cache.NewObjectLRUDefault()), nil
}
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/object.go  (new file, generated, vendored, 848 lines)
@@ -0,0 +1,848 @@
package filesystem

import (
	"bytes"
	"io"
	"os"
	"time"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/plumbing/format/idxfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/objfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/packfile"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"

	"github.com/go-git/go-billy/v5"
)

type ObjectStorage struct {
	options Options

	// objectCache is an object cache used to cache delta bases and also
	// recently loaded loose objects.
	objectCache cache.Object

	dir   *dotgit.DotGit
	index map[plumbing.Hash]idxfile.Index

	packList    []plumbing.Hash
	packListIdx int
	packfiles   map[plumbing.Hash]*packfile.Packfile
}

// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
	return NewObjectStorageWithOptions(dir, objectCache, Options{})
}

// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options.
func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
	return &ObjectStorage{
		options:     ops,
		objectCache: objectCache,
		dir:         dir,
	}
}

func (s *ObjectStorage) requireIndex() error {
	if s.index != nil {
		return nil
	}

	s.index = make(map[plumbing.Hash]idxfile.Index)
	packs, err := s.dir.ObjectPacks()
	if err != nil {
		return err
	}

	for _, h := range packs {
		if err := s.loadIdxFile(h); err != nil {
			return err
		}
	}

	return nil
}

// Reindex re-indexes all packfiles. Useful if git changed packfiles externally.
func (s *ObjectStorage) Reindex() {
	s.index = nil
}

func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
	f, err := s.dir.ObjectPackIdx(h)
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	idxf := idxfile.NewMemoryIndex()
	d := idxfile.NewDecoder(f)
	if err = d.Decode(idxf); err != nil {
		return err
	}

	s.index[h] = idxf
	return err
}

func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
	return &plumbing.MemoryObject{}
}

func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	w, err := s.dir.NewObjectPack()
	if err != nil {
		return nil, err
	}

	w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
		index, err := writer.Index()
		if err == nil {
			s.index[h] = index
		}
	}

	return w, nil
}

// SetEncodedObject adds a new object to the storage.
func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) {
	if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject {
		return plumbing.ZeroHash, plumbing.ErrInvalidType
	}

	ow, err := s.dir.NewObject()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	defer ioutil.CheckClose(ow, &err)

	or, err := o.Reader()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	defer ioutil.CheckClose(or, &err)

	if err = ow.WriteHeader(o.Type(), o.Size()); err != nil {
		return plumbing.ZeroHash, err
	}

	if _, err = io.Copy(ow, or); err != nil {
		return plumbing.ZeroHash, err
	}

	return o.Hash(), err
}

// HasEncodedObject returns nil if the object exists, without actually
// reading the object data from storage.
func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
	// Check unpacked objects
	f, err := s.dir.Object(h)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// Fall through to check packed objects.
	} else {
		defer ioutil.CheckClose(f, &err)
		return nil
	}

	// Check packed objects.
	if err := s.requireIndex(); err != nil {
		return err
	}
	_, _, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return plumbing.ErrObjectNotFound
	}
	return nil
}

func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
	size int64, err error) {
	f, err := s.dir.Object(h)
	if err != nil {
		if os.IsNotExist(err) {
			return 0, plumbing.ErrObjectNotFound
		}

		return 0, err
	}

	r, err := objfile.NewReader(f)
	if err != nil {
		return 0, err
	}
	defer ioutil.CheckClose(r, &err)

	_, size, err = r.Header()
	return size, err
}

func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
	if p := s.packfileFromCache(pack); p != nil {
		return p, nil
	}

	f, err := s.dir.ObjectPack(pack)
	if err != nil {
		return nil, err
	}

	var p *packfile.Packfile
	if s.objectCache != nil {
		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
	} else {
		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
	}

	return p, s.storePackfileInCache(pack, p)
}

func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
	if s.packfiles == nil {
		if s.options.KeepDescriptors {
			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
		} else if s.options.MaxOpenDescriptors > 0 {
			s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
		}
	}

	return s.packfiles[hash]
}

func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
	if s.options.KeepDescriptors {
		s.packfiles[hash] = p
		return nil
	}

	if s.options.MaxOpenDescriptors <= 0 {
		return nil
	}

	// start over as the limit of packList is hit
	if s.packListIdx >= len(s.packList) {
		s.packListIdx = 0
	}

	// close the existing packfile if open
	if next := s.packList[s.packListIdx]; !next.IsZero() {
		open := s.packfiles[next]
		delete(s.packfiles, next)
		if open != nil {
			if err := open.Close(); err != nil {
				return err
			}
		}
	}

	// cache newly open packfile
	s.packList[s.packListIdx] = hash
	s.packfiles[hash] = p
	s.packListIdx++

	return nil
}

func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
	size int64, err error) {
	if err := s.requireIndex(); err != nil {
		return 0, err
	}

	pack, _, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return 0, plumbing.ErrObjectNotFound
	}

	idx := s.index[pack]
	hash, err := idx.FindHash(offset)
	if err == nil {
		obj, ok := s.objectCache.Get(hash)
		if ok {
			return obj.Size(), nil
		}
	} else if err != nil && err != plumbing.ErrObjectNotFound {
		return 0, err
	}

	p, err := s.packfile(idx, pack)
	if err != nil {
		return 0, err
	}

	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
		defer ioutil.CheckClose(p, &err)
	}

	return p.GetSizeByOffset(offset)
}

// EncodedObjectSize returns the plaintext size of the given object,
// without actually reading the full object data from storage.
func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
	size int64, err error) {
	size, err = s.encodedObjectSizeFromUnpacked(h)
	if err != nil && err != plumbing.ErrObjectNotFound {
		return 0, err
	} else if err == nil {
		return size, nil
	}

	return s.encodedObjectSizeFromPackfile(h)
}

// EncodedObject returns the object with the given hash, by searching for it in
// the packfile and the git object directories.
func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	var obj plumbing.EncodedObject
	var err error

	if s.index != nil {
		obj, err = s.getFromPackfile(h, false)
		if err == plumbing.ErrObjectNotFound {
			obj, err = s.getFromUnpacked(h)
		}
	} else {
		obj, err = s.getFromUnpacked(h)
		if err == plumbing.ErrObjectNotFound {
			obj, err = s.getFromPackfile(h, false)
		}
	}

	// If the error is still object not found, check if it's a shared object
	// repository.
	if err == plumbing.ErrObjectNotFound {
		dotgits, e := s.dir.Alternates()
		if e == nil {
			// Create a new object storage with the DotGit(s) and check for the
			// required hash object. Skip when not found.
			for _, dg := range dotgits {
				o := NewObjectStorage(dg, s.objectCache)
				enobj, enerr := o.EncodedObject(t, h)
				if enerr != nil {
					continue
				}
				return enobj, nil
			}
		}
	}

	if err != nil {
		return nil, err
	}

	if plumbing.AnyObject != t && obj.Type() != t {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}

// DeltaObject returns the object with the given hash, by searching for
// it in the packfile and the git object directories.
func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType,
	h plumbing.Hash) (plumbing.EncodedObject, error) {
	obj, err := s.getFromUnpacked(h)
	if err == plumbing.ErrObjectNotFound {
		obj, err = s.getFromPackfile(h, true)
	}

	if err != nil {
		return nil, err
	}

	if plumbing.AnyObject != t && obj.Type() != t {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}

func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
	f, err := s.dir.Object(h)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, plumbing.ErrObjectNotFound
		}

		return nil, err
	}
	defer ioutil.CheckClose(f, &err)

	if cacheObj, found := s.objectCache.Get(h); found {
		return cacheObj, nil
	}

	obj = s.NewEncodedObject()
	r, err := objfile.NewReader(f)
	if err != nil {
		return nil, err
	}

	defer ioutil.CheckClose(r, &err)

	t, size, err := r.Header()
	if err != nil {
		return nil, err
	}

	obj.SetType(t)
	obj.SetSize(size)
	w, err := obj.Writer()
	if err != nil {
		return nil, err
	}

	defer ioutil.CheckClose(w, &err)

	s.objectCache.Put(obj)

	_, err = io.Copy(w, r)
	return obj, err
}

// getFromPackfile returns the object with the given hash, by searching for it
// in the packfile.
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
	plumbing.EncodedObject, error) {

	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	pack, hash, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return nil, plumbing.ErrObjectNotFound
	}

	idx := s.index[pack]
	p, err := s.packfile(idx, pack)
	if err != nil {
		return nil, err
	}

	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
		defer ioutil.CheckClose(p, &err)
	}

	if canBeDelta {
		return s.decodeDeltaObjectAt(p, offset, hash)
	}

	return s.decodeObjectAt(p, offset)
}

func (s *ObjectStorage) decodeObjectAt(
	p *packfile.Packfile,
	offset int64,
) (plumbing.EncodedObject, error) {
	hash, err := p.FindHash(offset)
	if err == nil {
		obj, ok := s.objectCache.Get(hash)
		if ok {
			return obj, nil
		}
	}

	if err != nil && err != plumbing.ErrObjectNotFound {
		return nil, err
	}

	return p.GetByOffset(offset)
}

func (s *ObjectStorage) decodeDeltaObjectAt(
	p *packfile.Packfile,
	offset int64,
	hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
	scan := p.Scanner()
	header, err := scan.SeekObjectHeader(offset)
	if err != nil {
		return nil, err
	}

	var (
		base plumbing.Hash
	)

	switch header.Type {
	case plumbing.REFDeltaObject:
		base = header.Reference
	case plumbing.OFSDeltaObject:
		base, err = p.FindHash(header.OffsetReference)
		if err != nil {
			return nil, err
		}
	default:
		return s.decodeObjectAt(p, offset)
	}

	obj := &plumbing.MemoryObject{}
	obj.SetType(header.Type)
	w, err := obj.Writer()
	if err != nil {
		return nil, err
	}

	if _, _, err := scan.NextObject(w); err != nil {
		return nil, err
	}

	return newDeltaObject(obj, hash, base, header.Length), nil
}

func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
	for packfile, index := range s.index {
		offset, err := index.FindOffset(h)
		if err == nil {
			return packfile, h, offset
		}
	}

	return plumbing.ZeroHash, plumbing.ZeroHash, -1
}

func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
	hashes, err := s.dir.ObjectsWithPrefix(prefix)
	if err != nil {
		return nil, err
	}

	// TODO: This could be faster with some idxfile changes,
	// or diving into the packfile.
	for _, index := range s.index {
		ei, err := index.Entries()
		if err != nil {
			return nil, err
		}
		for {
			e, err := ei.Next()
			if err == io.EOF {
				break
			} else if err != nil {
				return nil, err
			}
			if bytes.HasPrefix(e.Hash[:], prefix) {
				hashes = append(hashes, e.Hash)
			}
		}
		ei.Close()
	}

	return hashes, nil
}

// IterEncodedObjects returns an iterator for all the objects in the packfile
// with the given type.
func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
	objects, err := s.dir.Objects()
	if err != nil {
		return nil, err
	}

	seen := make(map[plumbing.Hash]struct{})
	var iters []storer.EncodedObjectIter
	if len(objects) != 0 {
		iters = append(iters, &objectsIter{s: s, t: t, h: objects})
		seen = hashListAsMap(objects)
	}

	packi, err := s.buildPackfileIters(t, seen)
	if err != nil {
		return nil, err
	}

	iters = append(iters, packi)
	return storer.NewMultiEncodedObjectIter(iters), nil
}

func (s *ObjectStorage) buildPackfileIters(
	t plumbing.ObjectType,
	seen map[plumbing.Hash]struct{},
) (storer.EncodedObjectIter, error) {
	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	packs, err := s.dir.ObjectPacks()
	if err != nil {
		return nil, err
	}
	return &lazyPackfilesIter{
		hashes: packs,
		open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) {
			pack, err := s.dir.ObjectPack(h)
			if err != nil {
				return nil, err
			}
			return newPackfileIter(
				s.dir.Fs(), pack, t, seen, s.index[h],
				s.objectCache, s.options.KeepDescriptors,
			)
		},
	}, nil
}

// Close closes all opened files.
func (s *ObjectStorage) Close() error {
	var firstError error
	if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
		for _, packfile := range s.packfiles {
			err := packfile.Close()
			if firstError == nil && err != nil {
				firstError = err
			}
		}
	}

	s.packfiles = nil
	s.dir.Close()

	return firstError
}

type lazyPackfilesIter struct {
	hashes []plumbing.Hash
	open   func(h plumbing.Hash) (storer.EncodedObjectIter, error)
	cur    storer.EncodedObjectIter
}

func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) {
	for {
		if it.cur == nil {
			if len(it.hashes) == 0 {
				return nil, io.EOF
			}
			h := it.hashes[0]
			it.hashes = it.hashes[1:]

			sub, err := it.open(h)
			if err == io.EOF {
				continue
			} else if err != nil {
				return nil, err
			}
			it.cur = sub
		}
		ob, err := it.cur.Next()
		if err == io.EOF {
			it.cur.Close()
			it.cur = nil
			continue
		} else if err != nil {
			return nil, err
		}
		return ob, nil
	}
}

func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error {
	return storer.ForEachIterator(it, cb)
}

func (it *lazyPackfilesIter) Close() {
	if it.cur != nil {
		it.cur.Close()
		it.cur = nil
	}
	it.hashes = nil
}

type packfileIter struct {
	pack billy.File
	iter storer.EncodedObjectIter
	seen map[plumbing.Hash]struct{}

	// tells whether the pack file should be left open after iteration or not
	keepPack bool
}

// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
// and object type. Packfile and index file will be closed after they're
// used. If keepPack is true the packfile won't be closed after the iteration
// finishes.
func NewPackfileIter(
	fs billy.Filesystem,
	f billy.File,
	idxFile billy.File,
	t plumbing.ObjectType,
	keepPack bool,
) (storer.EncodedObjectIter, error) {
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
		return nil, err
	}

	if err := idxFile.Close(); err != nil {
		return nil, err
	}

	seen := make(map[plumbing.Hash]struct{})
	return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
}

func newPackfileIter(
	fs billy.Filesystem,
	f billy.File,
	t plumbing.ObjectType,
	seen map[plumbing.Hash]struct{},
	index idxfile.Index,
	cache cache.Object,
	keepPack bool,
) (storer.EncodedObjectIter, error) {
	var p *packfile.Packfile
	if cache != nil {
		p = packfile.NewPackfileWithCache(index, fs, f, cache)
	} else {
		p = packfile.NewPackfile(index, fs, f)
	}

	iter, err := p.GetByType(t)
	if err != nil {
		return nil, err
	}

	return &packfileIter{
		pack:     f,
		iter:     iter,
		seen:     seen,
		keepPack: keepPack,
	}, nil
}

func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
	for {
		obj, err := iter.iter.Next()
		if err != nil {
			return nil, err
		}

		if _, ok := iter.seen[obj.Hash()]; ok {
			continue
		}

		return obj, nil
	}
}

func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
	for {
		o, err := iter.Next()
		if err != nil {
			if err == io.EOF {
				iter.Close()
				return nil
			}
			return err
		}

		if err := cb(o); err != nil {
			return err
		}
	}
}

func (iter *packfileIter) Close() {
	iter.iter.Close()
	if !iter.keepPack {
		_ = iter.pack.Close()
	}
}

type objectsIter struct {
	s *ObjectStorage
	t plumbing.ObjectType
	h []plumbing.Hash
}

func (iter *objectsIter) Next() (plumbing.EncodedObject, error) {
	if len(iter.h) == 0 {
		return nil, io.EOF
	}

	obj, err := iter.s.getFromUnpacked(iter.h[0])
	iter.h = iter.h[1:]

	if err != nil {
		return nil, err
	}

	if iter.t != plumbing.AnyObject && iter.t != obj.Type() {
		return iter.Next()
	}

	return obj, err
}

func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error {
	for {
		o, err := iter.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}

		if err := cb(o); err != nil {
			return err
		}
	}
}

func (iter *objectsIter) Close() {
	iter.h = []plumbing.Hash{}
}

func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} {
	m := make(map[plumbing.Hash]struct{}, len(l))
	for _, h := range l {
		m[h] = struct{}{}
	}
	return m
}

func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
	err := s.dir.ForEachObjectHash(fun)
	if err == storer.ErrStop {
		return nil
	}
	return err
}

func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
	fi, err := s.dir.ObjectStat(hash)
	if err != nil {
		return time.Time{}, err
	}
	return fi.ModTime(), nil
}

func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error {
	return s.dir.ObjectDelete(hash)
}

func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
	return s.dir.ObjectPacks()
}

func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error {
	return s.dir.DeleteOldObjectPackAndIndex(h, t)
}
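A usage sketch of the iteration machinery above (path hypothetical; API assumed as in upstream go-git): loose objects are yielded first, then packed objects, with the seen set suppressing duplicates that exist both loose and packed.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem"
)

func main() {
	st := filesystem.NewStorage(osfs.New("/tmp/repo/.git"), cache.NewObjectLRUDefault())

	iter, err := st.IterEncodedObjects(plumbing.CommitObject)
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	_ = iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash())
		return nil
	})
}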
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/reference.go  (new file, generated, vendored, 44 lines)
@@ -0,0 +1,44 @@
package filesystem

import (
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
)

type ReferenceStorage struct {
	dir *dotgit.DotGit
}

func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error {
	return r.dir.SetRef(ref, nil)
}

func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
	return r.dir.SetRef(ref, old)
}

func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
	return r.dir.Ref(n)
}

func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
	refs, err := r.dir.Refs()
	if err != nil {
		return nil, err
	}

	return storer.NewReferenceSliceIter(refs), nil
}

func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
	return r.dir.RemoveRef(n)
}

func (r *ReferenceStorage) CountLooseRefs() (int, error) {
	return r.dir.CountLooseRefs()
}

func (r *ReferenceStorage) PackRefs() error {
	return r.dir.PackRefs()
}
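CheckAndSetReference is the public face of the compare-and-swap logic in dotgit_setref.go above: passing the previously read reference as old makes the write fail if the ref moved concurrently. A sketch (path and target hash are hypothetical):

package main

import (
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem"
)

func main() {
	st := filesystem.NewStorage(osfs.New("/tmp/repo/.git"), cache.NewObjectLRUDefault())

	name := plumbing.NewBranchReferenceName("main")
	old, err := st.Reference(name)
	if err != nil {
		panic(err)
	}

	// The update is rejected if the ref changed since `old` was read.
	target := plumbing.NewHash("89ab45cd0123ef4567890123456789abcdef0123")
	if err := st.CheckAndSetReference(plumbing.NewHashReference(name, target), old); err != nil {
		panic(err)
	}
}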
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/shallow.go  (new file, generated, vendored, 54 lines)
@@ -0,0 +1,54 @@
package filesystem

import (
	"bufio"
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"
)

// ShallowStorage is where the shallow commits are stored; an internal type
// to manipulate the shallow file.
type ShallowStorage struct {
	dir *dotgit.DotGit
}

// SetShallow saves the shallows in the shallow file in the .git folder, one
// commit per line, each represented by a 40-byte hexadecimal object name
// terminated by a newline.
func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
	f, err := s.dir.ShallowWriter()
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)
	for _, h := range commits {
		if _, err := fmt.Fprintf(f, "%s\n", h); err != nil {
			return err
		}
	}

	return err
}

// Shallow returns the shallow commits read from the shallow file in .git.
func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
	f, err := s.dir.Shallow()
	if f == nil || err != nil {
		return nil, err
	}

	defer ioutil.CheckClose(f, &err)

	var hash []plumbing.Hash

	scn := bufio.NewScanner(f)
	for scn.Scan() {
		hash = append(hash, plumbing.NewHash(scn.Text()))
	}

	return hash, scn.Err()
}
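A round-trip sketch of the shallow file (path and hash are hypothetical): SetShallow writes one 40-character hex hash per line to .git/shallow, and Shallow reads them back line by line.

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem"
)

func main() {
	st := filesystem.NewStorage(osfs.New("/tmp/repo/.git"), cache.NewObjectLRUDefault())

	boundary := plumbing.NewHash("89ab45cd0123ef4567890123456789abcdef0123")
	if err := st.SetShallow([]plumbing.Hash{boundary}); err != nil {
		panic(err)
	}

	commits, err := st.Shallow()
	if err != nil {
		panic(err)
	}
	fmt.Println(commits)
}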
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/storage.go  (new file, generated, vendored, 73 lines)
@@ -0,0 +1,73 @@
// Package filesystem is a storage backend based on filesystems.
package filesystem

import (
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"

	"github.com/go-git/go-billy/v5"
)

// Storage is an implementation of git.Storer that stores data on disk in the
// standard git format (that is, the .git directory). Zero values of this type
// are not safe to use; see the NewStorage function below.
type Storage struct {
	fs  billy.Filesystem
	dir *dotgit.DotGit

	ObjectStorage
	ReferenceStorage
	IndexStorage
	ShallowStorage
	ConfigStorage
	ModuleStorage
}

// Options holds configuration for the storage.
type Options struct {
	// ExclusiveAccess means that the filesystem is not modified externally
	// while the repo is open.
	ExclusiveAccess bool
	// KeepDescriptors makes the file descriptors be reused, but they will
	// need to be manually closed by calling Close().
	KeepDescriptors bool
	// MaxOpenDescriptors is the max number of file descriptors to keep
	// open. If KeepDescriptors is true, all file descriptors will remain open.
	MaxOpenDescriptors int
}

// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
	return NewStorageWithOptions(fs, cache, Options{})
}

// NewStorageWithOptions returns a new Storage with extra options,
// backed by a given `fs.Filesystem` and cache.
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
	dirOps := dotgit.Options{
		ExclusiveAccess: ops.ExclusiveAccess,
	}
	dir := dotgit.NewWithOptions(fs, dirOps)

	return &Storage{
		fs:  fs,
		dir: dir,

		ObjectStorage:    *NewObjectStorageWithOptions(dir, cache, ops),
		ReferenceStorage: ReferenceStorage{dir: dir},
		IndexStorage:     IndexStorage{dir: dir},
		ShallowStorage:   ShallowStorage{dir: dir},
		ConfigStorage:    ConfigStorage{dir: dir},
		ModuleStorage:    ModuleStorage{dir: dir},
	}
}

// Filesystem returns the underlying filesystem.
func (s *Storage) Filesystem() billy.Filesystem {
	return s.fs
}

// Init initializes the .git directory.
func (s *Storage) Init() error {
	return s.dir.Initialize()
}
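The Options above feed the packfile-descriptor cache in object.go. A sketch of bounding open descriptors (path hypothetical): MaxOpenDescriptors caps the ring buffer maintained by storePackfileInCache, and Close releases whatever is still open.

package main

import (
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem"
)

func main() {
	// Keep at most 8 packfile descriptors open at a time.
	st := filesystem.NewStorageWithOptions(
		osfs.New("/tmp/repo/.git"),
		cache.NewObjectLRUDefault(),
		filesystem.Options{MaxOpenDescriptors: 8},
	)
	defer st.Close()
}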
vendor/github.com/jesseduffield/go-git/v5/storage/memory/storage.go  (new file, generated, vendored, 320 lines)
@@ -0,0 +1,320 @@
// Package memory is a storage backend based on memory.
package memory

import (
	"fmt"
	"time"

	"github.com/jesseduffield/go-git/v5/config"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/index"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
	"github.com/jesseduffield/go-git/v5/storage"
)

var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")

// Storage is an implementation of git.Storer that stores data in memory and
// is therefore ephemeral. It should be used in controlled environments, since
// the in-memory representation of a repository can fill the machine's memory;
// on the other hand, this storage has the best performance.
type Storage struct {
	ConfigStorage
	ObjectStorage
	ShallowStorage
	IndexStorage
	ReferenceStorage
	ModuleStorage
}

// NewStorage returns a new Storage based on memory.
func NewStorage() *Storage {
	return &Storage{
		ReferenceStorage: make(ReferenceStorage),
		ConfigStorage:    ConfigStorage{},
		ShallowStorage:   ShallowStorage{},
		ObjectStorage: ObjectStorage{
			Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
			Commits: make(map[plumbing.Hash]plumbing.EncodedObject),
			Trees:   make(map[plumbing.Hash]plumbing.EncodedObject),
			Blobs:   make(map[plumbing.Hash]plumbing.EncodedObject),
			Tags:    make(map[plumbing.Hash]plumbing.EncodedObject),
		},
		ModuleStorage: make(ModuleStorage),
	}
}

type ConfigStorage struct {
	config *config.Config
}

func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
	if err := cfg.Validate(); err != nil {
		return err
	}

	c.config = cfg
	return nil
}

func (c *ConfigStorage) Config() (*config.Config, error) {
	if c.config == nil {
		c.config = config.NewConfig()
	}

	return c.config, nil
}

type IndexStorage struct {
	index *index.Index
}

func (c *IndexStorage) SetIndex(idx *index.Index) error {
	c.index = idx
	return nil
}

func (c *IndexStorage) Index() (*index.Index, error) {
	if c.index == nil {
		c.index = &index.Index{Version: 2}
	}

	return c.index, nil
}

type ObjectStorage struct {
	Objects map[plumbing.Hash]plumbing.EncodedObject
	Commits map[plumbing.Hash]plumbing.EncodedObject
	Trees   map[plumbing.Hash]plumbing.EncodedObject
	Blobs   map[plumbing.Hash]plumbing.EncodedObject
	Tags    map[plumbing.Hash]plumbing.EncodedObject
}

func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
	return &plumbing.MemoryObject{}
}

func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
	h := obj.Hash()
	o.Objects[h] = obj

	switch obj.Type() {
	case plumbing.CommitObject:
		o.Commits[h] = o.Objects[h]
	case plumbing.TreeObject:
		o.Trees[h] = o.Objects[h]
	case plumbing.BlobObject:
		o.Blobs[h] = o.Objects[h]
	case plumbing.TagObject:
		o.Tags[h] = o.Objects[h]
	default:
		return h, ErrUnsupportedObjectType
	}

	return h, nil
}

func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
	if _, ok := o.Objects[h]; !ok {
		return plumbing.ErrObjectNotFound
	}
	return nil
}

func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
	size int64, err error) {
	obj, ok := o.Objects[h]
	if !ok {
		return 0, plumbing.ErrObjectNotFound
	}

	return obj.Size(), nil
}

func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	obj, ok := o.Objects[h]
	if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}

func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
	var series []plumbing.EncodedObject
	switch t {
	case plumbing.AnyObject:
		series = flattenObjectMap(o.Objects)
	case plumbing.CommitObject:
		series = flattenObjectMap(o.Commits)
	case plumbing.TreeObject:
		series = flattenObjectMap(o.Trees)
	case plumbing.BlobObject:
		series = flattenObjectMap(o.Blobs)
	case plumbing.TagObject:
		series = flattenObjectMap(o.Tags)
	}

	return storer.NewEncodedObjectSliceIter(series), nil
}

func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject {
	objects := make([]plumbing.EncodedObject, 0, len(m))
	for _, obj := range m {
		objects = append(objects, obj)
	}
	return objects
}

func (o *ObjectStorage) Begin() storer.Transaction {
	return &TxObjectStorage{
		Storage: o,
		Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
	}
}

func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
	for h := range o.Objects {
		err := fun(h)
		if err != nil {
			if err == storer.ErrStop {
				return nil
			}
			return err
		}
	}
	return nil
}

func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
	return nil, nil
}
func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error {
	return nil
}

var errNotSupported = fmt.Errorf("Not supported")

func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
	return time.Time{}, errNotSupported
}
func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
	return errNotSupported
}

type TxObjectStorage struct {
	Storage *ObjectStorage
	Objects map[plumbing.Hash]plumbing.EncodedObject
}

func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
	h := obj.Hash()
	tx.Objects[h] = obj

	return h, nil
}

func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	obj, ok := tx.Objects[h]
	if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}

func (tx *TxObjectStorage) Commit() error {
	for h, obj := range tx.Objects {
		delete(tx.Objects, h)
		if _, err := tx.Storage.SetEncodedObject(obj); err != nil {
			return err
		}
	}

	return nil
}

func (tx *TxObjectStorage) Rollback() error {
	tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject)
	return nil
}

type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference

func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error {
	if ref != nil {
		r[ref.Name()] = ref
	}

	return nil
}

func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
	if ref == nil {
		return nil
	}

	if old != nil {
		tmp := r[ref.Name()]
		if tmp != nil && tmp.Hash() != old.Hash() {
			return storage.ErrReferenceHasChanged
		}
	}
	r[ref.Name()] = ref
	return nil
}

func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
	ref, ok := r[n]
	if !ok {
		return nil, plumbing.ErrReferenceNotFound
	}

	return ref, nil
}

func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
	var refs []*plumbing.Reference
	for _, ref := range r {
		refs = append(refs, ref)
	}

	return storer.NewReferenceSliceIter(refs), nil
}

func (r ReferenceStorage) CountLooseRefs() (int, error) {
	return len(r), nil
}

func (r ReferenceStorage) PackRefs() error {
	return nil
}

func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
	delete(r, n)
	return nil
}

type ShallowStorage []plumbing.Hash

func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
	*s = commits
	return nil
}

func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) {
	return s, nil
}

type ModuleStorage map[string]*Storage

func (s ModuleStorage) Module(name string) (storage.Storer, error) {
	if m, ok := s[name]; ok {
		return m, nil
	}

	m := NewStorage()
	s[name] = m

	return m, nil
}
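A sketch of the in-memory backend and its object transaction: objects staged in a TxObjectStorage only reach the backing maps on Commit, and Rollback simply discards the staging map.

package main

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/storage/memory"
)

func main() {
	st := memory.NewStorage()

	// Build a blob object in memory.
	obj := st.NewEncodedObject()
	obj.SetType(plumbing.BlobObject)
	w, err := obj.Writer()
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Stage it in a transaction; it lands in st.Objects only on Commit.
	tx := st.Begin()
	h, _ := tx.SetEncodedObject(obj)
	if err := tx.Commit(); err != nil {
		panic(err)
	}
	fmt.Println(st.HasEncodedObject(h) == nil) // true
}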
vendor/github.com/jesseduffield/go-git/v5/storage/storer.go  (new file, generated, vendored, 30 lines)
@@ -0,0 +1,30 @@
package storage

import (
	"errors"

	"github.com/jesseduffield/go-git/v5/config"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
)

var ErrReferenceHasChanged = errors.New("reference has changed concurrently")

// Storer is a generic storage of objects, references and any information
// related to a particular repository. The package github.com/jesseduffield/go-git/v5/storage
// contains two implementations: a filesystem-based implementation (such as
// `.git`) and an ephemeral memory implementation.
type Storer interface {
	storer.EncodedObjectStorer
	storer.ReferenceStorer
	storer.ShallowStorer
	storer.IndexStorer
	config.ConfigStorer
	ModuleStorer
}

// ModuleStorer allows interaction with the modules' Storers.
type ModuleStorer interface {
	// Module returns a Storer representing a submodule; if it does not
	// exist, a new empty Storer is returned.
	Module(name string) (Storer, error)
}
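Because both backends in this diff satisfy storage.Storer, callers can stay backend-agnostic. A sketch (helper name is ours):

package main

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/storage"
	"github.com/jesseduffield/go-git/v5/storage/memory"
)

// countRefs works against any backend, filesystem or memory.
func countRefs(s storage.Storer) (int, error) {
	n := 0
	iter, err := s.IterReferences()
	if err != nil {
		return 0, err
	}
	defer iter.Close()
	err = iter.ForEach(func(*plumbing.Reference) error {
		n++
		return nil
	})
	return n, err
}

func main() {
	n, err := countRefs(memory.NewStorage())
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 0 for a fresh in-memory storage
}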