1
0
mirror of https://github.com/jesseduffield/lazygit.git synced 2025-06-17 00:18:05 +02:00

Bump go-git

This commit is contained in:
Stefan Haller
2025-04-09 10:38:46 +02:00
parent da0105c16b
commit 4cf49ff449
527 changed files with 70489 additions and 10167 deletions

View File

@ -1,4 +1,4 @@
// Package binary implements sintax-sugar functions on top of the standard
// Package binary implements syntax-sugar functions on top of the standard
// library binary package
package binary

View File

@ -7,7 +7,7 @@ import (
"errors"
"io"
"github.com/jbenet/go-context/io"
ctxio "github.com/jbenet/go-context/io"
)
type readPeeker interface {
@ -55,6 +55,28 @@ func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
return &readCloser{Reader: r, closer: c}
}
type readCloserCloser struct {
io.ReadCloser
closer func() error
}
func (r *readCloserCloser) Close() (err error) {
defer func() {
if err == nil {
err = r.closer()
return
}
_ = r.closer()
}()
return r.ReadCloser.Close()
}
// NewReadCloserWithCloser creates an `io.ReadCloser` with the given `io.ReaderCloser` and
// `io.Closer` that ensures that the closer is closed on close
func NewReadCloserWithCloser(r io.ReadCloser, c func() error) io.ReadCloser {
return &readCloserCloser{ReadCloser: r, closer: c}
}
type writeCloser struct {
io.Writer
closer io.Closer
@ -82,6 +104,24 @@ func WriteNopCloser(w io.Writer) io.WriteCloser {
return writeNopCloser{w}
}
type readerAtAsReader struct {
io.ReaderAt
offset int64
}
func (r *readerAtAsReader) Read(bs []byte) (int, error) {
n, err := r.ReaderAt.ReadAt(bs, r.offset)
r.offset += int64(n)
return n, err
}
func NewReaderUsingReaderAt(r io.ReaderAt, offset int64) io.Reader {
return &readerAtAsReader{
ReaderAt: r,
offset: offset,
}
}
// CheckClose calls Close on the given io.Closer. If the given *error points to
// nil, it will be assigned the error returned by Close. Otherwise, any error
// returned by Close will be ignored. CheckClose is usually called with defer.
@ -155,7 +195,7 @@ func NewWriterOnError(w io.Writer, notify func(error)) io.Writer {
}
// NewWriteCloserOnError returns a io.WriteCloser that call the notify function
//when an unexpected (!io.EOF) error happens, after call Write function.
// when an unexpected (!io.EOF) error happens, after call Write function.
func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser {
return NewWriteCloser(NewWriterOnError(w, notify), w)
}

View File

@ -1,12 +1,17 @@
package merkletrie
import (
"errors"
"fmt"
"io"
"github.com/jesseduffield/go-git/v5/utils/merkletrie/noder"
)
var (
ErrEmptyFileName = errors.New("empty filename in tree entry")
)
// Action values represent the kind of things a Change can represent:
// insertion, deletions or modifications of files.
type Action int
@ -121,6 +126,10 @@ func (l *Changes) AddRecursiveDelete(root noder.Path) error {
type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete
func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error {
if root.String() == "" {
return ErrEmptyFileName
}
if !root.IsDir() {
l.Add(ctor(root))
return nil

View File

@ -11,7 +11,7 @@ package merkletrie
// corresponding changes and move the iterators further over both
// trees.
//
// The table bellow show all the possible comparison results, along
// The table below shows all the possible comparison results, along
// with what changes should we produce and how to advance the
// iterators.
//
@ -55,7 +55,7 @@ package merkletrie
// Here is a full list of all the cases that are similar and how to
// merge them together into more general cases. Each general case
// is labeled with an uppercase letter for further reference, and it
// is followed by the pseudocode of the checks you have to perfrom
// is followed by the pseudocode of the checks you have to perform
// on both noders to see if you are in such a case, the actions to
// perform (i.e. what changes to output) and how to advance the
// iterators of each tree to continue the comparison process.
@ -304,13 +304,38 @@ func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
return nil, err
}
case onlyToRemains:
if err = ret.AddRecursiveInsert(to); err != nil {
return nil, err
if to.Skip() {
if err = ret.AddRecursiveDelete(to); err != nil {
return nil, err
}
} else {
if err = ret.AddRecursiveInsert(to); err != nil {
return nil, err
}
}
if err = ii.nextTo(); err != nil {
return nil, err
}
case bothHaveNodes:
if from.Skip() {
if err = ret.AddRecursiveDelete(from); err != nil {
return nil, err
}
if err := ii.nextBoth(); err != nil {
return nil, err
}
break
}
if to.Skip() {
if err = ret.AddRecursiveDelete(to); err != nil {
return nil, err
}
if err := ii.nextBoth(); err != nil {
return nil, err
}
break
}
if err = diffNodes(&ret, ii); err != nil {
return nil, err
}

View File

@ -29,6 +29,8 @@ type node struct {
hash []byte
children []noder.Noder
isDir bool
mode os.FileMode
size int64
}
// NewRootNode returns the root node based on a given billy.Filesystem.
@ -48,8 +50,15 @@ func NewRootNode(
// difftree algorithm will detect changes in the contents of files and also in
// their mode.
//
// Please note that the hash is calculated on first invocation of Hash(),
// meaning that it will not update when the underlying file changes
// between invocations.
//
// The hash of a directory is always a 24-bytes slice of zero values
func (n *node) Hash() []byte {
if n.hash == nil {
n.calculateHash()
}
return n.hash
}
@ -61,6 +70,10 @@ func (n *node) IsDir() bool {
return n.isDir
}
// Skip is part of the noder.Noder interface; filesystem nodes are never
// skipped by the difftree walk.
func (n *node) Skip() bool {
return false
}
func (n *node) Children() ([]noder.Noder, error) {
if err := n.calculateChildren(); err != nil {
return nil, err
@ -99,6 +112,10 @@ func (n *node) calculateChildren() error {
continue
}
if file.Mode()&os.ModeSocket != 0 {
continue
}
c, err := n.newChildNode(file)
if err != nil {
return err
@ -113,81 +130,74 @@ func (n *node) calculateChildren() error {
func (n *node) newChildNode(file os.FileInfo) (*node, error) {
path := path.Join(n.path, file.Name())
hash, err := n.calculateHash(path, file)
if err != nil {
return nil, err
}
node := &node{
fs: n.fs,
submodules: n.submodules,
path: path,
hash: hash,
isDir: file.IsDir(),
size: file.Size(),
mode: file.Mode(),
}
if hash, isSubmodule := n.submodules[path]; isSubmodule {
node.hash = append(hash[:], filemode.Submodule.Bytes()...)
if _, isSubmodule := n.submodules[path]; isSubmodule {
node.isDir = false
}
return node, nil
}
func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) {
if file.IsDir() {
return make([]byte, 24), nil
func (n *node) calculateHash() {
if n.isDir {
n.hash = make([]byte, 24)
return
}
mode, err := filemode.NewFromOSFileMode(n.mode)
if err != nil {
n.hash = plumbing.ZeroHash[:]
return
}
if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule {
n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...)
return
}
var hash plumbing.Hash
var err error
if file.Mode()&os.ModeSymlink != 0 {
hash, err = n.doCalculateHashForSymlink(path, file)
if n.mode&os.ModeSymlink != 0 {
hash = n.doCalculateHashForSymlink()
} else {
hash, err = n.doCalculateHashForRegular(path, file)
hash = n.doCalculateHashForRegular()
}
if err != nil {
return nil, err
}
mode, err := filemode.NewFromOSFileMode(file.Mode())
if err != nil {
return nil, err
}
return append(hash[:], mode.Bytes()...), nil
n.hash = append(hash[:], mode.Bytes()...)
}
func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) {
f, err := n.fs.Open(path)
func (n *node) doCalculateHashForRegular() plumbing.Hash {
f, err := n.fs.Open(n.path)
if err != nil {
return plumbing.ZeroHash, err
return plumbing.ZeroHash
}
defer f.Close()
h := plumbing.NewHasher(plumbing.BlobObject, file.Size())
h := plumbing.NewHasher(plumbing.BlobObject, n.size)
if _, err := io.Copy(h, f); err != nil {
return plumbing.ZeroHash, err
return plumbing.ZeroHash
}
return h.Sum(), nil
return h.Sum()
}
func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) {
target, err := n.fs.Readlink(path)
func (n *node) doCalculateHashForSymlink() plumbing.Hash {
target, err := n.fs.Readlink(n.path)
if err != nil {
return plumbing.ZeroHash, err
return plumbing.ZeroHash
}
h := plumbing.NewHasher(plumbing.BlobObject, file.Size())
h := plumbing.NewHasher(plumbing.BlobObject, n.size)
if _, err := h.Write([]byte(target)); err != nil {
return plumbing.ZeroHash, err
return plumbing.ZeroHash
}
return h.Sum(), nil
return h.Sum()
}
func (n *node) String() string {

View File

@ -19,6 +19,7 @@ type node struct {
entry *index.Entry
children []noder.Noder
isDir bool
skip bool
}
// NewRootNode returns the root node of a computed tree from a index.Index,
@ -39,7 +40,7 @@ func NewRootNode(idx *index.Index) noder.Noder {
continue
}
n := &node{path: fullpath}
n := &node{path: fullpath, skip: e.SkipWorktree}
if fullpath == e.Name {
n.entry = e
} else {
@ -58,6 +59,10 @@ func (n *node) String() string {
return n.path
}
// Skip reports whether this index node should be skipped by the
// difftree walk; it mirrors the entry's SkipWorktree flag captured when
// the node was built.
func (n *node) Skip() bool {
return n.skip
}
// Hash the hash of a filesystem is a 24-byte slice, is the result of
// concatenating the computed plumbing.Hash of the file as a Blob and its
// plumbing.FileMode; that way the difftree algorithm will detect changes in the

View File

@ -53,6 +53,7 @@ type Noder interface {
// implement NumChildren in O(1) while Children is usually more
// complex.
NumChildren() (int, error)
Skip() bool
}
// NoChildren represents the children of a noder without children.

View File

@ -15,6 +15,14 @@ import (
// not be used.
type Path []Noder
// Skip reports whether the final noder of the path asks to be skipped.
// An empty path is never skipped.
func (p Path) Skip() bool {
	if len(p) == 0 {
		return false
	}
	return p.Last().Skip()
}
// String returns the full path of the final noder as a string, using
// "/" as the separator.
func (p Path) String() string {

View File

@ -0,0 +1,29 @@
package sync
import (
"bufio"
"io"
"sync"
)
var bufioReader = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}
// GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool.
// Returns a bufio.Reader that is reset with reader and ready for use.
//
// After use, the *bufio.Reader should be put back into the sync.Pool
// by calling PutBufioReader.
func GetBufioReader(reader io.Reader) *bufio.Reader {
r := bufioReader.Get().(*bufio.Reader)
r.Reset(reader)
return r
}
// PutBufioReader puts reader back into its sync.Pool.
func PutBufioReader(reader *bufio.Reader) {
bufioReader.Put(reader)
}

View File

@ -0,0 +1,51 @@
package sync
import (
"bytes"
"sync"
)
var (
	// byteSlice hands out 16 KiB scratch slices by pointer, avoiding an
	// extra interface allocation on every Get/Put round trip.
	byteSlice = sync.Pool{
		New: func() interface{} {
			b := make([]byte, 16*1024)
			return &b
		},
	}

	// bytesBuffer pools reusable *bytes.Buffer values.
	bytesBuffer = sync.Pool{
		New: func() interface{} {
			return bytes.NewBuffer(nil)
		},
	}
)

// GetByteSlice returns a *[]byte that is managed by a sync.Pool.
// The initial slice length will be 16384 (16kb).
//
// After use, the *[]byte should be put back into the sync.Pool
// by calling PutByteSlice.
func GetByteSlice() *[]byte {
	return byteSlice.Get().(*[]byte)
}

// PutByteSlice puts buf back into its sync.Pool.
func PutByteSlice(buf *[]byte) {
	byteSlice.Put(buf)
}

// GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool.
// Returns a buffer that is reset and ready for use.
//
// After use, the *bytes.Buffer should be put back into the sync.Pool
// by calling PutBytesBuffer.
func GetBytesBuffer() *bytes.Buffer {
	b := bytesBuffer.Get().(*bytes.Buffer)
	b.Reset()
	return b
}

// PutBytesBuffer puts buf back into its sync.Pool.
func PutBytesBuffer(buf *bytes.Buffer) {
	bytesBuffer.Put(buf)
}

View File

@ -0,0 +1,74 @@
package sync
import (
"bytes"
"compress/zlib"
"io"
"sync"
)
var (
	// zlibInitBytes is a minimal valid zlib stream used to prime pooled
	// readers, since zlib.NewReader requires well-formed input up front.
	zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}

	zlibReader = sync.Pool{
		New: func() interface{} {
			r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
			return ZLibReader{
				Reader: r.(zlibReadCloser),
			}
		},
	}

	zlibWriter = sync.Pool{
		New: func() interface{} {
			return zlib.NewWriter(nil)
		},
	}
)

// zlibReadCloser is what a pooled zlib reader must satisfy: readable,
// closable, and resettable onto a new compressed stream.
type zlibReadCloser interface {
	io.ReadCloser
	zlib.Resetter
}

// ZLibReader bundles a pooled zlib reader with the pooled dictionary
// slice it was reset with, so both can be returned to their pools.
type ZLibReader struct {
	dict   *[]byte
	Reader zlibReadCloser
}

// GetZlibReader returns a ZLibReader that is managed by a sync.Pool.
// Returns a ZLibReader that is reset using a dictionary that is
// also managed by a sync.Pool.
//
// After use, the ZLibReader should be put back into the sync.Pool
// by calling PutZlibReader.
func GetZlibReader(r io.Reader) (ZLibReader, error) {
	zr := zlibReader.Get().(ZLibReader)
	zr.dict = GetByteSlice()
	return zr, zr.Reader.Reset(r, *zr.dict)
}

// PutZlibReader puts z back into its sync.Pool, first closing the reader.
// The Byte slice dictionary is also put back into its sync.Pool.
func PutZlibReader(z ZLibReader) {
	z.Reader.Close()
	PutByteSlice(z.dict)
	zlibReader.Put(z)
}

// GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool.
// Returns a writer that is reset with w and ready for use.
//
// After use, the *zlib.Writer should be put back into the sync.Pool
// by calling PutZlibWriter.
func GetZlibWriter(w io.Writer) *zlib.Writer {
	zw := zlibWriter.Get().(*zlib.Writer)
	zw.Reset(w)
	return zw
}

// PutZlibWriter puts w back into its sync.Pool.
func PutZlibWriter(w *zlib.Writer) {
	zlibWriter.Put(w)
}

View File

@ -0,0 +1,55 @@
package trace
import (
"fmt"
"log"
"os"
"sync/atomic"
)
var (
// logger is the logger to use for tracing.
logger = newLogger()
// current is the targets that are enabled for tracing.
current atomic.Int32
)
func newLogger() *log.Logger {
return log.New(os.Stderr, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
}
// Target is a tracing target.
type Target int32
const (
// General traces general operations.
General Target = 1 << iota
// Packet traces git packets.
Packet
)
// SetTarget sets the tracing targets.
func SetTarget(target Target) {
current.Store(int32(target))
}
// SetLogger sets the logger to use for tracing.
func SetLogger(l *log.Logger) {
logger = l
}
// Print prints the given message only if the target is enabled.
func (t Target) Print(args ...interface{}) {
if int32(t)&current.Load() != 0 {
logger.Output(2, fmt.Sprint(args...)) // nolint: errcheck
}
}
// Printf prints the given message only if the target is enabled.
func (t Target) Printf(format string, args ...interface{}) {
if int32(t)&current.Load() != 0 {
logger.Output(2, fmt.Sprintf(format, args...)) // nolint: errcheck
}
}