Bump go-git
vendor/github.com/jesseduffield/go-git/v5/remote.go (576 changed lines; generated, vendored)
@@ -5,9 +5,13 @@ import (
"errors"
"fmt"
"io"
"strings"
"time"

"github.com/go-git/go-billy/v5/osfs"

"github.com/jesseduffield/go-git/v5/config"
"github.com/jesseduffield/go-git/v5/internal/url"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/cache"
"github.com/jesseduffield/go-git/v5/plumbing/format/packfile"
@@ -30,14 +34,31 @@ var (
ErrDeleteRefNotSupported = errors.New("server does not support delete-refs")
ErrForceNeeded = errors.New("some refs were not updated")
ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec")
ErrEmptyUrls = errors.New("URLs cannot be empty")
)

type NoMatchingRefSpecError struct {
refSpec config.RefSpec
}

func (e NoMatchingRefSpecError) Error() string {
return fmt.Sprintf("couldn't find remote ref %q", e.refSpec.Src())
}

func (e NoMatchingRefSpecError) Is(target error) bool {
_, ok := target.(NoMatchingRefSpecError)
return ok
}

const (
// This describes the maximum number of commits to walk when
// computing the haves to send to a server, for each ref in the
// repo containing this remote, when not using the multi-ack
// protocol. Setting this to 0 means there is no limit.
maxHavesToVisitPerRef = 100

// peeledSuffix is the suffix used to build peeled reference names.
peeledSuffix = "^{}"
)

// Remote represents a connection to a remote repository.
@@ -62,7 +83,7 @@ func (r *Remote) String() string {
var fetch, push string
if len(r.c.URLs) > 0 {
fetch = r.c.URLs[0]
push = r.c.URLs[0]
push = r.c.URLs[len(r.c.URLs)-1]
}

return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push)
@@ -78,7 +99,7 @@ func (r *Remote) Push(o *PushOptions) error {
// the remote was already up-to-date.
//
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects to the
// operation is complete, an error is returned. The context only affects the
// transport operations.
func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
if err := o.Validate(); err != nil {
@@ -89,14 +110,18 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
}

s, err := newSendPackSession(r.c.URLs[0], o.Auth)
if o.RemoteURL == "" && len(r.c.URLs) > 0 {
o.RemoteURL = r.c.URLs[len(r.c.URLs)-1]
}

s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
if err != nil {
return err
}

defer ioutil.CheckClose(s, &err)

ar, err := s.AdvertisedReferences()
ar, err := s.AdvertisedReferencesContext(ctx)
if err != nil {
return err
}
@@ -106,6 +131,10 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return err
}

if err := r.checkRequireRemoteRefs(o.RequireRemoteRefs, remoteRefs); err != nil {
return err
}

isDelete := false
allDelete := true
for _, rs := range o.RefSpecs {
@@ -126,7 +155,7 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
if o.Force {
for i := 0; i < len(o.RefSpecs); i++ {
rs := &o.RefSpecs[i]
if !rs.IsForceUpdate() {
if !rs.IsForceUpdate() && !rs.IsDelete() {
o.RefSpecs[i] = config.RefSpec("+" + rs.String())
}
}
@@ -165,12 +194,12 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
var hashesToPush []plumbing.Hash
// Avoid the expensive revlist operation if we're only doing deletes.
if !allDelete {
if r.c.IsFirstURLLocal() {
if url.IsLocalEndpoint(o.RemoteURL) {
// If we're are pushing to a local repo, it might be much
// faster to use a local storage layer to get the commits
// to ignore, when calculating the object revlist.
localStorer := filesystem.NewStorage(
osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault())
osfs.New(o.RemoteURL), cache.NewObjectLRUDefault())
hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
r.s, localStorer, objects, haves)
} else {
@@ -196,17 +225,87 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return err
}

if err = rs.Error(); err != nil {
return err
if rs != nil {
if err = rs.Error(); err != nil {
return err
}
}

return r.updateRemoteReferenceStorage(req, rs)
return r.updateRemoteReferenceStorage(req)
}

func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
return !ar.Capabilities.Supports(capability.OFSDelta)
}

func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
tags := make(map[plumbing.Reference]struct{})
// get a list of all tags locally
for _, ref := range localRefs {
if strings.HasPrefix(string(ref.Name()), "refs/tags") {
tags[*ref] = struct{}{}
}
}

remoteRefIter, err := remoteRefs.IterReferences()
if err != nil {
return err
}

// remove any that are already on the remote
if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error {
delete(tags, *reference)
return nil
}); err != nil {
return err
}

for tag := range tags {
tagObject, err := object.GetObject(r.s, tag.Hash())
var tagCommit *object.Commit
if err != nil {
return fmt.Errorf("get tag object: %w", err)
}

if tagObject.Type() != plumbing.TagObject {
continue
}

annotatedTag, ok := tagObject.(*object.Tag)
if !ok {
return errors.New("could not get annotated tag object")
}

tagCommit, err = object.GetCommit(r.s, annotatedTag.Target)
if err != nil {
return fmt.Errorf("get annotated tag commit: %w", err)
}

// only include tags that are reachable from one of the refs
// already being pushed
for _, cmd := range req.Commands {
if tag.Name() == cmd.Name {
continue
}

if strings.HasPrefix(cmd.Name.String(), "refs/tags") {
continue
}

c, err := object.GetCommit(r.s, cmd.New)
if err != nil {
return fmt.Errorf("get commit %v: %w", cmd.Name, err)
}

if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor {
req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()})
}
}
}

return nil
}

func (r *Remote) newReferenceUpdateRequest(
o *PushOptions,
localRefs []*plumbing.Reference,
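The new addReachableTags helper backs the FollowTags push option: annotated tags that point at commits reachable from the refs being pushed get appended to the reference update request. A minimal caller-side sketch, illustrative only and not part of the diff; the repository path and remote name are assumptions:

// Illustrative sketch: push a branch and follow reachable annotated tags.
package main

import (
    "context"
    "fmt"

    git "github.com/jesseduffield/go-git/v5"
    "github.com/jesseduffield/go-git/v5/config"
)

func main() {
    repo, err := git.PlainOpen("/tmp/repo") // hypothetical path
    if err != nil {
        panic(err)
    }

    err = repo.PushContext(context.Background(), &git.PushOptions{
        RemoteName: "origin",
        RefSpecs:   []config.RefSpec{"refs/heads/main:refs/heads/main"},
        FollowTags: true, // also send annotated tags reachable from the pushed refs
    })
    if err != nil && err != git.NoErrAlreadyUpToDate {
        fmt.Println("push failed:", err)
    }
}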
@@ -224,16 +323,33 @@ func (r *Remote) newReferenceUpdateRequest(
}
}

if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
if ar.Capabilities.Supports(capability.PushOptions) {
_ = req.Capabilities.Set(capability.PushOptions)
for k, v := range o.Options {
req.Options = append(req.Options, &packp.Option{Key: k, Value: v})
}
}

if o.Atomic && ar.Capabilities.Supports(capability.Atomic) {
_ = req.Capabilities.Set(capability.Atomic)
}

if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil {

return nil, err
}

if o.FollowTags {
if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil {
return nil, err
}
}

return req, nil
}

func (r *Remote) updateRemoteReferenceStorage(
req *packp.ReferenceUpdateRequest,
result *packp.ReportStatus,
) error {

for _, spec := range r.c.Fetch {
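newReferenceUpdateRequest now forwards push options and requests atomic ref updates when the server advertises the push-options and atomic capabilities. A hedged sketch of how a caller might set the corresponding PushOptions fields; the option key/value pair is invented for illustration:

// Illustrative sketch: send server push options and ask for an atomic update.
package example

import (
    "context"

    git "github.com/jesseduffield/go-git/v5"
    "github.com/jesseduffield/go-git/v5/config"
)

func pushWithOptions(repo *git.Repository) error {
    return repo.PushContext(context.Background(), &git.PushOptions{
        RemoteName: "origin",
        RefSpecs:   []config.RefSpec{"refs/heads/main:refs/heads/main"},
        // Forwarded only if the server advertises the push-options capability.
        Options: map[string]string{"ci.skip": "true"},
        // Honoured only if the server advertises the atomic capability.
        Atomic: true,
    })
}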
@@ -267,7 +383,7 @@ func (r *Remote) updateRemoteReferenceStorage(
// no changes to be fetched, or an error.
//
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects to the
// operation is complete, an error is returned. The context only affects the
// transport operations.
func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error {
_, err := r.fetch(ctx, o)
@@ -296,14 +412,18 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
o.RefSpecs = r.c.Fetch
}

s, err := newUploadPackSession(r.c.URLs[0], o.Auth)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}

s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
if err != nil {
return nil, err
}

defer ioutil.CheckClose(s, &err)

ar, err := s.AdvertisedReferences()
ar, err := s.AdvertisedReferencesContext(ctx)
if err != nil {
return nil, err
}
@@ -327,14 +447,21 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
return nil, err
}

refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags)
refs, specToRefs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags)
if err != nil {
return nil, err
}

req.Wants, err = getWants(r.s, refs)
if !req.Depth.IsZero() {
req.Shallows, err = r.s.Shallow()
if err != nil {
return nil, fmt.Errorf("existing checkout is not shallow")
}
}

req.Wants, err = getWants(r.s, refs, o.Depth)
if len(req.Wants) > 0 {
req.Haves, err = getHaves(localRefs, remoteRefs, r.s)
req.Haves, err = getHaves(localRefs, remoteRefs, r.s, o.Depth)
if err != nil {
return nil, err
}
@@ -344,20 +471,69 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
}
}

updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force)
var updatedPrune bool
if o.Prune {
updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs)
if err != nil {
return nil, err
}
}

updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force)
if err != nil {
return nil, err
}

if !updated {
return remoteRefs, NoErrAlreadyUpToDate
updated, err = depthChanged(req.Shallows, r.s)
if err != nil {
return nil, fmt.Errorf("error checking depth change: %v", err)
}
}

if !updated && !updatedPrune {
// No references updated, but may have fetched new objects, check if we now have any of our wants
for _, hash := range req.Wants {
exists, _ := objectExists(r.s, hash)
if exists {
updated = true
break
}
}

if !updated {
return remoteRefs, NoErrAlreadyUpToDate
}
}

return remoteRefs, nil
}

func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) {
c, ep, err := newClient(url)
func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) {
after, err := s.Shallow()
if err != nil {
return false, err
}

if len(before) != len(after) {
return true, nil
}

bm := make(map[plumbing.Hash]bool, len(before))
for _, b := range before {
bm[b] = true
}
for _, a := range after {
if _, ok := bm[a]; !ok {
return true, nil
}
}

return false, nil
}

func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.UploadPackSession, error) {
c, ep, err := newClient(url, insecure, cabundle, proxyOpts)
if err != nil {
return nil, err
}
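The fetch path now threads the requested depth into getWants and getHaves, records shallow roots via depthChanged, and can prune remote-tracking refs that disappeared upstream. A sketch of a shallow fetch with pruning, assuming an "origin" remote; not part of the diff:

// Illustrative sketch: shallow fetch with pruning of deleted remote branches.
package example

import (
    "context"

    git "github.com/jesseduffield/go-git/v5"
)

func shallowFetch(repo *git.Repository) error {
    err := repo.FetchContext(context.Background(), &git.FetchOptions{
        RemoteName: "origin",
        Depth:      1,    // ask the server for a shallow history
        Prune:      true, // drop local remote-tracking refs deleted upstream
    })
    if err == git.NoErrAlreadyUpToDate {
        return nil
    }
    return err
}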
@@ -365,8 +541,8 @@ func newUploadPackSession(url string, auth transport.AuthMethod) (transport.Uplo
return c.NewUploadPackSession(ep, auth)
}

func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
c, ep, err := newClient(url)
func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.ReceivePackSession, error) {
c, ep, err := newClient(url, insecure, cabundle, proxyOpts)
if err != nil {
return nil, err
}
@@ -374,11 +550,14 @@ func newSendPackSession(url string, auth transport.AuthMethod) (transport.Receiv
return c.NewReceivePackSession(ep, auth)
}

func newClient(url string) (transport.Transport, *transport.Endpoint, error) {
func newClient(url string, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.Transport, *transport.Endpoint, error) {
ep, err := transport.NewEndpoint(url)
if err != nil {
return nil, nil, err
}
ep.InsecureSkipTLS = insecure
ep.CaBundle = cabundle
ep.Proxy = proxyOpts

c, err := client.NewClient(ep)
if err != nil {
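newClient copies the insecure flag, CA bundle and proxy settings onto the transport endpoint, which is how the new InsecureSkipTLS, CABundle and ProxyOptions fields on the option structs reach the transport layer. A sketch of a fetch through a proxy with a private CA; the file path and proxy URL are made up:

// Illustrative sketch: fetch over HTTPS with a private CA and an HTTP proxy.
package example

import (
    "context"
    "os"

    git "github.com/jesseduffield/go-git/v5"
    "github.com/jesseduffield/go-git/v5/plumbing/transport"
)

func fetchThroughProxy(repo *git.Repository) error {
    ca, err := os.ReadFile("/etc/ssl/private-ca.pem") // hypothetical path
    if err != nil {
        return err
    }

    return repo.FetchContext(context.Background(), &git.FetchOptions{
        RemoteName: "origin",
        CABundle:   ca, // ends up on ep.CaBundle via newClient
        ProxyOptions: transport.ProxyOptions{
            URL: "http://proxy.internal:3128", // hypothetical proxy
        },
        // InsecureSkipTLS: true, // alternative: skip TLS verification entirely
    })
}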
@@ -393,6 +572,10 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl

reader, err := s.UploadPack(ctx, req)
if err != nil {
if errors.Is(err, transport.ErrEmptyUploadPackRequest) {
// XXX: no packfile provided, everything is up-to-date.
return nil
}
return err
}

@@ -411,12 +594,34 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl
return err
}

func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) {
var updatedPrune bool
for _, spec := range specs {
rev := spec.Reverse()
for _, ref := range localRefs {
if !rev.Match(ref.Name()) {
continue
}
_, err := remoteRefs.Reference(rev.Dst(ref.Name()))
if errors.Is(err, plumbing.ErrReferenceNotFound) {
updatedPrune = true
err := r.s.RemoveReference(ref.Name())
if err != nil {
return false, err
}
}
}
}
return updatedPrune, nil
}

func (r *Remote) addReferencesToUpdate(
refspecs []config.RefSpec,
localRefs []*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
prune bool,
forceWithLease *ForceWithLease,
) error {
// This references dictionary will be used to search references by name.
refsDict := make(map[string]*plumbing.Reference)
@@ -430,7 +635,7 @@ func (r *Remote) addReferencesToUpdate(
return err
}
} else {
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease)
if err != nil {
return err
}
@@ -452,20 +657,25 @@ func (r *Remote) addOrUpdateReferences(
refsDict map[string]*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
forceWithLease *ForceWithLease,
) error {
// If it is not a wilcard refspec we can directly search for the reference
// If it is not a wildcard refspec we can directly search for the reference
// in the references dictionary.
if !rs.IsWildcard() {
ref, ok := refsDict[rs.Src()]
if !ok {
commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src()))
if err == nil {
return r.addCommit(rs, remoteRefs, commit.Hash, req)
}
return nil
}

return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
}

for _, ref := range localRefs {
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
if err != nil {
return err
}
@@ -512,9 +722,46 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
})
}

func (r *Remote) addCommit(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash,
req *packp.ReferenceUpdateRequest) error {

if rs.IsWildcard() {
return errors.New("can't use wildcard together with hash refspecs")
}

cmd := &packp.Command{
Name: rs.Dst(""),
Old: plumbing.ZeroHash,
New: localCommit,
}
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
// TODO: check actual git behavior here
return nil
}

cmd.Old = remoteRef.Hash()
} else if err != plumbing.ErrReferenceNotFound {
return err
}
if cmd.Old == cmd.New {
return nil
}
if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
}

req.Commands = append(req.Commands, cmd)
return nil
}

func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
req *packp.ReferenceUpdateRequest) error {
req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error {

if localRef.Type() != plumbing.HashReference {
return nil
@@ -533,7 +780,7 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
//TODO: check actual git behavior here
// TODO: check actual git behavior here
return nil
}

@@ -546,7 +793,11 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}

if !rs.IsForceUpdate() {
if forceWithLease != nil {
if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil {
return err
}
} else if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
@@ -556,6 +807,31 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}

func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error {
remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name)

ref, err := storer.ResolveReference(
r.s,
plumbing.ReferenceName(remotePrefix+strings.Replace(localRef.Name().String(), "refs/heads/", "", -1)))
if err != nil {
return err
}

if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) {
expectedOID := ref.Hash()

if !forceWithLease.Hash.IsZero() {
expectedOID = forceWithLease.Hash
}

if cmd.Old != expectedOID {
return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
}
}

return nil
}

func (r *Remote) references() ([]*plumbing.Reference, error) {
var localRefs []*plumbing.Reference

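checkForceWithLease mirrors git push --force-with-lease: the old value of each pushed ref must match the local remote-tracking ref (or an explicit hash) before the update is accepted. A sketch of a guarded force push; the refspec and remote name are assumptions:

// Illustrative sketch: force-push only if the remote still points where our
// remote-tracking ref says it does (compare-and-swap semantics).
package example

import (
    "context"

    git "github.com/jesseduffield/go-git/v5"
    "github.com/jesseduffield/go-git/v5/config"
)

func safeForcePush(repo *git.Repository) error {
    return repo.PushContext(context.Background(), &git.PushOptions{
        RemoteName: "origin",
        RefSpecs:   []config.RefSpec{"+refs/heads/main:refs/heads/main"},
        // An empty RefName/Hash means: protect every pushed ref, using the
        // local remote-tracking ref as the expected old value.
        ForceWithLease: &git.ForceWithLease{},
    })
}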
@@ -607,23 +883,19 @@ func getHavesFromRef(
remoteRefs map[plumbing.Hash]bool,
s storage.Storer,
haves map[plumbing.Hash]bool,
depth int,
) error {
h := ref.Hash()
if haves[h] {
return nil
}

// No need to load the commit if we know the remote already
// has this hash.
if remoteRefs[h] {
haves[h] = true
return nil
}

commit, err := object.GetCommit(s, h)
if err != nil {
// Ignore the error if this isn't a commit.
haves[ref.Hash()] = true
if !errors.Is(err, plumbing.ErrObjectNotFound) {
// Ignore the error if this isn't a commit.
haves[ref.Hash()] = true
}
return nil
}

@@ -632,7 +904,13 @@ func getHavesFromRef(
// commits from the history of each ref.
walker := object.NewCommitPreorderIter(commit, haves, nil)
toVisit := maxHavesToVisitPerRef
return walker.ForEach(func(c *object.Commit) error {
// But only need up to the requested depth
if depth > 0 && depth < maxHavesToVisitPerRef {
toVisit = depth
}
// It is safe to ignore any error here as we are just trying to find the references that we already have
// An example of a legitimate failure is we have a shallow clone and don't have the previous commit(s)
_ = walker.ForEach(func(c *object.Commit) error {
haves[c.Hash] = true
toVisit--
// If toVisit starts out at 0 (indicating there is no
@@ -643,12 +921,15 @@ func getHavesFromRef(
}
return nil
})

return nil
}

func getHaves(
localRefs []*plumbing.Reference,
remoteRefStorer storer.ReferenceStorer,
s storage.Storer,
depth int,
) ([]plumbing.Hash, error) {
haves := map[plumbing.Hash]bool{}

@@ -669,7 +950,7 @@ func getHaves(
continue
}

err = getHavesFromRef(ref, remoteRefs, s, haves)
err = getHavesFromRef(ref, remoteRefs, s, haves, depth)
if err != nil {
return nil, err
}
@@ -689,42 +970,41 @@ func calculateRefs(
spec []config.RefSpec,
remoteRefs storer.ReferenceStorer,
tagMode TagMode,
) (memory.ReferenceStorage, error) {
) (memory.ReferenceStorage, [][]*plumbing.Reference, error) {
if tagMode == AllTags {
spec = append(spec, refspecAllTags)
}

refs := make(memory.ReferenceStorage)
for _, s := range spec {
if err := doCalculateRefs(s, remoteRefs, refs); err != nil {
return nil, err
// list of references matched for each spec
specToRefs := make([][]*plumbing.Reference, len(spec))
for i := range spec {
var err error
specToRefs[i], err = doCalculateRefs(spec[i], remoteRefs, refs)
if err != nil {
return nil, nil, err
}
}

return refs, nil
return refs, specToRefs, nil
}

func doCalculateRefs(
s config.RefSpec,
remoteRefs storer.ReferenceStorer,
refs memory.ReferenceStorage,
) error {
iter, err := remoteRefs.IterReferences()
if err != nil {
return err
}
) ([]*plumbing.Reference, error) {
var refList []*plumbing.Reference

if s.IsExactSHA1() {
ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src()))
return refs.SetReference(ref)

refList = append(refList, ref)
return refList, refs.SetReference(ref)
}

var matched bool
err = iter.ForEach(func(ref *plumbing.Reference) error {
if !s.Match(ref.Name()) {
return nil
}

onMatched := func(ref *plumbing.Reference) error {
if ref.Type() == plumbing.SymbolicReference {
target, err := storer.ResolveReference(remoteRefs, ref.Name())
if err != nil {
@@ -739,25 +1019,49 @@ func doCalculateRefs(
}

matched = true
if err := refs.SetReference(ref); err != nil {
return err
}

if !s.IsWildcard() {
return storer.ErrStop
}

return nil
})

if !matched && !s.IsWildcard() {
return fmt.Errorf("couldn't find remote ref %q", s.Src())
refList = append(refList, ref)
return refs.SetReference(ref)
}

return err
var ret error
if s.IsWildcard() {
iter, err := remoteRefs.IterReferences()
if err != nil {
return nil, err
}
ret = iter.ForEach(func(ref *plumbing.Reference) error {
if !s.Match(ref.Name()) {
return nil
}

return onMatched(ref)
})
} else {
var resolvedRef *plumbing.Reference
src := s.Src()
resolvedRef, ret = expand_ref(remoteRefs, plumbing.ReferenceName(src))
if ret == nil {
ret = onMatched(resolvedRef)
}
}

if !matched && !s.IsWildcard() {
return nil, NoMatchingRefSpecError{refSpec: s}
}

return refList, ret
}

func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) {
func getWants(localStorer storage.Storer, refs memory.ReferenceStorage, depth int) ([]plumbing.Hash, error) {
// If depth is anything other than 1 and the repo has shallow commits then just because we have the commit
// at the reference doesn't mean that we don't still need to fetch the parents
shallow := false
if depth != 1 {
if s, _ := localStorer.Shallow(); len(s) > 0 {
shallow = true
}
}

wants := map[plumbing.Hash]bool{}
for _, ref := range refs {
hash := ref.Hash()
@@ -766,7 +1070,7 @@ func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumb
return nil, err
}

if !exists {
if !exists || shallow {
wants[hash] = true
}
}
@@ -802,7 +1106,7 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe
return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
}

ff, err := isFastForward(s, cmd.Old, cmd.New)
ff, err := isFastForward(s, cmd.Old, cmd.New, nil)
if err != nil {
return err
}
@@ -814,14 +1118,28 @@ func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.Refe
return nil
}

func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) {
func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earliestShallow *plumbing.Hash) (bool, error) {
c, err := object.GetCommit(s, new)
if err != nil {
return false, err
}

parentsToIgnore := []plumbing.Hash{}
if earliestShallow != nil {
earliestCommit, err := object.GetCommit(s, *earliestShallow)
if err != nil {
return false, err
}

parentsToIgnore = earliestCommit.ParentHashes
}

found := false
iter := object.NewCommitPreorderIter(c, nil, nil)
// stop iterating at the earliest shallow commit, ignoring its parents
// note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents.
// as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no
// real way of telling whether it will be a fast-forward merge.
iter := object.NewCommitPreorderIter(c, nil, parentsToIgnore)
err = iter.ForEach(func(c *object.Commit) error {
if c.Hash != old {
return nil
@@ -909,34 +1227,35 @@ func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.P
func (r *Remote) updateLocalReferenceStorage(
specs []config.RefSpec,
fetchedRefs, remoteRefs memory.ReferenceStorage,
specToRefs [][]*plumbing.Reference,
tagMode TagMode,
force bool,
) (updated bool, err error) {
isWildcard := true
forceNeeded := false

for _, spec := range specs {
for i, spec := range specs {
if !spec.IsWildcard() {
isWildcard = false
}

for _, ref := range fetchedRefs {
if !spec.Match(ref.Name()) && !spec.IsExactSHA1() {
continue
}

for _, ref := range specToRefs[i] {
if ref.Type() != plumbing.HashReference {
continue
}

localName := spec.Dst(ref.Name())
// If localName doesn't start with "refs/" then treat as a branch.
if !strings.HasPrefix(localName.String(), "refs/") {
localName = plumbing.NewBranchReferenceName(localName.String())
}
old, _ := storer.ResolveReference(r.s, localName)
new := plumbing.NewHashReference(localName, ref.Hash())

// If the ref exists locally as a branch and force is not specified,
// only update if the new ref is an ancestor of the old
if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() {
ff, err := isFastForward(r.s, old.Hash(), new.Hash())
// If the ref exists locally as a non-tag and force is not
// specified, only update if the new ref is an ancestor of the old
if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() {
ff, err := isFastForward(r.s, old.Hash(), new.Hash(), nil)
if err != nil {
return updated, err
}
@@ -1011,15 +1330,40 @@ func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, e
}

// List the references on the remote repository.
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects to the
// transport operations.
func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) {
return r.list(ctx, o)
}

func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
s, err := newUploadPackSession(r.c.URLs[0], o.Auth)
timeout := o.Timeout
// Default to the old hardcoded 10s value if a timeout is not explicitly set.
if timeout == 0 {
timeout = 10
}
if timeout < 0 {
return nil, fmt.Errorf("invalid timeout: %d", timeout)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
defer cancel()
return r.ListContext(ctx, o)
}

func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) {
if r.c == nil || len(r.c.URLs) == 0 {
return nil, ErrEmptyUrls
}

s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
if err != nil {
return nil, err
}

defer ioutil.CheckClose(s, &err)

ar, err := s.AdvertisedReferences()
ar, err := s.AdvertisedReferencesContext(ctx)
if err != nil {
return nil, err
}
@@ -1035,13 +1379,22 @@ func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) {
}

var resultRefs []*plumbing.Reference
err = refs.ForEach(func(ref *plumbing.Reference) error {
resultRefs = append(resultRefs, ref)
return nil
})
if err != nil {
return nil, err
if o.PeelingOption == AppendPeeled || o.PeelingOption == IgnorePeeled {
err = refs.ForEach(func(ref *plumbing.Reference) error {
resultRefs = append(resultRefs, ref)
return nil
})
if err != nil {
return nil, err
}
}

if o.PeelingOption == AppendPeeled || o.PeelingOption == OnlyPeeled {
for k, v := range ar.Peeled {
resultRefs = append(resultRefs, plumbing.NewReferenceFromStrings(k+"^{}", v.String()))
}
}

return resultRefs, nil
}

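List now delegates to ListContext with a default 10-second timeout and filters the advertised refs according to PeelingOption. A sketch of listing refs with an explicit timeout and peeled tag entries; the field values are illustrative and the option names are taken from the diff above:

// Illustrative sketch: list remote refs with an explicit timeout and peeled tags.
package example

import (
    "fmt"

    git "github.com/jesseduffield/go-git/v5"
)

func listRemote(repo *git.Repository) error {
    remote, err := repo.Remote("origin")
    if err != nil {
        return err
    }

    refs, err := remote.List(&git.ListOptions{
        Timeout:       30,               // seconds; 0 falls back to the 10s default above
        PeelingOption: git.AppendPeeled, // also return the "<tag>^{}" peeled entries
    })
    if err != nil {
        return err
    }

    for _, ref := range refs {
        fmt.Println(ref)
    }
    return nil
}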
@@ -1087,7 +1440,6 @@ func pushHashes(
useRefDeltas bool,
allDelete bool,
) (*packp.ReportStatus, error) {

rd, wr := io.Pipe()

config, err := s.Config()
@@ -1151,3 +1503,33 @@ outer:

return r.s.SetShallow(shallows)
}

func (r *Remote) checkRequireRemoteRefs(requires []config.RefSpec, remoteRefs storer.ReferenceStorer) error {
for _, require := range requires {
if require.IsWildcard() {
return fmt.Errorf("wildcards not supported in RequireRemoteRefs, got %s", require.String())
}

name := require.Dst("")
remote, err := remoteRefs.Reference(name)
if err != nil {
return fmt.Errorf("remote ref %s required to be %s but is absent", name.String(), require.Src())
}

var requireHash string
if require.IsExactSHA1() {
requireHash = require.Src()
} else {
target, err := storer.ResolveReference(remoteRefs, plumbing.ReferenceName(require.Src()))
if err != nil {
return fmt.Errorf("could not resolve ref %s in RequireRemoteRefs", require.Src())
}
requireHash = target.Hash().String()
}
if remote.Hash().String() != requireHash {
return fmt.Errorf("remote ref %s required to be %s but is %s", name.String(), requireHash, remote.Hash().String())
}
}
return nil
}