
Bump go-git

This commit is contained in:
Stefan Haller
2025-04-09 10:38:46 +02:00
parent da0105c16b
commit 4cf49ff449
527 changed files with 70489 additions and 10167 deletions

View File

@ -2,3 +2,6 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/
.git-dist/
.vscode

View File

@ -1,111 +1,234 @@
Supported Capabilities
======================
# Supported Features
Here is a non-comprehensive table of git commands and features whose equivalent
is supported by go-git.
Here is a non-comprehensive table of git commands and features and their
compatibility status with go-git.
| Feature | Status | Notes |
|---------------------------------------|--------|-------|
| **config** |
| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. |
| **getting and creating repositories** |
| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. |
| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. |
| **basic snapshotting** |
| add | ✔ | Plain add is supported. No other flags are supported. |
| status | ✔ |
| commit | ✔ |
| reset | ✔ |
| rm | ✔ |
| mv | ✔ |
| **branching and merging** |
| branch | ✔ |
| checkout | ✔ | Basic usages of checkout are supported. |
| merge | ✖ |
| mergetool | ✖ |
| stash | ✖ |
| tag | |
| **sharing and updating projects** |
| fetch | ✔ |
| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. |
| push | ✔ |
| remote | ✔ |
| submodule | ✔ |
| **inspection and comparison** |
| show | ✔ |
| log | ✔ |
| shortlog | (see log) |
| describe | |
| **patching** |
| apply | ✖ |
| cherry-pick | ✖ |
| diff | ✔ | Patch object with UnifiedDiff output representation |
| rebase | ✖ |
| revert | ✖ |
| **debugging** |
| bisect | ✖ |
| blame | ✔ |
| grep | ✔ |
| **email** |
| am | ✖ |
| apply | ✖ |
| format-patch | ✖ |
| send-email | ✖ |
| request-pull | ✖ |
| **external systems** |
| svn | ✖ |
| fast-import | |
| **administration** |
| clean | ✔ |
| gc | ✖ |
| fsck | ✖ |
| reflog | ✖ |
| filter-branch | |
| instaweb | ✖ |
| archive | |
| bundle | |
| prune | ✖ |
| repack | ✖ |
| **server admin** |
| daemon | |
| update-server-info | |
| **advanced** |
| notes | ✖ |
| replace | ✖ |
| worktree | ✖ |
| annotate | (see blame) |
| **gpg** |
| git-verify-commit | ✔ |
| git-verify-tag | ✔ |
| **plumbing commands** |
| cat-file | ✔ |
| check-ignore | |
| commit-tree | |
| count-objects | |
| diff-index | |
| for-each-ref | |
| hash-object | |
| ls-files | ✔ |
| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
| read-tree | |
| rev-list | ✔ |
| rev-parse | |
| show-ref | |
| symbolic-ref | |
| update-index | |
| update-ref | |
| verify-pack | |
| write-tree | |
| **protocols** |
| http(s):// (dumb) | ✖ |
| http(s):// (smart) | |
| git:// | |
| ssh:// | |
| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. |
| custom | ✔ |
| **other features** |
| gitignore | |
| gitattributes | |
| index version | |
| packfile version | |
| push-certs | ✖ |
## Getting and creating repositories
| Feature | Sub-feature | Status | Notes | Examples |
| ------- | ------------------------------------------------------------------------------------------------------------------ | ------ | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `init` | | ✅ | | |
| `init` | `--bare` | ✅ | | |
| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh (private_key)](_examples/clone/auth/ssh/private_key/main.go) <br/> - [clone ssh (ssh_agent)](_examples/clone/auth/ssh/ssh_agent/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |
## Basic snapshotting
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | -------------------------------------------------------- | ------------------------------------ |
| `add`    |             | ✅     | Plain add is supported. No other flags are supported.    | |
| `status` | | ✅ | | |
| `commit` | | ✅ | | - [commit](_examples/commit/main.go) |
| `reset` | | ✅ | | |
| `rm` | | ✅ | | |
| `mv` | | ✅ | | |
## Branching and merging
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
| `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
| `merge` | | ⚠️ (partial) | Fast-forward only | |
| `mergetool` | | ❌ | | |
| `stash` | | ❌ | | |
| `sparse-checkout` | | ✅ | | - [sparse-checkout](_examples/sparse-checkout/main.go) |
| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
## Sharing and updating projects
| Feature | Sub-feature | Status | Notes | Examples |
| ----------- | ----------- | ------ | ----------------------------------------------------------------------- | ------------------------------------------ |
| `fetch` | | ✅ | | |
| `pull` | | ✅ | Only supports merges where the merge can be resolved as a fast-forward. | - [pull](_examples/pull/main.go) |
| `push` | | ✅ | | - [push](_examples/push/main.go) |
| `remote` | | ✅ | | - [remotes](_examples/remotes/main.go) |
| `submodule` | | ✅ | | - [submodule](_examples/submodule/main.go) |
| `submodule` | deinit | ❌ | | |
## Inspection and comparison
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | --------- | ----- | ------------------------------ |
| `show` | | ✅ | | |
| `log` | | ✅ | | - [log](_examples/log/main.go) |
| `shortlog` | | (see log) | | |
| `describe` | | ❌ | | |
## Patching
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ---------------------------------------------------- | -------- |
| `apply` | | ❌ | | |
| `cherry-pick` | | ❌ | | |
| `diff` | | ✅ | Patch object with UnifiedDiff output representation. | |
| `rebase` | | ❌ | | |
| `revert` | | ❌ | | |
## Debugging
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ----- | ---------------------------------- |
| `bisect` | | ❌ | | |
| `blame` | | ✅ | | - [blame](_examples/blame/main.go) |
| `grep` | | ✅ | | |
## Email
| Feature | Sub-feature | Status | Notes | Examples |
| -------------- | ----------- | ------ | ----- | -------- |
| `am` | | ❌ | | |
| `apply` | | ❌ | | |
| `format-patch` | | ❌ | | |
| `send-email` | | ❌ | | |
| `request-pull` | | ❌ | | |
## External systems
| Feature | Sub-feature | Status | Notes | Examples |
| ------------- | ----------- | ------ | ----- | -------- |
| `svn` | | ❌ | | |
| `fast-import` | | ❌ | | |
| `lfs` | | ❌ | | |
## Administration
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ----------- | ------ | ----- | -------- |
| `clean` | | ✅ | | |
| `gc` | | ❌ | | |
| `fsck` | | ❌ | | |
| `reflog` | | ❌ | | |
| `filter-branch` | | ❌ | | |
| `instaweb` | | ❌ | | |
| `archive` | | ❌ | | |
| `bundle` | | ❌ | | |
| `prune` | | ❌ | | |
| `repack` | | ❌ | | |
## Server admin
| Feature | Sub-feature | Status | Notes | Examples |
| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
| `daemon` | | ❌ | | |
| `update-server-info` | | ✅ | | [cli](./cli/go-git/update_server_info.go) |
## Advanced
| Feature | Sub-feature | Status | Notes | Examples |
| ---------- | ----------- | ----------- | ----- | -------- |
| `notes` | | ❌ | | |
| `replace` | | ❌ | | |
| `worktree` | | ❌ | | |
| `annotate` | | (see blame) | | |
## GPG
| Feature | Sub-feature | Status | Notes | Examples |
| ------------------- | ----------- | ------ | ----- | -------- |
| `git-verify-commit` | | ✅ | | |
| `git-verify-tag` | | ✅ | | |
## Plumbing commands
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | ------------------------------------- | ------------ | --------------------------------------------------- | -------------------------------------------- |
| `cat-file` | | ✅ | | |
| `check-ignore` | | ❌ | | |
| `commit-tree` | | ❌ | | |
| `count-objects` | | ❌ | | |
| `diff-index` | | ❌ | | |
| `for-each-ref` | | ✅ | | |
| `hash-object` | | ✅ | | |
| `ls-files` | | ✅ | | |
| `ls-remote` | | ✅ | | - [ls-remote](_examples/ls-remote/main.go) |
| `merge-base` | `--independent` <br/> `--is-ancestor` | ⚠️ (partial) | Calculates the merge-base only between two commits. | - [merge-base](_examples/merge_base/main.go) |
| `merge-base` | `--fork-point` <br/> `--octopus` | ❌ | | |
| `read-tree` | | ❌ | | |
| `rev-list` | | ✅ | | |
| `rev-parse` | | ❌ | | |
| `show-ref` | | ✅ | | |
| `symbolic-ref` | | ✅ | | |
| `update-index` | | ❌ | | |
| `update-ref` | | ❌ | | |
| `verify-pack` | | ❌ | | |
| `write-tree` | | ❌ | | |
## Indexes and Git Protocols
| Feature | Version | Status | Notes |
| -------------------- | ------------------------------------------------------------------------------- | ------ | ----- |
| index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| index | [v2](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ✅ | |
| index | [v3](https://github.com/git/git/blob/master/Documentation/gitformat-index.txt) | ❌ | |
| pack-protocol | [v1](https://github.com/git/git/blob/master/Documentation/gitprotocol-pack.txt) | ✅ | |
| pack-protocol | [v2](https://github.com/git/git/blob/master/Documentation/gitprotocol-v2.txt) | ❌ | |
| multi-pack-index | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.rev files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| pack-\*.mtimes files | [v1](https://github.com/git/git/blob/master/Documentation/gitformat-pack.txt) | ❌ | |
| cruft packs | | ❌ | |
## Capabilities
| Feature | Status | Notes |
| ------------------------------ | ------------ | ----- |
| `multi_ack` | ❌ | |
| `multi_ack_detailed` | ❌ | |
| `no-done` | ❌ | |
| `thin-pack` | ❌ | |
| `side-band` | ⚠️ (partial) | |
| `side-band-64k` | ⚠️ (partial) | |
| `ofs-delta` | ✅ | |
| `agent` | ✅ | |
| `object-format` | ❌ | |
| `symref` | ✅ | |
| `shallow` | ✅ | |
| `deepen-since` | ✅ | |
| `deepen-not` | ❌ | |
| `deepen-relative` | ❌ | |
| `no-progress` | ✅ | |
| `include-tag` | ✅ | |
| `report-status` | ✅ | |
| `report-status-v2` | ❌ | |
| `delete-refs` | ✅ | |
| `quiet` | ❌ | |
| `atomic` | ✅ | |
| `push-options` | ✅ | |
| `allow-tip-sha1-in-want` | ✅ | |
| `allow-reachable-sha1-in-want` | ❌ | |
| `push-cert=<nonce>` | ❌ | |
| `filter` | ❌ | |
| `session-id=<session id>` | ❌ | |
## Transport Schemes
| Scheme | Status | Notes | Examples |
| -------------------- | ------------ | ---------------------------------------------------------------------- | ---------------------------------------------- |
| `http(s)://` (dumb) | ❌ | | |
| `http(s)://` (smart) | ✅ | | |
| `git://` | ✅ | | |
| `ssh://` | ✅ | | |
| `file://` | ⚠️ (partial) | Warning: this is not pure Golang. This shells out to the `git` binary. | |
| Custom | ✅ | All existing schemes can be replaced by custom implementations. | - [custom_http](_examples/custom_http/main.go) |
## SHA256
| Feature | Sub-feature | Status | Notes | Examples |
| -------- | ----------- | ------ | ---------------------------------- | ------------------------------------ |
| `init` | | ✅ | Requires building with tag sha256. | - [init](_examples/sha256/main.go) |
| `commit` | | ✅ | Requires building with tag sha256. | - [commit](_examples/sha256/main.go) |
| `pull` | | ❌ | | |
| `fetch` | | ❌ | | |
| `push` | | ❌ | | |
## Other features
| Feature | Sub-feature | Status | Notes | Examples |
| --------------- | --------------------------- | ------ | ---------------------------------------------- | -------- |
| `config` | `--local` | ✅ | Read and write per-repository (`.git/config`). | |
| `config` | `--global` <br/> `--system` | ✅ | Read-only. | |
| `gitignore` | | ✅ | | |
| `gitattributes` | | ✅ | | |
| `git-worktree` | | ❌ | Multiple worktrees are not supported. | |

View File

@ -31,6 +31,13 @@ In order for a PR to be accepted it needs to pass a list of requirements:
- If the PR is a new feature, it has to come with a suite of unit tests, that tests the new functionality.
- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
### Branches
The `master` branch is currently used for maintaining the `v5` major release only. The accepted changes would
be dependency bumps, bug fixes and small changes that aren't needed for `v6`. New development should target the
`v6-exp` branch, and if agreed with at least one go-git maintainer, it can be back ported to `v5` by creating
a new PR that targets `master`.
### Format of the commit message
Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:

vendor/github.com/jesseduffield/go-git/v5/EXTENDING.md generated vendored Normal file
View File

@ -0,0 +1,78 @@
# Extending go-git
`go-git` was built in a highly extensible manner, which enables some of its functionalities to be changed or extended without the need of changing its codebase. Here are the key extensibility features:
## Dot Git Storers
Dot git storers are the components responsible for storing the Git internal files, including objects and references.
The built-in storer implementations include [memory](storage/memory) and [filesystem](storage/filesystem). The `memory` storer stores all the data in memory, and its use looks like this:
```go
r, err := git.Init(memory.NewStorage(), nil)
```
The `filesystem` storer stores the data in the OS filesystem, and can be used as follows:
```go
r, err := git.Init(filesystem.NewStorage(osfs.New("/tmp/foo")), nil)
```
New implementations can be created by implementing the [storage.Storer interface](storage/storer.go#L16).
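Since `storage.Storer` aggregates several smaller storer interfaces, a convenient starting point is to embed an existing implementation and override only the methods of interest. A minimal, hypothetical sketch (the `loggingStorer` name is illustrative, not part of the library):
```go
// loggingStorer delegates all storage operations to the embedded Storer;
// individual methods could be overridden to add logging, metrics, etc.
type loggingStorer struct {
	storage.Storer
}

r, err := git.Init(&loggingStorer{Storer: memory.NewStorage()}, nil)
```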
## Filesystem
Git repository worktrees are managed using a filesystem abstraction based on [go-billy](https://github.com/go-git/go-billy). The Git operations will take place against the specific filesystem implementation. Initialising a repository in memory can be done as follows:
```go
fs := memfs.New()
r, err := git.Init(memory.NewStorage(), fs)
```
The same operation can be done against the OS filesystem:
```go
fs := osfs.New("/tmp/foo")
r, err := git.Init(memory.NewStorage(), fs)
```
New filesystems (e.g. cloud based storage) could be created by implementing `go-billy`'s [Filesystem interface](https://github.com/go-git/go-billy/blob/326c59f064021b821a55371d57794fbfb86d4cb3/fs.go#L52).
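For example, the same abstraction allows an entire clone to live in memory. A minimal sketch (the repository URL is illustrative):
```go
fs := memfs.New()
r, err := git.Clone(memory.NewStorage(), fs, &git.CloneOptions{
	URL: "https://github.com/go-git/go-git",
})
```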
## Transport Schemes
Git supports various transport schemes, including `http`, `https`, `ssh`, `git`, `file`. `go-git` defines the [transport.Transport interface](plumbing/transport/common.go#L48) to represent them.
The built-in implementations can be replaced by calling `client.InstallProtocol`.
An example of changing the built-in `https` implementation to skip TLS could look like this:
```go
customClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
}
client.InstallProtocol("https", githttp.NewClient(customClient))
```
Some internal implementations enable code reuse among the different transport implementations. Some of these may be made public in the future (e.g. `plumbing/transport/internal/common`).
## Cache
Several different operations across `go-git` lean on caching of objects in order to achieve optimal performance. The caching functionality is defined by the [cache.Object interface](plumbing/cache/common.go#L17).
Two built-in implementations are `cache.ObjectLRU` and `cache.BufferLRU`. However, the caching functionality can be customized by implementing the `cache.Object` interface.
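As a sketch, a filesystem storer can be built with an explicitly sized LRU object cache (the size below is illustrative, assuming the `plumbing/cache` and `storage/filesystem` packages):
```go
fs := osfs.New("/tmp/foo/.git")
objectCache := cache.NewObjectLRU(96 * cache.MiByte)
storer := filesystem.NewStorage(fs, objectCache)
```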
## Hash
`go-git` uses the `crypto.Hash` interface to represent hash functions. The built-in implementations are `github.com/pjbgf/sha1cd` for SHA1 and Go's `crypto/sha256` for SHA256.
The default hash functions can be changed by calling `hash.RegisterHash`.
```go
func init() {
hash.RegisterHash(crypto.SHA1, sha1.New)
}
```
New `SHA1` or `SHA256` hash functions can be registered by calling `hash.RegisterHash`.

View File

@ -27,7 +27,14 @@ build-git:
test:
@echo "running against `git version`"; \
$(GOTEST) ./...
$(GOTEST) -race ./...
$(GOTEST) -v _examples/common_test.go _examples/common.go --examples
TEMP_REPO := $(shell mktemp)
test-sha256:
$(GOCMD) run -tags sha256 _examples/sha256/main.go $(TEMP_REPO)
cd $(TEMP_REPO) && git fsck
rm -rf $(TEMP_REPO)
test-coverage:
@echo "running against `git version`"; \
@ -35,4 +42,13 @@ test-coverage:
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
clean:
rm -rf $(GIT_DIST_PATH)
rm -rf $(GIT_DIST_PATH)
fuzz:
@go test -fuzz=FuzzParser $(PWD)/internal/revision
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/format/config
@go test -fuzz=FuzzPatchDelta $(PWD)/plumbing/format/packfile
@go test -fuzz=FuzzParseSignedBytes $(PWD)/plumbing/object
@go test -fuzz=FuzzDecode $(PWD)/plumbing/object
@go test -fuzz=FuzzDecoder $(PWD)/plumbing/protocol/packp
@go test -fuzz=FuzzNewEndpoint $(PWD)/plumbing/transport

View File

@ -1,9 +1,9 @@
![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
[![GoDoc](https://godoc.org/github.com/jesseduffield/go-git/v5?status.svg)](https://pkg.go.dev/github.com/jesseduffield/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
*go-git* is a highly extensible git implementation library written in **pure Go**.
It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/jesseduffield/go-git/v5/plumbing/storer) interface.
It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
It has been actively developed since 2015 and is used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/) and [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), as well as by many other libraries and tools.
@ -29,7 +29,7 @@ Installation
The recommended way to install *go-git* is:
```go
import "github.com/jesseduffield/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/go-git/go-git" // with go modules disabled
```
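A minimal usage sketch, cloning a repository to a local path (URL and destination path are illustrative):
```go
r, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{
	URL:      "https://github.com/go-git/go-git",
	Progress: os.Stdout,
})
```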

vendor/github.com/jesseduffield/go-git/v5/SECURITY.md generated vendored Normal file
View File

@ -0,0 +1,38 @@
# go-git Security Policy
The purpose of this security policy is to outline `go-git`'s process
for reporting, handling and disclosing security sensitive information.
## Supported Versions
The project follows a version support policy where only the latest minor
release is actively supported. Therefore, only issues that impact the latest
minor release will be fixed. Users are encouraged to upgrade to the latest
minor/patch release to benefit from the most up-to-date features, bug fixes,
and security enhancements.
The supported versions policy applies to both the `go-git` library and its
associated repositories within the `go-git` org.
## Reporting Security Issues
Please report any security vulnerabilities or potential weaknesses in `go-git`
privately via go-git-security@googlegroups.com. Do not publicly disclose the
details of the vulnerability until a fix has been implemented and released.
During the process the project maintainers will investigate the report, so please
provide detailed information, including steps to reproduce, affected versions, and any mitigations if known.
The project maintainers will acknowledge the receipt of the report and work with
the reporter to validate and address the issue.
Please note that `go-git` does not have any bounty programs, and therefore does
not provide financial compensation for disclosures.
## Security Disclosure Process
The project maintainers will make every effort to promptly address security issues.
Once a security vulnerability is fixed, a security advisory will be published to notify users and provide appropriate mitigation measures.
All `go-git` advisories can be found at https://github.com/go-git/go-git/security/advisories.

View File

@ -2,16 +2,18 @@ package git
import (
"bytes"
"container/heap"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
"unicode/utf8"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/object"
"github.com/jesseduffield/go-git/v5/utils/diff"
"github.com/sergi/go-diff/diffmatchpatch"
)
// BlameResult represents the result of a Blame operation.
@ -29,53 +31,26 @@ type BlameResult struct {
func Blame(c *object.Commit, path string) (*BlameResult, error) {
// The file to blame is identified by the input arguments:
// commit and path. commit is a Commit object obtained from a Repository. Path
// represents a path to a specific file contained into the repository.
// represents a path to a specific file contained in the repository.
//
// Blaming a file is a two step process:
// Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified.
//
// 1. Create a linear history of the commits affecting a file. We use
// revlist.New for that.
// When a diff is found it cannot immediately be assumed to have come from that commit, as it may have come
// from one of its parents, so it will first try to resolve those diffs from its parents. If the change cannot
// be found in any parent, it is assigned to the commit itself.
//
// 2. Then build a graph with a node for every line in every file in
// the history of the file.
// When encountering two parents that have made the same change to a file it will choose the parent that was merged
// into the current branch first (this is determined by the order of the parents inside the commit).
//
// Each node is assigned a commit: Start by the nodes in the first
// commit. Assign that commit as the creator of all its lines.
//
// Then jump to the nodes in the next commit, and calculate the diff
// between the two files. Newly created lines get
// assigned the new commit as its origin. Modified lines also get
// this new commit. Untouched lines retain the old commit.
//
// All this work is done in the assignOrigin function which holds all
// the internal relevant data in a "blame" struct, that is not
// exported.
//
// TODO: ways to improve the efficiency of this function:
// 1. Improve revlist
// 2. Improve how to traverse the history (example a backward traversal will
// be much more efficient)
//
// TODO: ways to improve the function in general:
// 1. Add memoization between revlist and assign.
// 2. It is using much more memory than needed, see the TODOs below.
// This currently works on a line-by-line basis; if performance becomes an issue it could be changed to work
// with hunks rather than lines, splitting diff hunks where necessary.
b := new(blame)
b.fRev = c
b.path = path
b.q = new(priorityQueue)
// get all the file revisions
if err := b.fillRevs(); err != nil {
return nil, err
}
// calculate the line tracking graph and fill in
// file contents in data.
if err := b.fillGraphAndData(); err != nil {
return nil, err
}
file, err := b.fRev.File(b.path)
file, err := b.fRev.File(path)
if err != nil {
return nil, err
}
@ -83,13 +58,56 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
if err != nil {
return nil, err
}
finalLength := len(finalLines)
// Each node (line) holds the commit where it was introduced or
// last modified. To achieve that we use the FORWARD algorithm
// described in Zimmermann, et al. "Mining Version Archives for
// Co-changed Lines", in proceedings of the Mining Software
// Repositories workshop, Shanghai, May 22-23, 2006.
lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1))
needsMap := make([]lineMap, finalLength)
for i := range needsMap {
needsMap[i] = lineMap{i, i, nil, -1}
}
contents, err := file.Contents()
if err != nil {
return nil, err
}
b.q.Push(&queueItem{
nil,
nil,
c,
path,
contents,
needsMap,
0,
false,
0,
})
items := make([]*queueItem, 0)
for {
items = items[:0]
for {
if b.q.Len() == 0 {
return nil, errors.New("invalid state: no items left on the blame queue")
}
item := b.q.Pop()
items = append(items, item)
next := b.q.Peek()
if next == nil || next.Hash != item.Commit.Hash {
break
}
}
finished, err := b.addBlames(items)
if err != nil {
return nil, err
}
if finished {
break
}
}
b.lineToCommit = make([]*object.Commit, finalLength)
for i := range needsMap {
b.lineToCommit[i] = needsMap[i].Commit
}
lines, err := newLines(finalLines, b.lineToCommit)
if err != nil {
return nil, err
}
@ -105,6 +123,8 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
type Line struct {
// Author is the email address of the last author that modified the line.
Author string
// AuthorName is the name of the last author that modified the line.
AuthorName string
// Text is the original text of the line.
Text string
// Date is when the original text of the line was introduced
@ -113,31 +133,21 @@ type Line struct {
Hash plumbing.Hash
}
func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line {
return &Line{
Author: author,
Text: text,
Hash: hash,
Date: date,
Author: author,
AuthorName: authorName,
Text: text,
Hash: hash,
Date: date,
}
}
func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
lcontents := len(contents)
lcommits := len(commits)
if lcontents != lcommits {
if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
contents = append(contents, "\n")
} else {
return nil, errors.New("contents and commits have different length")
}
}
result := make([]*Line, 0, lcontents)
result := make([]*Line, 0, len(contents))
for i := range contents {
result = append(result, newLine(
commits[i].Author.Email, contents[i],
commits[i].Author.Email, commits[i].Author.Name, contents[i],
commits[i].Author.When, commits[i].Hash,
))
}
@ -152,151 +162,426 @@ type blame struct {
path string
// the commit of the final revision of the file to blame
fRev *object.Commit
// the chain of revisions affecting the file to blame
revs []*object.Commit
// the contents of the file across all its revisions
data []string
// the graph of the lines in the file across all the revisions
graph [][]*object.Commit
// resolved lines
lineToCommit []*object.Commit
// queue of commits that need resolving
q *priorityQueue
}
// calculate the history of a file "path", starting from commit "from", sorted by commit date.
func (b *blame) fillRevs() error {
var err error
b.revs, err = references(b.fRev, b.path)
return err
type lineMap struct {
Orig, Cur int
Commit *object.Commit
FromParentNo int
}
// build graph of a file from its revision history
func (b *blame) fillGraphAndData() error {
//TODO: not all commits are needed, only the current rev and the prev
b.graph = make([][]*object.Commit, len(b.revs))
b.data = make([]string, len(b.revs)) // file contents in all the revisions
// for every revision of the file, starting with the first
// one...
for i, rev := range b.revs {
func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
curItem := curItems[0]
// Simple optimisation to merge paths; there is potential to go a bit further here and check for any duplicates,
// not only whether they are all the same.
if len(curItems) == 1 {
curItems = nil
} else if curItem.IdenticalToChild {
allSame := true
lenCurItems := len(curItems)
lowestParentNo := curItem.ParentNo
for i := 1; i < lenCurItems; i++ {
if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child {
allSame = false
break
}
lowestParentNo = min(lowestParentNo, curItems[i].ParentNo)
}
if allSame {
curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1
curItems = nil // free the memory
curItem.ParentNo = lowestParentNo
// Now check if we can remove the parent completely
for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 {
oldChild := curItem.Child
curItem.Child = oldChild.Child
curItem.ParentNo = oldChild.ParentNo
}
}
}
// if we have more than 1 item for this commit, create a single needsMap
if len(curItems) > 1 {
curItem.MergedChildren = make([]childToNeedsMap, len(curItems))
for i, c := range curItems {
curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo}
}
newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap))
newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...)
for i := 1; i < len(curItems); i++ {
cur := curItems[i].NeedsMap
n := 0 // position in newNeedsMap
c := 0 // position in current list
for c < len(cur) {
if n == len(newNeedsMap) {
newNeedsMap = append(newNeedsMap, cur[c:]...)
break
} else if newNeedsMap[n].Cur == cur[c].Cur {
n++
c++
} else if newNeedsMap[n].Cur < cur[c].Cur {
n++
} else {
newNeedsMap = append(newNeedsMap, cur[c])
newPos := len(newNeedsMap) - 1
for newPos > n {
newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1]
newPos--
}
}
}
}
curItem.NeedsMap = newNeedsMap
curItem.IdenticalToChild = false
curItem.Child = nil
curItems = nil // free the memory
}
parents, err := parentsContainingPath(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
anyPushed := false
for parentNo, prev := range parents {
currentHash, err := blobHash(curItem.path, curItem.Commit)
if err != nil {
return false, err
}
prevHash, err := blobHash(prev.Path, prev.Commit)
if err != nil {
return false, err
}
if currentHash == prevHash {
if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild {
// commit that has 1 parent and 1 child and is the same as both, bypass it completely
b.q.Push(&queueItem{
Child: curItem.Child,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item
IdenticalToChild: true,
ParentNo: curItem.ParentNo,
})
} else {
b.q.Push(&queueItem{
Child: curItem,
Commit: prev.Commit,
path: prev.Path,
Contents: curItem.Contents,
NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy
IdenticalToChild: true,
ParentNo: parentNo,
})
curItem.numParentsNeedResolving++
}
anyPushed = true
continue
}
// get the contents of the file
file, err := rev.File(b.path)
file, err := prev.Commit.File(prev.Path)
if err != nil {
return nil
return false, err
}
b.data[i], err = file.Contents()
prevContents, err := file.Contents()
if err != nil {
return err
return false, err
}
nLines := countLines(b.data[i])
// create a node for each line
b.graph[i] = make([]*object.Commit, nLines)
// assign a commit to each node
// if this is the first revision, then the node is assigned to
// this first commit.
if i == 0 {
for j := 0; j < nLines; j++ {
b.graph[i][j] = b.revs[i]
hunks := diff.Do(prevContents, curItem.Contents)
prevl := -1
curl := -1
need := 0
getFromParent := make([]lineMap, 0)
out:
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch hunks[h].Type {
case diffmatchpatch.DiffEqual:
prevl++
curl++
if curl == curItem.NeedsMap[need].Cur {
// add to needs
getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1})
// move to next need
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case diffmatchpatch.DiffInsert:
curl++
if curl == curItem.NeedsMap[need].Cur {
// the line we want is added, it may have been added here (or by another parent), skip it for now
need++
if need >= len(curItem.NeedsMap) {
break out
}
}
case diffmatchpatch.DiffDelete:
prevl += hLines
continue out
default:
return false, errors.New("invalid state: invalid hunk Type")
}
}
} else {
// if this is not the first commit, then assign to the old
// commit or to the new one, depending on what the diff
// says.
b.assignOrigin(i, i-1)
}
if len(getFromParent) > 0 {
b.q.Push(&queueItem{
curItem,
nil,
prev.Commit,
prev.Path,
prevContents,
getFromParent,
0,
false,
parentNo,
})
curItem.numParentsNeedResolving++
anyPushed = true
}
}
return nil
}
// sliceGraph returns a slice of commits (one per line) for a particular
// revision of a file (0=first revision).
func (b *blame) sliceGraph(i int) []*object.Commit {
fVs := b.graph[i]
result := make([]*object.Commit, 0, len(fVs))
for _, v := range fVs {
c := *v
result = append(result, &c)
curItem.Contents = "" // no longer needed, free the memory
if !anyPushed {
return finishNeeds(curItem)
}
return result
return false, nil
}
// Assigns origin to vertexes in current (c) rev from data in its previous (p)
// revision
func (b *blame) assignOrigin(c, p int) {
// assign origin based on diff info
hunks := diff.Do(b.data[p], b.data[c])
sl := -1 // source line
dl := -1 // destination line
for h := range hunks {
hLines := countLines(hunks[h].Text)
for hl := 0; hl < hLines; hl++ {
switch {
case hunks[h].Type == 0:
sl++
dl++
b.graph[c][dl] = b.graph[p][sl]
case hunks[h].Type == 1:
dl++
b.graph[c][dl] = b.revs[c]
case hunks[h].Type == -1:
sl++
default:
panic("unreachable")
func finishNeeds(curItem *queueItem) (bool, error) {
// any needs left in the needsMap must have come from this revision
for i := range curItem.NeedsMap {
if curItem.NeedsMap[i].Commit == nil {
curItem.NeedsMap[i].Commit = curItem.Commit
curItem.NeedsMap[i].FromParentNo = -1
}
}
if curItem.Child == nil && curItem.MergedChildren == nil {
return true, nil
}
if curItem.MergedChildren == nil {
return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo)
}
for _, ctn := range curItem.MergedChildren {
m := 0 // position in merged needs map
p := 0 // position in parent needs map
for p < len(ctn.NeedsMap) {
if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur {
ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit
m++
p++
} else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur {
p++
} else {
m++
}
}
finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) {
if identicalToChild {
for i := range child.NeedsMap {
l := &child.NeedsMap[i]
if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig {
return false, errors.New("needsMap isn't the same? Why not??")
}
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
} else {
i := 0
out:
for j := range child.NeedsMap {
l := &child.NeedsMap[j]
for needsMap[i].Orig < l.Cur {
i++
if i == len(needsMap) {
break out
}
}
if l.Cur == needsMap[i].Orig {
if l.Commit == nil || parentNo < l.FromParentNo {
l.Commit = needsMap[i].Commit
l.FromParentNo = parentNo
}
}
}
}
child.numParentsNeedResolving--
if child.numParentsNeedResolving == 0 {
finished, err := finishNeeds(child)
if finished || err != nil {
return finished, err
}
}
return false, nil
}
// GoString prints the results of a Blame using git-blame's style.
func (b *blame) GoString() string {
// String prints the results of a Blame using git-blame's style.
func (b BlameResult) String() string {
var buf bytes.Buffer
file, err := b.fRev.File(b.path)
if err != nil {
panic("PrettyPrint: internal error in repo.Data")
}
contents, err := file.Contents()
if err != nil {
panic("PrettyPrint: internal error in repo.Data")
}
lines := strings.Split(contents, "\n")
// max line number length
mlnl := len(strconv.Itoa(len(lines)))
mlnl := len(strconv.Itoa(len(b.Lines)))
// max author length
mal := b.maxAuthorLength()
format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
mal, mlnl)
format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl)
fVs := b.graph[len(b.graph)-1]
for ln, v := range fVs {
fmt.Fprintf(&buf, format, v.Hash.String()[:8],
prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
for ln := range b.Lines {
_, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8],
b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text)
}
return buf.String()
}
// utility function to pretty print the author.
func prettyPrintAuthor(c *object.Commit) string {
return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02"))
}
// utility function to calculate the number of runes needed
// to print the longest author name in the blame of a file.
func (b *blame) maxAuthorLength() int {
memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
fVs := b.graph[len(b.graph)-1]
func (b BlameResult) maxAuthorLength() int {
m := 0
for ln := range fVs {
if _, ok := memo[fVs[ln].Hash]; ok {
continue
}
memo[fVs[ln].Hash] = struct{}{}
m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
for ln := range b.Lines {
m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName))
}
return m
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
type childToNeedsMap struct {
Child *queueItem
NeedsMap []lineMap
IdenticalToChild bool
ParentNo int
}
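// queueItem tracks the blame state for a single commit: the file contents at
// that commit, and the lines (NeedsMap) whose origin still needs resolving.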
type queueItem struct {
Child *queueItem
MergedChildren []childToNeedsMap
Commit *object.Commit
path string
Contents string
NeedsMap []lineMap
numParentsNeedResolving int
IdenticalToChild bool
ParentNo int
}
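// priorityQueueImp implements heap.Interface. Less is inverted so the most
// recent commit is popped first, ensuring children are processed before their
// parents.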
type priorityQueueImp []*queueItem
func (pq *priorityQueueImp) Len() int { return len(*pq) }
func (pq *priorityQueueImp) Less(i, j int) bool {
return !(*pq)[i].Commit.Less((*pq)[j].Commit)
}
func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] }
func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) }
func (pq *priorityQueueImp) Pop() any {
n := len(*pq)
ret := (*pq)[n-1]
(*pq)[n-1] = nil // avoid memory leak
*pq = (*pq)[0 : n-1]
return ret
}
func (pq *priorityQueueImp) Peek() *object.Commit {
if len(*pq) == 0 {
return nil
}
return (*pq)[0].Commit
}
type priorityQueue priorityQueueImp
func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) }
func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() }
func (pq *priorityQueue) Push(c *queueItem) {
heap.Push((*priorityQueueImp)(pq), c)
}
func (pq *priorityQueue) Pop() *queueItem {
return heap.Pop((*priorityQueueImp)(pq)).(*queueItem)
}
func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() }
type parentCommit struct {
Commit *object.Commit
Path string
}
func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) {
// TODO: benchmark this method making git.object.Commit.parent public instead of using
// an iterator
var result []parentCommit
iter := c.Parents()
for {
parent, err := iter.Next()
if err == io.EOF {
return result, nil
}
if err != nil {
return nil, err
}
if _, err := parent.File(path); err == nil {
result = append(result, parentCommit{parent, path})
} else {
// look for renames
patch, err := parent.Patch(c)
if err != nil {
return nil, err
} else if patch != nil {
for _, fp := range patch.FilePatches() {
from, to := fp.Files()
if from != nil && to != nil && to.Path() == path {
result = append(result, parentCommit{parent, from.Path()})
break
}
}
}
}
}
}
func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) {
file, err := commit.File(path)
if err != nil {
return plumbing.ZeroHash, err
}
return file.Hash, nil
}

View File

@ -2,8 +2,6 @@ package git
import "strings"
const defaultDotGitPath = ".git"
// countLines returns the number of lines in a string à la git, this is
// The newline character is assumed to be '\n'. The empty string
// contains 0 lines. If the last line of the string doesn't end with a

View File

@ -2,6 +2,7 @@ package config
import (
"errors"
"strings"
"github.com/jesseduffield/go-git/v5/plumbing"
format "github.com/jesseduffield/go-git/v5/plumbing/format/config"
@ -26,6 +27,12 @@ type Branch struct {
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
@ -47,7 +54,7 @@ func (b *Branch) Validate() error {
return errBranchInvalidRebase
}
return nil
return plumbing.NewBranchReferenceName(b.Name).Validate()
}
func (b *Branch) marshal() *format.Subsection {
@ -75,14 +82,42 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
func (b *Branch) unmarshal(s *format.Subsection) {
// hack to trigger conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
//
// The current Encoder implementation uses Go's %q format if the value contains a backslash character,
// which is not consistent with the reference git implementation.
// git just replaces newline characters with \n, while the Encoder prints them directly.
// Until value quoting is fixed, we escape the description value by replacing newline characters with \n.
func quoteDescription(desc string) string {
return strings.ReplaceAll(desc, "\n", `\n`)
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
b.Name = b.raw.Name
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return nil
}
// hack to enable conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
// see quoteDescription for details.
func unquoteDescription(desc string) string {
return strings.ReplaceAll(desc, `\n`, "\n")
}

View File

@ -6,15 +6,15 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"github.com/go-git/go-billy/v5/osfs"
"github.com/jesseduffield/go-git/v5/internal/url"
"github.com/jesseduffield/go-git/v5/plumbing"
format "github.com/jesseduffield/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
@ -59,12 +59,14 @@ type Config struct {
// CommentChar is the character indicating the start of a
// comment for commands like commit and tag
CommentChar string
// RepositoryFormatVersion identifies the repository format and layout version.
RepositoryFormatVersion format.RepositoryFormatVersion
}
User struct {
// Name is the personal name of the author and the commiter of a commit.
// Name is the personal name of the author and the committer of a commit.
Name string
// Email is the email of the author and the commiter of a commit.
// Email is the email of the author and the committer of a commit.
Email string
}
@ -76,9 +78,9 @@ type Config struct {
}
Committer struct {
// Name is the personal name of the commiter of a commit.
// Name is the personal name of the committer of a commit.
Name string
// Email is the email of the the commiter of a commit.
// Email is the email of the committer of a commit.
Email string
}
@ -89,6 +91,24 @@ type Config struct {
Window uint
}
Init struct {
// DefaultBranch allows overriding the default branch name,
// e.g. when initializing a new repository or when cloning
// an empty repository.
DefaultBranch string
}
Extensions struct {
// ObjectFormat specifies the hash algorithm to use. The
// acceptable values are sha1 and sha256. If not specified,
// sha1 is assumed. It is an error to specify this key unless
// core.repositoryFormatVersion is 1.
//
// This setting must not be changed after repository initialization
// (e.g. clone or init).
ObjectFormat format.ObjectFormat
}
// Remotes list of repository remotes, the key of the map is the name
// of the remote and should equal RemoteConfig.Name.
Remotes map[string]*RemoteConfig
@ -98,6 +118,9 @@ type Config struct {
// Branches list of branches, the key is the branch name and should
// equal Branch.Name
Branches map[string]*Branch
// URLs is a list of URL rewrite rules; if a repo URL starts with a rule's URL.InsteadOf value, that prefix
// will be replaced with the map key instead.
URLs map[string]*URL
// Raw contains the raw information of a config file. The main goal is
// to preserve the parsed information from the original format, to avoid
// dropping unsupported fields.
@ -110,6 +133,7 @@ func NewConfig() *Config {
Remotes: make(map[string]*RemoteConfig),
Submodules: make(map[string]*Submodule),
Branches: make(map[string]*Branch),
URLs: make(map[string]*URL),
Raw: format.New(),
}
@ -120,7 +144,7 @@ func NewConfig() *Config {
// ReadConfig reads a config file from a io.Reader.
func ReadConfig(r io.Reader) (*Config, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@ -134,11 +158,11 @@ func ReadConfig(r io.Reader) (*Config, error) {
}
// LoadConfig loads a config file from a given scope. The returned Config,
// contains exclusively information fom the given scope. If couldn't find a
// config file to the given scope, a empty one is returned.
// contains exclusively information from the given scope. If it couldn't find a
// config file for the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
return nil, fmt.Errorf("LocalScope should be read from a ConfigStorer")
}
files, err := Paths(scope)
@ -147,7 +171,7 @@ func LoadConfig(scope Scope) (*Config, error) {
}
for _, file := range files {
f, err := os.Open(file)
f, err := osfs.Default.Open(file)
if err != nil {
if os.IsNotExist(err) {
continue
@ -173,7 +197,7 @@ func Paths(scope Scope) ([]string, error) {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@ -215,24 +239,33 @@ func (c *Config) Validate() error {
}
const (
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
fetchKey = "fetch"
urlKey = "url"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
remoteSection = "remote"
submoduleSection = "submodule"
branchSection = "branch"
coreSection = "core"
packSection = "pack"
userSection = "user"
authorSection = "author"
committerSection = "committer"
initSection = "init"
urlSection = "url"
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
pushurlKey = "pushurl"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
repositoryFormatVersionKey = "repositoryformatversion"
objectFormat = "objectformat"
mirrorKey = "mirror"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.
@ -251,12 +284,19 @@ func (c *Config) Unmarshal(b []byte) error {
c.unmarshalCore()
c.unmarshalUser()
c.unmarshalInit()
if err := c.unmarshalPack(); err != nil {
return err
}
unmarshalSubmodules(c.Raw, c.Submodules)
c.unmarshalBranches()
if err := c.unmarshalBranches(); err != nil {
return err
}
if err := c.unmarshalURLs(); err != nil {
return err
}
return c.unmarshalRemotes()
}
@ -311,6 +351,25 @@ func (c *Config) unmarshalRemotes() error {
c.Remotes[r.Name] = r
}
// Apply insteadOf url rules
for _, r := range c.Remotes {
r.applyURLRules(c.URLs)
}
return nil
}
func (c *Config) unmarshalURLs() error {
s := c.Raw.Section(urlSection)
for _, sub := range s.Subsections {
r := &URL{}
if err := r.unmarshal(sub); err != nil {
return err
}
c.URLs[r.Name] = r
}
return nil
}
@ -328,25 +387,36 @@ func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) {
}
}
func (c *Config) unmarshalBranches() {
func (c *Config) unmarshalBranches() error {
bs := c.Raw.Section(branchSection)
for _, sub := range bs.Subsections {
b := &Branch{}
b.unmarshal(sub)
if err := b.unmarshal(sub); err != nil {
return err
}
c.Branches[b.Name] = b
}
return nil
}
func (c *Config) unmarshalInit() {
s := c.Raw.Section(initSection)
c.Init.DefaultBranch = s.Options.Get(defaultBranchKey)
}
// Marshal returns Config encoded as a git-config file.
func (c *Config) Marshal() ([]byte, error) {
c.marshalCore()
c.marshalExtensions()
c.marshalUser()
c.marshalPack()
c.marshalRemotes()
c.marshalSubmodules()
c.marshalBranches()
c.marshalURLs()
c.marshalInit()
buf := bytes.NewBuffer(nil)
if err := format.NewEncoder(buf).Encode(c.Raw); err != nil {
@ -359,12 +429,24 @@ func (c *Config) Marshal() ([]byte, error) {
func (c *Config) marshalCore() {
s := c.Raw.Section(coreSection)
s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare))
if string(c.Core.RepositoryFormatVersion) != "" {
s.SetOption(repositoryFormatVersionKey, string(c.Core.RepositoryFormatVersion))
}
if c.Core.Worktree != "" {
s.SetOption(worktreeKey, c.Core.Worktree)
}
}
func (c *Config) marshalExtensions() {
// Extensions are only supported on Version 1, therefore
// ignore them otherwise.
if c.Core.RepositoryFormatVersion == format.Version_1 {
s := c.Raw.Section(extensionsSection)
s.SetOption(objectFormat, string(c.Extensions.ObjectFormat))
}
}
func (c *Config) marshalUser() {
s := c.Raw.Section(userSection)
if c.User.Name != "" {
@ -470,6 +552,27 @@ func (c *Config) marshalBranches() {
s.Subsections = newSubsections
}
func (c *Config) marshalURLs() {
s := c.Raw.Section(urlSection)
s.Subsections = make(format.Subsections, len(c.URLs))
var i int
for _, r := range c.URLs {
section := r.marshal()
// the submodule section at config is a subset of the .gitmodule file
// we should remove the non-valid options for the config file.
s.Subsections[i] = section
i++
}
}
func (c *Config) marshalInit() {
s := c.Raw.Section(initSection)
if c.Init.DefaultBranch != "" {
s.SetOption(defaultBranchKey, c.Init.DefaultBranch)
}
}
// RemoteConfig contains the configuration for a given remote repository.
type RemoteConfig struct {
// Name of the remote
@ -477,6 +580,14 @@ type RemoteConfig struct {
// URLs the URLs of a remote repository. It must be non-empty. Fetch will
// always use the first URL, while push will use all of them.
URLs []string
// Mirror indicates that the repository is a mirror of remote.
Mirror bool
// insteadOfRulesApplied records whether the urls have been modified by insteadOf rules
insteadOfRulesApplied bool
// originalURLs are the urls before applying insteadOf rules
originalURLs []string
// Fetch the default set of "refspec" for fetch operation
Fetch []RefSpec
@ -505,7 +616,7 @@ func (c *RemoteConfig) Validate() error {
c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
}
return nil
return plumbing.NewRemoteHEADReferenceName(c.Name).Validate()
}
func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
@ -523,7 +634,9 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.URLs = append(c.URLs, c.raw.Options.GetAll(pushurlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
return nil
}
@ -537,7 +650,12 @@ func (c *RemoteConfig) marshal() *format.Subsection {
if len(c.URLs) == 0 {
c.raw.RemoveOption(urlKey)
} else {
c.raw.SetOption(urlKey, c.URLs...)
urls := c.URLs
if c.insteadOfRulesApplied {
urls = c.originalURLs
}
c.raw.SetOption(urlKey, urls...)
}
if len(c.Fetch) == 0 {
@ -551,9 +669,30 @@ func (c *RemoteConfig) marshal() *format.Subsection {
c.raw.SetOption(fetchKey, values...)
}
if c.Mirror {
c.raw.SetOption(mirrorKey, strconv.FormatBool(c.Mirror))
}
return c.raw
}
func (c *RemoteConfig) IsFirstURLLocal() bool {
return url.IsLocalEndpoint(c.URLs[0])
}
func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) {
// save original urls
originalURLs := make([]string, len(c.URLs))
copy(originalURLs, c.URLs)
for i, url := range c.URLs {
if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil {
c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i])
c.insteadOfRulesApplied = true
}
}
if c.insteadOfRulesApplied {
c.originalURLs = originalURLs
}
}

View File

@ -89,7 +89,7 @@ func (s RefSpec) IsNegative() bool {
return s[0] == refSpecNegative[0]
}
// Src return the src side.
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)

View File

@ -0,0 +1,81 @@
package config
import (
"errors"
"strings"
format "github.com/jesseduffield/go-git/v5/plumbing/format/config"
)
var (
errURLEmptyInsteadOf = errors.New("url config: empty insteadOf")
)
// URL defines URL rewrite rules.
type URL struct {
// Name is the new base URL.
Name string
// Any URL that starts with this value will be rewritten to start, instead, with <base>.
// When more than one insteadOf string matches a given URL, the longest match is used.
InsteadOf string
// raw representation of the subsection, filled when marshal or unmarshal are
// called.
raw *format.Subsection
}
// Validate validates the fields of the URL.
func (b *URL) Validate() error {
if b.InsteadOf == "" {
return errURLEmptyInsteadOf
}
return nil
}
const (
insteadOfKey = "insteadOf"
)
func (u *URL) unmarshal(s *format.Subsection) error {
u.raw = s
u.Name = s.Name
u.InsteadOf = u.raw.Option(insteadOfKey)
return nil
}
func (u *URL) marshal() *format.Subsection {
if u.raw == nil {
u.raw = &format.Subsection{}
}
u.raw.Name = u.Name
u.raw.SetOption(insteadOfKey, u.InsteadOf)
return u.raw
}
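// findLongestInsteadOfMatch returns the URL rule with the longest InsteadOf
// prefix matching remoteURL, or nil if no rule matches.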
func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL {
var longestMatch *URL
for _, u := range urls {
if !strings.HasPrefix(remoteURL, u.InsteadOf) {
continue
}
// according to spec, if there is more than one match, take the longest
if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) {
longestMatch = u
}
}
return longestMatch
}
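// ApplyInsteadOf replaces the matched InsteadOf prefix of url with the rule's
// Name (the new base URL). The url is returned unchanged if it does not match.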
func (u *URL) ApplyInsteadOf(url string) string {
if !strings.HasPrefix(url, u.InsteadOf) {
return url
}
return u.Name + url[len(u.InsteadOf):]
}

View File

@ -0,0 +1,29 @@
package path_util
import (
"os"
"os/user"
"strings"
)
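// ReplaceTildeWithHome expands a leading "~/" to the current user's home
// directory and "~username/" to that user's home directory; any other path is
// returned unchanged.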
func ReplaceTildeWithHome(path string) (string, error) {
if strings.HasPrefix(path, "~") {
firstSlash := strings.Index(path, "/")
if firstSlash == 1 {
home, err := os.UserHomeDir()
if err != nil {
return path, err
}
return strings.Replace(path, "~", home, 1), nil
} else if firstSlash > 1 {
username := path[1:firstSlash]
userAccount, err := user.Lookup(username)
if err != nil {
return path, err
}
return strings.Replace(path, path[:firstSlash], userAccount.HomeDir, 1), nil
}
}
return path, nil
}
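
Since internal/path_util cannot be imported from outside the module, here is a standalone sketch of the same expansion for the common `~/` case (the `~user/` branch above additionally consults os/user.Lookup):

package main

import (
	"fmt"
	"os"
	"strings"
)

// expandTilde mirrors ReplaceTildeWithHome for the "~/" prefix only.
func expandTilde(path string) (string, error) {
	if strings.HasPrefix(path, "~/") {
		home, err := os.UserHomeDir()
		if err != nil {
			return path, err
		}
		return strings.Replace(path, "~", home, 1), nil
	}
	return path, nil
}

func main() {
	p, _ := expandTilde("~/.gitignore_global")
	fmt.Println(p) // e.g. /home/alice/.gitignore_global
}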

View File

@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit

View File

@ -43,6 +43,11 @@ func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r
return tokenType, string(data), nil
}
// maxRevisionLength holds the maximum length that will be parsed for a
// revision. Git itself doesn't enforce a max length, but rather leans on
// the OS to enforce it via its ARG_MAX.
const maxRevisionLength = 128 * 1024 // 128 KiB
var zeroRune = rune(0)
// scanner represents a lexical scanner.
@ -52,7 +57,7 @@ type scanner struct {
// newScanner returns a new instance of scanner.
func newScanner(r io.Reader) *scanner {
return &scanner{r: bufio.NewReader(r)}
return &scanner{r: bufio.NewReader(io.LimitReader(r, maxRevisionLength))}
}
// Scan extracts tokens and their strings counterpart

View File

@ -5,8 +5,10 @@ import (
)
var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`)
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
// Ref: https://github.com/git/git/blob/master/Documentation/urls.txt#L37
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)
)
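
A standalone sketch of what the relaxed pattern accepts (the pattern is copied here for illustration; in go-git the scheme regexp is checked first, since URL-style strings would otherwise also match the scp-like pattern):

package main

import (
	"fmt"
	"regexp"
)

// Standalone copy of the updated scp-like pattern, for illustration only.
var scpLike = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5}):)?(?P<path>[^\\].*)$`)

func main() {
	for _, s := range []string{
		"git@github.com:org/repo.git",   // user + host + path
		"example.com:2222:org/repo.git", // host + port + path
		"example.com:org/repo.git",      // host + path, no user or port
	} {
		fmt.Println(s, "->", scpLike.MatchString(s))
	}
}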
// MatchesScheme returns true if the given string matches a URL-like

View File

@ -60,7 +60,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
// Fetch the object.
obj, err := object.GetObject(p.Storer, hash)
if err != nil {
return fmt.Errorf("Getting object %s failed: %v", hash, err)
return fmt.Errorf("getting object %s failed: %v", hash, err)
}
// Walk all children depending on object type.
switch obj := obj.(type) {
@ -98,7 +98,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
}
return nil
}

View File

@ -7,12 +7,13 @@ import (
"strings"
"time"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/jesseduffield/go-git/v5/config"
"github.com/jesseduffield/go-git/v5/plumbing"
formatcfg "github.com/jesseduffield/go-git/v5/plumbing/format/config"
"github.com/jesseduffield/go-git/v5/plumbing/object"
"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband"
"github.com/jesseduffield/go-git/v5/plumbing/transport"
"golang.org/x/crypto/openpgp"
)
// SubmoduleRescursivity defines how depth will affect any submodule recursive
@ -45,6 +46,14 @@ type CloneOptions struct {
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
SingleBranch bool
// Mirror clones the repository as a mirror.
//
// Compared to a bare clone, mirror not only maps local branches of the
// source to local branches of the target, it maps all refs (including
// remote-tracking branches, notes etc.) and sets up a refspec configuration
// such that all these refs are overwritten by a git remote update in the
// target repository.
Mirror bool
// No checkout of HEAD after clone if true.
NoCheckout bool
// Limit fetching to the specified number of commits.
@ -53,6 +62,9 @@ type CloneOptions struct {
// within, using their default settings. This option is ignored if the
// cloned repository does not have a worktree.
RecurseSubmodules SubmoduleRescursivity
// ShallowSubmodules limits cloning submodules to a depth of 1.
// It matches the git flag --shallow-submodules.
ShallowSubmodules bool
// Progress is where the human-readable information sent by the server is
// stored. If nil, nothing is stored, and the no-progress capability (if
// supported) is sent to the server to avoid sending this information.
@ -60,8 +72,42 @@ type CloneOptions struct {
// Tags describe how the tags will be fetched from the remote repository,
// by default is AllTags.
Tags TagMode
// InsecureSkipTLS skips SSL verification when the protocol is HTTPS
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// When the repository to clone is on the local machine, instead of
// using hard links, automatically set up .git/objects/info/alternates
// to share the objects with the source repository.
// The resulting repository starts out without any object of its own.
// NOTE: this is a possibly dangerous operation; do not use it unless
// you understand what it does.
//
// [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared
Shared bool
}
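
A minimal sketch of a mirror clone using the new option (the URL and target path are illustrative):

package main

import (
	"os"

	git "github.com/jesseduffield/go-git/v5"
)

func main() {
	// Equivalent of `git clone --mirror`: a bare repository whose refspec
	// maps all refs from the source.
	_, err := git.PlainClone("/tmp/repo.git", true, &git.CloneOptions{
		URL:      "https://github.com/git-fixtures/basic.git",
		Mirror:   true,
		Progress: os.Stdout,
	})
	if err != nil {
		panic(err)
	}
}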
// MergeOptions describes how a merge should be performed.
type MergeOptions struct {
// Strategy defines the merge strategy to be used.
Strategy MergeStrategy
}
// MergeStrategy represents the different types of merge strategies.
type MergeStrategy int8
const (
// FastForwardMerge represents a Git merge strategy where the current
// branch can be simply updated to point to the HEAD of the branch being
// merged. This is only possible if the history of the branch being merged
// is a linear descendant of the current branch, with no conflicting commits.
//
// This is the default option.
FastForwardMerge MergeStrategy = iota
)
// Validate validates the fields and sets the default values.
func (o *CloneOptions) Validate() error {
if o.URL == "" {
@ -87,6 +133,8 @@ func (o *CloneOptions) Validate() error {
type PullOptions struct {
// Name of the remote to be pulled. If empty, uses the default.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// Remote branch to clone. If empty, uses HEAD.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
@ -105,6 +153,12 @@ type PullOptions struct {
// Force allows the pull to update a local branch even when the remote
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips SSL verification when the protocol is HTTPS
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// Validate validates the fields and sets the default values.
@ -131,7 +185,7 @@ const (
// AllTags fetch all tags from the remote (i.e., fetch remote tags
// refs/tags/* into local tags with the same name)
AllTags
//NoTags fetch no tags from the remote at all
// NoTags fetch no tags from the remote at all
NoTags
)
@ -139,7 +193,9 @@ const (
type FetchOptions struct {
// Name of the remote to fetch from. Defaults to origin.
RemoteName string
RefSpecs []config.RefSpec
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
RefSpecs []config.RefSpec
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
@ -155,6 +211,15 @@ type FetchOptions struct {
// Force allows the fetch to update a local branch even when the remote
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips SSL verification when the protocol is HTTPS
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// Prune specifies that local refs matching the given RefSpecs that do
// not exist on the remote will be removed.
Prune bool
}
// Validate validates the fields and sets the default values.
@ -180,8 +245,16 @@ func (o *FetchOptions) Validate() error {
type PushOptions struct {
// RemoteName is the name of the remote to be pushed to.
RemoteName string
// RefSpecs specify what destination ref to update with what source
// object. A refspec with empty src can be used to delete a reference.
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// RefSpecs specify what destination ref to update with what source object.
//
// The format of a <refspec> parameter is an optional plus +, followed by
// the source object <src>, followed by a colon :, followed by the destination ref <dst>.
// The <src> is often the name of the branch you would want to push, but it can be a SHA-1.
// The <dst> tells which ref on the remote side is updated with this push.
//
// A refspec with empty src can be used to delete a reference.
RefSpecs []config.RefSpec
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
@ -194,6 +267,37 @@ type PushOptions struct {
// Force allows the push to update a remote branch even when the local
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips SSL verification when the protocol is HTTPS
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// RequireRemoteRefs only allows a remote ref to be updated if its current
// value is the one specified here.
RequireRemoteRefs []config.RefSpec
// FollowTags will send any annotated tags with a commit target reachable from
// the refs already being pushed
FollowTags bool
// ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
ForceWithLease *ForceWithLease
// PushOptions sets options to be transferred to the server during push.
Options map[string]string
// Atomic makes the push atomic
Atomic bool
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
}
// ForceWithLease sets fields on the lease.
// If neither RefName nor Hash are set, ForceWithLease protects
// all refs in the refspec by ensuring the remote ref in the local repository
// matches the one in the ref advertisement.
type ForceWithLease struct {
// RefName, when set will protect the ref by ensuring it matches the
// hash in the ref advertisement.
RefName plumbing.ReferenceName
// Hash is the expected object id of RefName. The push will be rejected unless this
// matches the corresponding object id of RefName in the refs advertisement.
Hash plumbing.Hash
}
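
A sketch of a lease-protected force push; with a zero-valued ForceWithLease every pushed ref is protected, analogous to `git push --force-with-lease` with no arguments:

package main

import (
	"errors"

	git "github.com/jesseduffield/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	err = repo.Push(&git.PushOptions{
		RemoteName:     "origin",
		ForceWithLease: &git.ForceWithLease{},
	})
	if err != nil && !errors.Is(err, git.NoErrAlreadyUpToDate) {
		panic(err)
	}
}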
// Validate validates the fields and sets the default values.
@ -230,6 +334,9 @@ type SubmoduleUpdateOptions struct {
RecurseSubmodules SubmoduleRescursivity
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
}
var (
@ -239,9 +346,9 @@ var (
// CheckoutOptions describes how a checkout operation should be performed.
type CheckoutOptions struct {
// Hash is the hash of the commit to be checked out. If used, HEAD will be
// in detached mode. If Create is not used, Branch and Hash are mutually
// exclusive.
// Hash is the hash of a commit or tag to be checked out. If used, HEAD
// will be in detached mode. If Create is not used, Branch and Hash are
// mutually exclusive.
Hash plumbing.Hash
// Branch to be checked out, if Branch and Hash are empty is set to `master`.
Branch plumbing.ReferenceName
@ -255,6 +362,8 @@ type CheckoutOptions struct {
// target branch. Force and Keep are mutually exclusive, should not be both
// set to true.
Keep bool
// SparseCheckoutDirectories restricts the checkout to the given directory paths.
SparseCheckoutDirectories []string
}
// Validate validates the fields and sets the default values.
@ -307,6 +416,9 @@ type ResetOptions struct {
// the index (resetting it to the tree of Commit) and the working tree
// depending on Mode. If empty MixedReset is used.
Mode ResetMode
// Files, if not empty, constrains the reset of the index to only the
// files specified in this list.
Files []string
}
// Validate validates the fields and sets the default values.
@ -318,6 +430,11 @@ func (o *ResetOptions) Validate(r *Repository) error {
}
o.Commit = ref.Hash()
} else {
_, err := r.CommitObject(o.Commit)
if err != nil {
return fmt.Errorf("invalid reset option: %w", err)
}
}
return nil
@ -347,7 +464,7 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
// this field is kept for compatility, it can be replaced with PathFilter
// this field is kept for compatibility, it can be replaced with PathFilter
FileName *string
// Filter commits based on the path of files that are updated
@ -374,7 +491,7 @@ var (
ErrMissingAuthor = errors.New("author field is required")
)
// AddOptions describes how a add operation should be performed
// AddOptions describes how an `add` operation should be performed
type AddOptions struct {
// All equivalent to `git add -A`, update the index not only where the
// working tree has a file matching `Path` but also where the index already
@ -382,11 +499,16 @@ type AddOptions struct {
// working tree. If no `Path` nor `Glob` is given when `All` option is
// used, all files in the entire working tree are updated.
All bool
// Path is the exact filepath to a the file or directory to be added.
// Path is the exact filepath to the file or directory to be added.
Path string
// Glob adds all paths, matching pattern, to the index. If pattern matches a
// directory path, all directory contents are added to the index recursively.
Glob string
// SkipStatus adds the path with no status check. This option is relevant only
// when the `Path` option is specified and does not apply when the `All` option is used.
// Notice that when passing an ignored path it will be added anyway.
// When true it can speed up adding files to the worktree in very large repositories.
SkipStatus bool
}
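
A sketch of adding a single known path while skipping the status check, via the worktree's AddWithOptions:

package main

import (
	git "github.com/jesseduffield/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// SkipStatus avoids the full status computation; useful in very large
	// repositories when the exact path is already known.
	if err := wt.AddWithOptions(&git.AddOptions{Path: "main.go", SkipStatus: true}); err != nil {
		panic(err)
	}
}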
// Validate validates the fields and sets the default values.
@ -403,6 +525,10 @@ type CommitOptions struct {
// All automatically stage files that have been modified and deleted, but
// new files you have not told Git about are not affected.
All bool
// AllowEmptyCommits enables empty commits to be created. An empty commit
// is when no changes to the tree were made, but a new commit message is
// provided. The default behavior is false, which results in ErrEmptyCommit.
AllowEmptyCommits bool
// Author is the author's signature of the commit. If Author is empty the
// Name and Email is read from the config, and time.Now it's used as When.
Author *object.Signature
@ -416,10 +542,25 @@ type CommitOptions struct {
// commit will not be signed. The private key must be present and already
// decrypted.
SignKey *openpgp.Entity
// Signer denotes a cryptographic signer to sign the commit with.
// A nil value here means the commit will not be signed.
// Takes precedence over SignKey.
Signer Signer
// Amend will create a new commit object and replace the commit that HEAD currently
// points to. Cannot be used with All nor Parents.
Amend bool
}
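
A sketch of amending HEAD with a new message; note the Validate rules that follow, which forbid combining Amend with All or Parents:

package main

import (
	"fmt"

	git "github.com/jesseduffield/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// Replace the commit HEAD points to, like `git commit --amend`.
	hash, err := wt.Commit("reworded message", &git.CommitOptions{Amend: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(hash)
}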
// Validate validates the fields and sets the default values.
func (o *CommitOptions) Validate(r *Repository) error {
if o.All && o.Amend {
return errors.New("all and amend cannot be used together")
}
if o.Amend && len(o.Parents) > 0 {
return errors.New("parents cannot be used with amend")
}
if o.Author == nil {
if err := o.loadConfigAuthorAndCommitter(r); err != nil {
return err
@ -552,8 +693,35 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// InsecureSkipTLS skips SSL verification when the protocol is HTTPS
InsecureSkipTLS bool
// CABundle specifies an additional CA bundle to use with the system cert pool
CABundle []byte
// PeelingOption defines how peeled objects are handled during a
// remote list.
PeelingOption PeelingOption
// ProxyOptions provides info required for connecting to a proxy.
ProxyOptions transport.ProxyOptions
// Timeout specifies the timeout in seconds for list operations
Timeout int
}
// PeelingOption represents the different ways to handle peeled references.
//
// Peeled references represent the underlying object of an annotated
// (or signed) tag. Refer to upstream documentation for more info:
// https://github.com/git/git/blob/master/Documentation/technical/reftable.txt
type PeelingOption uint8
const (
// IgnorePeeled ignores all peeled reference names. This is the default behavior.
IgnorePeeled PeelingOption = 0
// OnlyPeeled returns only peeled reference names.
OnlyPeeled PeelingOption = 1
// AppendPeeled appends peeled reference names to the reference list.
AppendPeeled PeelingOption = 2
)
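
A sketch of listing a remote with peeled tag refs appended (the fixture URL is illustrative):

package main

import (
	"fmt"

	git "github.com/jesseduffield/go-git/v5"
	"github.com/jesseduffield/go-git/v5/config"
	"github.com/jesseduffield/go-git/v5/storage/memory"
)

func main() {
	remote := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
		Name: "origin",
		URLs: []string{"https://github.com/git-fixtures/basic.git"},
	})
	// AppendPeeled also returns the <tag>^{} entries that point at each
	// annotated tag's target object.
	refs, err := remote.List(&git.ListOptions{PeelingOption: git.AppendPeeled})
	if err != nil {
		panic(err)
	}
	for _, r := range refs {
		fmt.Println(r)
	}
}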
// CleanOptions describes how a clean should be performed.
type CleanOptions struct {
Dir bool
@ -578,7 +746,13 @@ var (
)
// Validate validates the fields and sets the default values.
//
// TODO: deprecate in favor of Validate(r *Repository) in v6.
func (o *GrepOptions) Validate(w *Worktree) error {
return o.validate(w.r)
}
func (o *GrepOptions) validate(r *Repository) error {
if !o.CommitHash.IsZero() && o.ReferenceName != "" {
return ErrHashOrReference
}
@ -586,7 +760,7 @@ func (o *GrepOptions) Validate(w *Worktree) error {
// If none of CommitHash and ReferenceName are provided, set commit hash of
// the repository's head.
if o.CommitHash.IsZero() && o.ReferenceName == "" {
ref, err := w.r.Head()
ref, err := r.Head()
if err != nil {
return err
}
@ -609,3 +783,36 @@ type PlainOpenOptions struct {
// Validate validates the fields and sets the default values.
func (o *PlainOpenOptions) Validate() error { return nil }
type PlainInitOptions struct {
InitOptions
// Determines if the repository will have a worktree (non-bare) or not (bare).
Bare bool
ObjectFormat formatcfg.ObjectFormat
}
// Validate validates the fields and sets the default values.
func (o *PlainInitOptions) Validate() error { return nil }
var (
ErrNoRestorePaths = errors.New("you must specify path(s) to restore")
)
// RestoreOptions describes how a restore should be performed.
type RestoreOptions struct {
// Staged restores the content in the index
Staged bool
// Worktree restores the content of the working tree
Worktree bool
// List of file paths that will be restored
Files []string
}
// Validate validates the fields and sets the default values.
func (o *RestoreOptions) Validate() error {
if len(o.Files) == 0 {
return ErrNoRestorePaths
}
return nil
}
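
A sketch of unstaging a file with these options, assuming the Worktree.Restore method that accompanies them in this version:

package main

import (
	git "github.com/jesseduffield/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// Like `git restore --staged README.md`.
	err = wt.Restore(&git.RestoreOptions{
		Staged: true,
		Files:  []string{"README.md"},
	})
	if err != nil {
		panic(err)
	}
}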

vendor/github.com/jesseduffield/go-git/v5/oss-fuzz.sh generated vendored Normal file
View File

@ -0,0 +1,35 @@
#!/bin/bash -eu
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
go mod download
go get github.com/AdamKorcz/go-118-fuzz-build/testing
if [ "$SANITIZER" != "coverage" ]; then
sed -i '/func (s \*DecoderSuite) TestDecode(/,/^}/ s/^/\/\//' plumbing/format/config/decoder_test.go
sed -n '35,$p' plumbing/format/packfile/common_test.go >> plumbing/format/packfile/delta_test.go
sed -n '20,53p' plumbing/object/object_test.go >> plumbing/object/tree_test.go
sed -i 's|func Test|// func Test|' plumbing/transport/common_test.go
fi
compile_native_go_fuzzer $(pwd)/internal/revision FuzzParser fuzz_parser
compile_native_go_fuzzer $(pwd)/plumbing/format/config FuzzDecoder fuzz_decoder_config
compile_native_go_fuzzer $(pwd)/plumbing/format/packfile FuzzPatchDelta fuzz_patch_delta
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzParseSignedBytes fuzz_parse_signed_bytes
compile_native_go_fuzzer $(pwd)/plumbing/object FuzzDecode fuzz_decode
compile_native_go_fuzzer $(pwd)/plumbing/protocol/packp FuzzDecoder fuzz_decoder_packp
compile_native_go_fuzzer $(pwd)/plumbing/transport FuzzNewEndpoint fuzz_new_endpoint

View File

@ -133,7 +133,7 @@ func (m FileMode) IsMalformed() bool {
m != Submodule
}
// String returns the FileMode as a string in the standatd git format,
// String returns the FileMode as a string in the standard git format,
// that is, an octal number padded with zeros to 7 digits. Malformed
// modes are printed in that same format, for easier debugging.
//

View File

@ -44,6 +44,46 @@ func (c *Config) Section(name string) *Section {
return s
}
// HasSection checks if the Config has a section with the specified name.
func (c *Config) HasSection(name string) bool {
for _, s := range c.Sections {
if s.IsName(name) {
return true
}
}
return false
}
// RemoveSection removes a section from a config file.
func (c *Config) RemoveSection(name string) *Config {
result := Sections{}
for _, s := range c.Sections {
if !s.IsName(name) {
result = append(result, s)
}
}
c.Sections = result
return c
}
// RemoveSubsection removes a subsection from a config file.
func (c *Config) RemoveSubsection(section string, subsection string) *Config {
for _, s := range c.Sections {
if s.IsName(section) {
result := Subsections{}
for _, ss := range s.Subsections {
if !ss.IsName(subsection) {
result = append(result, ss)
}
}
s.Subsections = result
}
}
return c
}
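
A sketch of the new section helpers on plumbing/format/config:

package main

import (
	"fmt"

	format "github.com/jesseduffield/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := format.New()
	cfg.AddOption("remote", "origin", "url", "https://example.com/repo.git")

	fmt.Println(cfg.HasSection("remote")) // true
	cfg.RemoveSubsection("remote", "origin")
	cfg.RemoveSection("remote")
	fmt.Println(cfg.HasSection("remote")) // false
}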
// AddOption adds an option to a given section and subsection. Use the
// NoSubsection constant for the subsection argument if no subsection is wanted.
func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
@ -67,33 +107,3 @@ func (c *Config) SetOption(section string, subsection string, key string, value
return c
}
// RemoveSection removes a section from a config file.
func (c *Config) RemoveSection(name string) *Config {
result := Sections{}
for _, s := range c.Sections {
if !s.IsName(name) {
result = append(result, s)
}
}
c.Sections = result
return c
}
// RemoveSubsection remove s a subsection from a config file.
func (c *Config) RemoveSubsection(section string, subsection string) *Config {
for _, s := range c.Sections {
if s.IsName(section) {
result := Subsections{}
for _, ss := range s.Subsections {
if !ss.IsName(subsection) {
result = append(result, ss)
}
}
s.Subsections = result
}
}
return c
}

View File

@ -11,6 +11,10 @@ type Encoder struct {
w io.Writer
}
var (
subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
@ -48,8 +52,7 @@ func (e *Encoder) encodeSection(s *Section) error {
}
func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
//TODO: escape
if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil {
return err
}
@ -58,12 +61,14 @@ func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
pattern := "\t%s = %s\n"
if strings.Contains(o.Value, "\\") {
pattern = "\t%s = %q\n"
var value string
if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
value = `"`+valueReplacer.Replace(o.Value)+`"`
} else {
value = o.Value
}
if err := e.printf(pattern, o.Key, o.Value); err != nil {
if err := e.printf("\t%s = %s\n", o.Key, value); err != nil {
return err
}
}
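
A sketch of how the new escaping behaves when encoding: values containing comment characters, quotes, control characters, or leading/trailing spaces are emitted quoted and escaped:

package main

import (
	"os"

	format "github.com/jesseduffield/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := format.New()
	cfg.AddOption("alias", format.NoSubsection, "st", "status --short # not a comment")
	// Emits:
	// [alias]
	// 	st = "status --short # not a comment"
	if err := format.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
}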

View File

@ -0,0 +1,53 @@
package config
// RepositoryFormatVersion represents the repository format version,
// as defined at:
//
// https://git-scm.com/docs/repository-version
type RepositoryFormatVersion string
const (
// Version_0 is the format defined by the initial version of git,
// including but not limited to the format of the repository
// directory, the repository configuration file, and the object
// and ref storage.
//
// Specifying the complete behavior of git is beyond the scope
// of this document.
Version_0 = "0"
// Version_1 is identical to version 0, with the following exceptions:
//
// 1. When reading the core.repositoryformatversion variable, a git
// implementation which supports version 1 MUST also read any
// configuration keys found in the extensions section of the
// configuration file.
//
// 2. If a version-1 repository specifies any extensions.* keys that
// the running git has not implemented, the operation MUST NOT proceed.
// Similarly, if the value of any known key is not understood by the
// implementation, the operation MUST NOT proceed.
//
// Note that if no extensions are specified in the config file, then
// core.repositoryformatversion SHOULD be set to 0 (setting it to 1 provides
// no benefit, and makes the repository incompatible with older
// implementations of git).
Version_1 = "1"
// DefaultRepositoryFormatVersion holds the default repository format version.
DefaultRepositoryFormatVersion = Version_0
)
// ObjectFormat defines the object format.
type ObjectFormat string
const (
// SHA1 represents the object format used for SHA1.
SHA1 ObjectFormat = "sha1"
// SHA256 represents the object format used for SHA256.
SHA256 ObjectFormat = "sha256"
// DefaultObjectFormat holds the default object format.
DefaultObjectFormat = SHA1
)
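
A sketch of initializing a repository that records the sha256 object format (repository format version 1 with an extensions key); this assumes the PlainInitWithOptions entry point, and sha256 support in go-git is still partial, so treat it as illustrative:

package main

import (
	git "github.com/jesseduffield/go-git/v5"
	formatcfg "github.com/jesseduffield/go-git/v5/plumbing/format/config"
)

func main() {
	_, err := git.PlainInitWithOptions("/tmp/sha256.git", &git.PlainInitOptions{
		Bare:         true,
		ObjectFormat: formatcfg.SHA256,
	})
	if err != nil {
		panic(err)
	}
}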

View File

@ -54,6 +54,16 @@ func (opts Options) Get(key string) string {
return ""
}
// Has checks if an Option exist with the given key.
func (opts Options) Has(key string) bool {
for _, o := range opts {
if o.IsKey(key) {
return true
}
}
return false
}
// GetAll returns all possible values for the same key.
func (opts Options) GetAll(key string) []string {
result := []string{}

View File

@ -64,31 +64,6 @@ func (s *Section) IsName(name string) bool {
return strings.EqualFold(s.Name, name)
}
// Option return the value for the specified key. Empty string is returned if
// key does not exists.
func (s *Section) Option(key string) string {
return s.Options.Get(key)
}
// AddOption adds a new Option to the Section. The updated Section is returned.
func (s *Section) AddOption(key string, value string) *Section {
s.Options = s.Options.withAddedOption(key, value)
return s
}
// SetOption adds a new Option to the Section. If the option already exists, is replaced.
// The updated Section is returned.
func (s *Section) SetOption(key string, value string) *Section {
s.Options = s.Options.withSettedOption(key, value)
return s
}
// Remove an option with the specified key. The updated Section is returned.
func (s *Section) RemoveOption(key string) *Section {
s.Options = s.Options.withoutOption(key)
return s
}
// Subsection returns a Subsection from the specified Section. If the
// Subsection does not exist, a new one is created and added to the Section.
func (s *Section) Subsection(name string) *Subsection {
@ -115,6 +90,55 @@ func (s *Section) HasSubsection(name string) bool {
return false
}
// RemoveSubsection removes a subsection from a Section.
func (s *Section) RemoveSubsection(name string) *Section {
result := Subsections{}
for _, s := range s.Subsections {
if !s.IsName(name) {
result = append(result, s)
}
}
s.Subsections = result
return s
}
// Option returns the value for the specified key. Empty string is returned if
// the key does not exist.
func (s *Section) Option(key string) string {
return s.Options.Get(key)
}
// OptionAll returns all possible values for an option with the specified key.
// If the option does not exist, an empty slice will be returned.
func (s *Section) OptionAll(key string) []string {
return s.Options.GetAll(key)
}
// HasOption checks if the Section has an Option with the given key.
func (s *Section) HasOption(key string) bool {
return s.Options.Has(key)
}
// AddOption adds a new Option to the Section. The updated Section is returned.
func (s *Section) AddOption(key string, value string) *Section {
s.Options = s.Options.withAddedOption(key, value)
return s
}
// SetOption adds a new Option to the Section. If the option already exists, it is replaced.
// The updated Section is returned.
func (s *Section) SetOption(key string, value string) *Section {
s.Options = s.Options.withSettedOption(key, value)
return s
}
// Remove an option with the specified key. The updated Section is returned.
func (s *Section) RemoveOption(key string) *Section {
s.Options = s.Options.withoutOption(key)
return s
}
// IsName checks if the name of the subsection is exactly the specified name.
func (s *Subsection) IsName(name string) bool {
return s.Name == name
@ -126,6 +150,17 @@ func (s *Subsection) Option(key string) string {
return s.Options.Get(key)
}
// OptionAll returns all possible values for an option with the specified key.
// If the option does not exist, an empty slice will be returned.
func (s *Subsection) OptionAll(key string) []string {
return s.Options.GetAll(key)
}
// HasOption checks if the Subsection has an Option with the given key.
func (s *Subsection) HasOption(key string) bool {
return s.Options.Has(key)
}
// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
func (s *Subsection) AddOption(key string, value string) *Subsection {
s.Options = s.Options.withAddedOption(key, value)

View File

@ -9,7 +9,7 @@ import (
type Operation int
const (
// Equal item represents a equals diff.
// Equal item represents an equals diff.
Equal Operation = iota
// Add item represents an insert diff.
Add
@ -26,15 +26,15 @@ type Patch interface {
Message() string
}
// FilePatch represents the necessary steps to transform one file to another.
// FilePatch represents the necessary steps to transform one file into another.
type FilePatch interface {
// IsBinary returns true if this patch is representing a binary file.
IsBinary() bool
// Files returns the from and to Files, with all the necessary metadata to
// Files returns the from and to Files, with all the necessary metadata
// about them. If the patch creates a new file, "from" will be nil.
// If the patch deletes a file, "to" will be nil.
Files() (from, to File)
// Chunks returns a slice of ordered changes to transform "from" File to
// Chunks returns a slice of ordered changes to transform "from" File into
// "to" File. If the file is a binary one, Chunks will be empty.
Chunks() []Chunk
}
@ -49,7 +49,7 @@ type File interface {
Path() string
}
// Chunk represents a portion of a file transformation to another.
// Chunk represents a portion of a file transformation into another.
type Chunk interface {
// Content contains the portion of the file.
Content() string

View File

@ -38,6 +38,10 @@ type UnifiedEncoder struct {
// a change.
contextLines int
// srcPrefix and dstPrefix are prepended to file paths when encoding a diff.
srcPrefix string
dstPrefix string
// colorConfig is the color configuration. The default is no color.
color ColorConfig
}
@ -46,6 +50,8 @@ type UnifiedEncoder struct {
func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder {
return &UnifiedEncoder{
Writer: w,
srcPrefix: "a/",
dstPrefix: "b/",
contextLines: contextLines,
}
}
@ -56,6 +62,18 @@ func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder {
return e
}
// SetSrcPrefix sets e's srcPrefix and returns e.
func (e *UnifiedEncoder) SetSrcPrefix(prefix string) *UnifiedEncoder {
e.srcPrefix = prefix
return e
}
// SetDstPrefix sets e's dstPrefix and returns e.
func (e *UnifiedEncoder) SetDstPrefix(prefix string) *UnifiedEncoder {
e.dstPrefix = prefix
return e
}
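
A sketch of rendering a patch with custom prefixes, mimicking `git diff --src-prefix=i/ --dst-prefix=w/` (obtaining a diff.Patch from two commits is elided):

package diffutil

import (
	"os"

	"github.com/jesseduffield/go-git/v5/plumbing/format/diff"
)

// renderPatch writes p as a unified diff with index/worktree-style prefixes.
func renderPatch(p diff.Patch) error {
	e := diff.NewUnifiedEncoder(os.Stdout, 3).
		SetSrcPrefix("i/").
		SetDstPrefix("w/")
	return e.Encode(p)
}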
// Encode encodes patch.
func (e *UnifiedEncoder) Encode(patch Patch) error {
sb := &strings.Builder{}
@ -91,7 +109,8 @@ func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch Fil
case from != nil && to != nil:
hashEquals := from.Hash() == to.Hash()
lines = append(lines,
fmt.Sprintf("diff --git a/%s b/%s", from.Path(), to.Path()),
fmt.Sprintf("diff --git %s%s %s%s",
e.srcPrefix, from.Path(), e.dstPrefix, to.Path()),
)
if from.Mode() != to.Mode() {
lines = append(lines,
@ -115,22 +134,22 @@ func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch Fil
)
}
if !hashEquals {
lines = e.appendPathLines(lines, "a/"+from.Path(), "b/"+to.Path(), isBinary)
lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), e.dstPrefix+to.Path(), isBinary)
}
case from == nil:
lines = append(lines,
fmt.Sprintf("diff --git a/%s b/%s", to.Path(), to.Path()),
fmt.Sprintf("diff --git %s %s", e.srcPrefix+to.Path(), e.dstPrefix+to.Path()),
fmt.Sprintf("new file mode %o", to.Mode()),
fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()),
)
lines = e.appendPathLines(lines, "/dev/null", "b/"+to.Path(), isBinary)
lines = e.appendPathLines(lines, "/dev/null", e.dstPrefix+to.Path(), isBinary)
case to == nil:
lines = append(lines,
fmt.Sprintf("diff --git a/%s b/%s", from.Path(), from.Path()),
fmt.Sprintf("diff --git %s %s", e.srcPrefix+from.Path(), e.dstPrefix+from.Path()),
fmt.Sprintf("deleted file mode %o", from.Mode()),
fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash),
)
lines = e.appendPathLines(lines, "a/"+from.Path(), "/dev/null", isBinary)
lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), "/dev/null", isBinary)
}
sb.WriteString(e.color[Meta])

View File

@ -3,28 +3,32 @@ package gitignore
import (
"bufio"
"bytes"
"io/ioutil"
"io"
"os"
"os/user"
"strings"
"github.com/go-git/go-billy/v5"
"github.com/jesseduffield/go-git/v5/internal/path_util"
"github.com/jesseduffield/go-git/v5/plumbing/format/config"
gioutil "github.com/jesseduffield/go-git/v5/utils/ioutil"
)
const (
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
infoExcludeFile = gitDir + "/info/exclude"
)
// readIgnoreFile reads a specific git ignore file.
func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
ignoreFile, _ = path_util.ReplaceTildeWithHome(ignoreFile)
f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
if err == nil {
defer f.Close()
@ -43,10 +47,14 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [
return
}
// ReadPatterns reads gitignore patterns recursively traversing through the directory
// structure. The result is in the ascending order of priority (last higher).
// ReadPatterns reads the .git/info/exclude and then the gitignore patterns
// recursively traversing through the directory structure. The result is in
// the ascending order of priority (last higher).
func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, gitignoreFile)
ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
subps, _ := readIgnoreFile(fs, path, gitignoreFile)
ps = append(ps, subps...)
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))
@ -56,6 +64,10 @@ func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error)
for _, fi := range fis {
if fi.IsDir() && fi.Name() != gitDir {
if NewMatcher(ps).Match(append(path, fi.Name()), true) {
continue
}
var subps []Pattern
subps, err = ReadPatterns(fs, append(path, fi.Name()))
if err != nil {
@ -82,7 +94,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
defer gioutil.CheckClose(f, &err)
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return
}
@ -108,7 +120,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
return
}
// LoadGlobalPatterns loads gitignore patterns from from the gitignore file
// LoadGlobalPatterns loads gitignore patterns from the gitignore file
// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
// exist the function will return nil. If the core.excludesfile property
// is not declared, the function will return nil. If the file pointed to by
@ -116,16 +128,16 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
//
// The function assumes fs is rooted at the root filesystem.
func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
usr, err := user.Current()
home, err := os.UserHomeDir()
if err != nil {
return
}
return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile))
return loadPatterns(fs, fs.Join(home, gitconfigFile))
}
// LoadSystemPatterns loads gitignore patterns from from the gitignore file
// declared in a system's /etc/gitconfig file. If the ~/.gitconfig file does
// LoadSystemPatterns loads gitignore patterns from the gitignore file
// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does
// not exist the function will return nil. If the core.excludesfile property
// is not declared, the function will return nil. If the file pointed to by
// the core.excludesfile property does not exist, the function will return nil.
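
A sketch of loading patterns with the new behavior (reading .git/info/exclude before the .gitignore files), using go-billy's osfs; the repository path is illustrative:

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing/format/gitignore"
)

func main() {
	fs := osfs.New("/path/to/repo")
	ps, err := gitignore.ReadPatterns(fs, nil)
	if err != nil {
		panic(err)
	}
	m := gitignore.NewMatcher(ps)
	// isDir=false: match a file path split into its components.
	fmt.Println(m.Match([]string{"build", "out.o"}, false))
}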

View File

@ -39,6 +39,8 @@ type pattern struct {
// ParsePattern parses a gitignore pattern string into the Pattern structure.
func ParsePattern(p string, domain []string) Pattern {
// storing domain, copy it to ensure it isn't changed externally
domain = append([]string(nil), domain...)
res := pattern{domain: domain}
if strings.HasPrefix(p, inclusionPrefix) {

View File

@ -6,20 +6,21 @@ import (
"errors"
"io"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
"github.com/jesseduffield/go-git/v5/utils/binary"
)
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
ErrUnsupportedVersion = errors.New("Unsupported version")
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
ErrMalformedIdxFile = errors.New("malformed IDX file")
)
const (
fanout = 256
objectIDLength = 20
objectIDLength = hash.Size
)
// Decoder reads and decodes idx files from an input stream.

View File

@ -1,10 +1,9 @@
package idxfile
import (
"crypto/sha1"
"hash"
"io"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
"github.com/jesseduffield/go-git/v5/utils/binary"
)
@ -16,7 +15,7 @@ type Encoder struct {
// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
@ -133,10 +132,10 @@ func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
return 0, err
}
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:hash.Size])
if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
return 0, err
}
return 40, nil
return hash.HexSize, nil
}

View File

@ -8,6 +8,7 @@ import (
encbin "encoding/binary"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
)
const (
@ -53,8 +54,8 @@ type MemoryIndex struct {
Offset32 [][]byte
CRC32 [][]byte
Offset64 []byte
PackfileChecksum [20]byte
IdxChecksum [20]byte
PackfileChecksum [hash.Size]byte
IdxChecksum [hash.Size]byte
offsetHash map[int64]plumbing.Hash
offsetHashIsFull bool

View File

@ -84,11 +84,8 @@ func (w *Writer) OnFooter(h plumbing.Hash) error {
w.checksum = h
w.finished = true
_, err := w.createIndex()
if err != nil {
return err
}
return nil
return err
}
// createIndex returns a filled MemoryIndex with the information filled by
@ -139,15 +136,23 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
offset := o.Offset
if offset > math.MaxInt32 {
offset = w.addOffset64(offset)
var err error
offset, err = w.addOffset64(offset)
if err != nil {
return nil, err
}
}
buf.Truncate(0)
binary.WriteUint32(buf, uint32(offset))
if err := binary.WriteUint32(buf, uint32(offset)); err != nil {
return nil, err
}
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
binary.WriteUint32(buf, o.CRC32)
if err := binary.WriteUint32(buf, o.CRC32); err != nil {
return nil, err
}
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}
@ -161,15 +166,17 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
return idx, nil
}
func (w *Writer) addOffset64(pos uint64) uint64 {
func (w *Writer) addOffset64(pos uint64) (uint64, error) {
buf := new(bytes.Buffer)
binary.WriteUint64(buf, pos)
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
if err := binary.WriteUint64(buf, pos); err != nil {
return 0, err
}
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
index := uint64(w.offset64 | (1 << 31))
w.offset64++
return index
return index, nil
}
func (o objects) Len() int {

View File

@ -3,15 +3,14 @@ package index
import (
"bufio"
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
"github.com/jesseduffield/go-git/v5/utils/binary"
)
@ -25,8 +24,8 @@ var (
// ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with
// the read content
ErrInvalidChecksum = errors.New("invalid checksum")
errUnknownExtension = errors.New("unknown extension")
// ErrUnknownExtension is returned when a mandatory index extension is encountered that is not supported
ErrUnknownExtension = errors.New("unknown extension")
)
const (
@ -40,6 +39,7 @@ const (
// A Decoder reads and decodes index files from an input stream.
type Decoder struct {
buf *bufio.Reader
r io.Reader
hash hash.Hash
lastEntry *Entry
@ -49,9 +49,11 @@ type Decoder struct {
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
buf := bufio.NewReader(r)
return &Decoder{
r: io.TeeReader(r, h),
buf: buf,
r: io.TeeReader(buf, h),
hash: h,
extReader: bufio.NewReader(nil),
}
@ -202,7 +204,7 @@ func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
entrySize := read + len(e.Name)
padLen := 8 - entrySize%8
_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))
_, err := io.CopyN(io.Discard, d.r, int64(padLen))
return err
}
@ -211,71 +213,75 @@ func (d *Decoder) readExtensions(idx *Index) error {
// count that they are not supported by jgit or libgit
var expected []byte
var peeked []byte
var err error
var header [4]byte
// we should always be able to peek for 4 bytes (header) + 4 bytes (extlen) + final hash
// if this fails, we know that we're at the end of the index
peekLen := 4 + 4 + d.hash.Size()
for {
expected = d.hash.Sum(nil)
var n int
if n, err = io.ReadFull(d.r, header[:]); err != nil {
if n == 0 {
err = io.EOF
}
peeked, err = d.buf.Peek(peekLen)
if len(peeked) < peekLen {
// there can't be an extension at this point, so let's bail out
break
}
err = d.readExtension(idx, header[:])
if err != nil {
break
}
}
if err != errUnknownExtension {
return err
}
return d.readChecksum(expected, header)
}
func (d *Decoder) readExtension(idx *Index, header []byte) error {
switch {
case bytes.Equal(header, treeExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
err = d.readExtension(idx)
if err != nil {
return err
}
}
return d.readChecksum(expected)
}
func (d *Decoder) readExtension(idx *Index) error {
var header [4]byte
if _, err := io.ReadFull(d.r, header[:]); err != nil {
return err
}
r, err := d.getExtensionReader()
if err != nil {
return err
}
switch {
case bytes.Equal(header[:], treeExtSignature):
idx.Cache = &Tree{}
d := &treeExtensionDecoder{r}
if err := d.Decode(idx.Cache); err != nil {
return err
}
case bytes.Equal(header, resolveUndoExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
case bytes.Equal(header[:], resolveUndoExtSignature):
idx.ResolveUndo = &ResolveUndo{}
d := &resolveUndoDecoder{r}
if err := d.Decode(idx.ResolveUndo); err != nil {
return err
}
case bytes.Equal(header, endOfIndexEntryExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
case bytes.Equal(header[:], endOfIndexEntryExtSignature):
idx.EndOfIndexEntry = &EndOfIndexEntry{}
d := &endOfIndexEntryDecoder{r}
if err := d.Decode(idx.EndOfIndexEntry); err != nil {
return err
}
default:
return errUnknownExtension
// See https://git-scm.com/docs/index-format, which says:
// If the first byte is 'A'..'Z' the extension is optional and can be ignored.
if header[0] < 'A' || header[0] > 'Z' {
return ErrUnknownExtension
}
d := &unknownExtensionDecoder{r}
if err := d.Decode(); err != nil {
return err
}
}
return nil
@ -291,11 +297,10 @@ func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
return d.extReader, nil
}
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
func (d *Decoder) readChecksum(expected []byte) error {
var h plumbing.Hash
copy(h[:4], alreadyRead[:])
if _, err := io.ReadFull(d.r, h[4:]); err != nil {
if _, err := io.ReadFull(d.r, h[:]); err != nil {
return err
}
@ -477,3 +482,22 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
_, err = io.ReadFull(d.r, e.Hash[:])
return err
}
type unknownExtensionDecoder struct {
r *bufio.Reader
}
func (d *unknownExtensionDecoder) Decode() error {
var buf [1024]byte
for {
_, err := d.r.Read(buf[:])
if err == io.EOF {
break
}
if err != nil {
return err
}
}
return nil
}

View File

@ -2,19 +2,21 @@ package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"fmt"
"io"
"path"
"sort"
"strings"
"time"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
"github.com/jesseduffield/go-git/v5/utils/binary"
)
var (
// EncodeVersionSupported is the range of supported index versions
EncodeVersionSupported uint32 = 2
EncodeVersionSupported uint32 = 4
// ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
// negative timestamp values
@ -23,22 +25,27 @@ var (
// An Encoder writes an Index to an output stream.
type Encoder struct {
w io.Writer
hash hash.Hash
w io.Writer
hash hash.Hash
lastEntry *Entry
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(hash.CryptoType)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
return &Encoder{mw, h, nil}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
return e.encode(idx, true)
}
func (e *Encoder) encode(idx *Index, footer bool) error {
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
if idx.Version > EncodeVersionSupported {
return ErrUnsupportedVersion
}
@ -50,7 +57,10 @@ func (e *Encoder) Encode(idx *Index) error {
return err
}
return e.encodeFooter()
if footer {
return e.encodeFooter()
}
return nil
}
func (e *Encoder) encodeHeader(idx *Index) error {
@ -65,12 +75,16 @@ func (e *Encoder) encodeEntries(idx *Index) error {
sort.Sort(byName(idx.Entries))
for _, entry := range idx.Entries {
if err := e.encodeEntry(entry); err != nil {
if err := e.encodeEntry(idx, entry); err != nil {
return err
}
entryLength := entryHeaderLength
if entry.IntentToAdd || entry.SkipWorktree {
entryLength += 2
}
wrote := entryHeaderLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
wrote := entryLength + len(entry.Name)
if err := e.padEntry(idx, wrote); err != nil {
return err
}
}
@ -78,11 +92,7 @@ func (e *Encoder) encodeEntries(idx *Index) error {
return nil
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
func (e *Encoder) encodeEntry(idx *Index, entry *Entry) error {
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
@ -110,16 +120,91 @@ func (e *Encoder) encodeEntry(entry *Entry) error {
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
flagsFlow := []interface{}{flags}
if entry.IntentToAdd || entry.SkipWorktree {
var extendedFlags uint16
if entry.IntentToAdd {
extendedFlags |= intentToAddMask
}
if entry.SkipWorktree {
extendedFlags |= skipWorkTreeMask
}
flagsFlow = []interface{}{flags | entryExtended, extendedFlags}
}
flow = append(flow, flagsFlow...)
if err := binary.Write(e.w, flow...); err != nil {
return err
}
switch idx.Version {
case 2, 3:
err = e.encodeEntryName(entry)
case 4:
err = e.encodeEntryNameV4(entry)
default:
err = ErrUnsupportedVersion
}
return err
}
func (e *Encoder) encodeEntryName(entry *Entry) error {
return binary.Write(e.w, []byte(entry.Name))
}
func (e *Encoder) encodeEntryNameV4(entry *Entry) error {
name := entry.Name
l := 0
if e.lastEntry != nil {
dir := path.Dir(e.lastEntry.Name) + "/"
if strings.HasPrefix(entry.Name, dir) {
l = len(e.lastEntry.Name) - len(dir)
name = strings.TrimPrefix(entry.Name, dir)
} else {
l = len(e.lastEntry.Name)
}
}
e.lastEntry = entry
err := binary.WriteVariableWidthInt(e.w, int64(l))
if err != nil {
return err
}
return binary.Write(e.w, []byte(name+string('\x00')))
}
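
The v4 entry-name encoding stores a strip count against the previous entry's name followed by the remaining suffix; a standalone sketch of the computation performed above:

package main

import (
	"fmt"
	"path"
	"strings"
)

// v4Name mirrors encodeEntryNameV4: emit how many bytes to strip from the end
// of the previous name, then the new suffix (NUL-terminated in the real format).
func v4Name(prev, name string) (strip int, suffix string) {
	if prev == "" {
		return 0, name
	}
	dir := path.Dir(prev) + "/"
	if strings.HasPrefix(name, dir) {
		return len(prev) - len(dir), strings.TrimPrefix(name, dir)
	}
	return len(prev), name
}

func main() {
	// Strip 4 bytes ("a.md") from the previous name, then append "b.md".
	fmt.Println(v4Name("docs/a.md", "docs/b.md")) // 4 b.md
}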
func (e *Encoder) encodeRawExtension(signature string, data []byte) error {
if len(signature) != 4 {
return fmt.Errorf("invalid signature length")
}
_, err := e.w.Write([]byte(signature))
if err != nil {
return err
}
err = binary.WriteUint32(e.w, uint32(len(data)))
if err != nil {
return err
}
_, err = e.w.Write(data)
if err != nil {
return err
}
return nil
}
func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
if t.IsZero() {
return 0, 0, nil
@ -132,7 +217,11 @@ func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
return uint32(t.Unix()), uint32(t.Nanosecond()), nil
}
func (e *Encoder) padEntry(wrote int) error {
func (e *Encoder) padEntry(idx *Index, wrote int) error {
if idx.Version == 4 {
return nil
}
padLen := 8 - wrote%8
_, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/jesseduffield/go-git/v5/plumbing"
@ -211,3 +212,20 @@ type EndOfIndexEntry struct {
// their contents).
Hash plumbing.Hash
}
// SkipUnless applies patterns in the form of A, A/B, A/B/C
// to the index, preventing files that match none of them from being checked out
func (i *Index) SkipUnless(patterns []string) {
for _, e := range i.Entries {
var include bool
for _, pattern := range patterns {
if strings.HasPrefix(e.Name, pattern) {
include = true
break
}
}
if !include {
e.SkipWorktree = true
}
}
}
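
SkipUnless backs the new CheckoutOptions.SparseCheckoutDirectories; a sketch of a sparse checkout through the worktree:

package main

import (
	git "github.com/jesseduffield/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}
	// Only materialize docs/ and cmd/ in the working tree; entries outside
	// these directories keep the SkipWorktree bit set by Index.SkipUnless.
	err = wt.Checkout(&git.CheckoutOptions{
		SparseCheckoutDirectories: []string{"docs", "cmd"},
	})
	if err != nil {
		panic(err)
	}
}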

View File

@ -1,13 +1,13 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/format/packfile"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
var (
@ -20,20 +20,22 @@ var (
// Reader implements io.ReadCloser. Close should be called when finished with
// the Reader. Close will not close the underlying io.Reader.
type Reader struct {
multi io.Reader
zlib io.ReadCloser
hasher plumbing.Hasher
multi io.Reader
zlib io.Reader
zlibref sync.ZLibReader
hasher plumbing.Hasher
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) (*Reader, error) {
zlib, err := zlib.NewReader(r)
zlib, err := sync.GetZlibReader(r)
if err != nil {
return nil, packfile.ErrZLib.AddDetails(err.Error())
}
return &Reader{
zlib: zlib,
zlib: zlib.Reader,
zlibref: zlib,
}, nil
}
@ -110,5 +112,6 @@ func (r *Reader) Hash() plumbing.Hash {
// Close releases any resources consumed by the Reader. Calling Close does not
// close the wrapped io.Reader originally passed to NewReader.
func (r *Reader) Close() error {
return r.zlib.Close()
sync.PutZlibReader(r.zlibref)
return nil
}

View File

@ -7,6 +7,7 @@ import (
"strconv"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
var (
@ -18,9 +19,9 @@ var (
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
zlib *zlib.Writer
closed bool
pending int64 // number of unwritten bytes
@ -31,9 +32,10 @@ type Writer struct {
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
zlib := sync.GetZlibWriter(w)
return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
zlib: zlib,
}
}
@ -100,6 +102,7 @@ func (w *Writer) Hash() plumbing.Hash {
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
defer sync.PutZlibWriter(w.zlib)
if err := w.zlib.Close(); err != nil {
return err
}

View File

@ -1,10 +1,7 @@
package packfile
import (
"bytes"
"compress/zlib"
"io"
"sync"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
@ -61,18 +58,3 @@ func WritePackfileToObjectStorage(
return err
}
var bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
var zlibReaderPool = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return r
},
}

View File

@ -32,19 +32,17 @@ func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l i
return 0, -1
}
if len(tgt) >= tgtOffset+s && len(src) >= blksz {
h := hashBlock(tgt, tgtOffset)
tIdx := h & idx.mask
eIdx := idx.table[tIdx]
if eIdx != 0 {
srcOffset = idx.entries[eIdx]
} else {
return
}
l = matchLength(src, tgt, tgtOffset, srcOffset)
h := hashBlock(tgt, tgtOffset)
tIdx := h & idx.mask
eIdx := idx.table[tIdx]
if eIdx == 0 {
return
}
srcOffset = idx.entries[eIdx]
l = matchLength(src, tgt, tgtOffset, srcOffset)
return
}

View File

@ -5,6 +5,7 @@ import (
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
@ -16,8 +17,11 @@ const (
s = 16
// https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
// Max size of a copy operation (64KB)
// Max size of a copy operation (64KB).
maxCopySize = 64 * 1024
// Min size of a copy operation.
minCopySize = 4
)
// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,
@ -43,18 +47,16 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbin
defer ioutil.CheckClose(tr, &err)
bb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(bb)
bb.Reset()
bb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(bb)
_, err = bb.ReadFrom(br)
if err != nil {
return nil, err
}
tb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(tb)
tb.Reset()
tb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(tb)
_, err = tb.ReadFrom(tr)
if err != nil {
@ -80,9 +82,8 @@ func DiffDelta(src, tgt []byte) []byte {
}
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@ -90,9 +91,8 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
index.init(src)
}
ibuf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(ibuf)
ibuf.Reset()
ibuf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(ibuf)
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)

View File

@ -2,11 +2,11 @@ package packfile
import (
"compress/zlib"
"crypto/sha1"
"fmt"
"io"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/binary"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
@ -28,7 +28,7 @@ type Encoder struct {
// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
h := plumbing.Hasher{
Hash: sha1.New(),
Hash: hash.New(hash.CryptoType),
}
mw := io.MultiWriter(w, h)
ow := newOffsetWriter(mw)
@ -131,11 +131,7 @@ func (e *Encoder) entry(o *ObjectToPack) (err error) {
defer ioutil.CheckClose(or, &err)
_, err = io.Copy(e.zw, or)
if err != nil {
return err
}
return nil
return err
}
func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {

View File

@ -7,19 +7,20 @@ import (
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/cache"
"github.com/jesseduffield/go-git/v5/plumbing/format/idxfile"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
)
// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
hash plumbing.Hash
h *ObjectHeader
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
hash plumbing.Hash
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
largeObjectThreshold int64
}
// NewFSObject creates a new filesystem object.
@ -32,16 +33,18 @@ func NewFSObject(
fs billy.Filesystem,
path string,
cache cache.Object,
largeObjectThreshold int64,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
largeObjectThreshold: largeObjectThreshold,
}
}
@ -62,7 +65,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
return nil, err
}
p := NewPackfileWithCache(o.index, nil, f, o.cache)
p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
// We have a big object
h, err := p.objectHeaderAtOffset(o.offset)
if err != nil {
return nil, err
}
r, err := p.getReaderDirect(h)
if err != nil {
_ = f.Close()
return nil, err
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
@ -100,17 +117,3 @@ func (o *FSObject) Type() plumbing.ObjectType {
func (o *FSObject) Writer() (io.WriteCloser, error) {
return nil, nil
}
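The largeObjectThreshold plumbed through FSObject above lets Reader stream big objects directly from the packfile instead of inflating them into memory first. A minimal sketch of how a caller might opt in, assuming the threshold is exposed through the filesystem storage's Options (the repository path and the 1 MiB value are illustrative):

package main

import (
	"github.com/go-git/go-billy/v5/osfs"
	git "github.com/jesseduffield/go-git/v5"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/storage/filesystem"
)

func main() {
	// Stream any object larger than 1 MiB instead of buffering it.
	fs := osfs.New("/path/to/repo/.git")
	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		LargeObjectThreshold: 1024 * 1024,
	})
	if _, err := git.Open(st, nil); err != nil {
		panic(err)
	}
}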
type objectReader struct {
io.ReadCloser
f billy.File
}
func (r *objectReader) Close() error {
if err := r.ReadCloser.Close(); err != nil {
_ = r.f.Close()
return err
}
return r.f.Close()
}

View File

@ -2,6 +2,7 @@ package packfile
import (
"bytes"
"fmt"
"io"
"os"
@ -11,6 +12,7 @@ import (
"github.com/jesseduffield/go-git/v5/plumbing/format/idxfile"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
var (
@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
largeObjectThreshold int64
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
@ -50,6 +53,7 @@ func NewPackfileWithCache(
fs billy.Filesystem,
file billy.File,
cache cache.Object,
largeObjectThreshold int64,
) *Packfile {
s := NewScanner(file)
return &Packfile{
@ -59,6 +63,7 @@ func NewPackfileWithCache(
s,
cache,
make(map[int64]plumbing.ObjectType),
largeObjectThreshold,
}
}
@ -66,8 +71,8 @@ func NewPackfileWithCache(
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
}
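NewPackfile now takes the threshold as an explicit parameter. A hedged sketch of reading a pack/idx pair directly (the file names are made up; passing 0 keeps the previous always-buffer behaviour):

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/idxfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/packfile"
)

func main() {
	fs := osfs.New(".")

	idxf, err := fs.Open("pack-1234.idx") // hypothetical file name
	if err != nil {
		panic(err)
	}
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxf).Decode(idx); err != nil {
		panic(err)
	}
	idxf.Close()

	packf, err := fs.Open("pack-1234.pack")
	if err != nil {
		panic(err)
	}
	p := packfile.NewPackfile(idx, fs, packf, 0)
	defer p.Close()

	iter, err := p.GetAll()
	if err != nil {
		panic(err)
	}
	_ = iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash(), o.Type(), o.Size())
		return nil
	})
}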
// Get retrieves the encoded object in the packfile with the given hash.
@ -133,9 +138,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@ -222,9 +226,9 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// For delta objects we read the delta data and apply the small object
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
@ -263,6 +267,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
p.fs,
p.file.Name(),
p.deltaBaseCache,
p.largeObjectThreshold,
), nil
}
@ -282,6 +287,49 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
func asyncReader(p *Packfile) (io.ReadCloser, error) {
reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
zr, err := sync.GetZlibReader(reader)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return asyncReader(p)
case plumbing.REFDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readREFDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
case plumbing.OFSDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readOFSDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
default:
return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
}
}
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
@ -323,9 +371,9 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err err
}
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -334,6 +382,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
var err error
base, ok := p.cacheGet(h.Reference)
if !ok {
base, err = p.Get(h.Reference)
if err != nil {
return nil, err
}
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
@ -353,9 +415,9 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -364,6 +426,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
hash, err := p.FindHash(h.OffsetReference)
if err != nil {
return nil, err
}
base, err := p.objectAtOffset(h.OffsetReference, hash)
if err != nil {
return nil, err
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err != nil {

View File

@ -3,13 +3,14 @@ package packfile
import (
"bytes"
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/cache"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
var (
@ -46,7 +47,6 @@ type Parser struct {
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
cache *cache.BufferLRU
@ -175,12 +175,25 @@ func (p *Parser) init() error {
return nil
}
type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error
type lazyObjectWriter interface {
// LazyWriter enables an object to be lazily written.
// It returns:
// - w: a writer to receive the object's content.
// - lwh: a func to write the object header.
// - err: any error from the initial writer creation process.
//
// Note that if the object header is not written BEFORE the writer
// is used, this will result in an invalid object.
LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error)
}
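The parser only type-asserts for this optional interface, so any storage can opt in. An illustrative (non-authoritative) implementation; since lazyObjectWriter and objectHeaderWriter are unexported it would have to live in this package, and the temp-file approach and loose-object-style header are assumptions:

// Hypothetical storage that satisfies lazyObjectWriter by streaming
// object content straight to a temp file instead of buffering it.
type tempFileStorage struct{ dir string }

func (s *tempFileStorage) LazyWriter() (io.WriteCloser, objectHeaderWriter, error) {
	f, err := os.CreateTemp(s.dir, "obj-*")
	if err != nil {
		return nil, nil, err
	}
	// Per the interface contract above, this MUST be called before any
	// content is written, or the resulting object is invalid.
	wh := func(typ plumbing.ObjectType, sz int64) error {
		_, err := fmt.Fprintf(f, "%s %d\x00", typ.Bytes(), sz)
		return err
	}
	return f, wh, nil
}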
func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ {
buf.Reset()
oh, err := p.scanner.NextObjectHeader()
if err != nil {
return err
@ -220,39 +233,76 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t)
}
_, crc, err := p.scanner.NextObject(buf)
hasher := plumbing.NewHasher(oh.Type, oh.Length)
writers := []io.Writer{hasher}
var obj *plumbing.MemoryObject
// Lazy writing is only available for non-delta objects.
if p.storage != nil && !delta {
// When a storage is set and supports lazy writing,
// use that instead of creating a memory object.
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, lwh, err := low.LazyWriter()
if err != nil {
return err
}
if err = lwh(oh.Type, oh.Length); err != nil {
return err
}
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
writers = append(writers, obj)
}
}
if delta && !p.scanner.IsSeekable {
buf.Reset()
buf.Grow(int(oh.Length))
writers = append(writers, buf)
}
mw := io.MultiWriter(writers...)
_, crc, err := p.scanner.NextObject(mw)
if err != nil {
return err
}
// Non-delta objects need to be added into the storage. This
// is only required when lazy writing is not supported.
if obj != nil {
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
ota.Crc32 = crc
ota.Length = oh.Length
data := buf.Bytes()
if !delta {
sha1, err := getSHA1(ota.Type, data)
if err != nil {
return err
sha1 := hasher.Sum()
// Move children of placeholder parent into actual parent, in case this
// was a non-external delta reference.
if placeholder, ok := p.oiByHash[sha1]; ok {
ota.Children = placeholder.Children
for _, c := range ota.Children {
c.Parent = ota
}
}
ota.SHA1 = sha1
p.oiByHash[ota.SHA1] = ota
}
if p.storage != nil && !delta {
obj := new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
if _, err := obj.Write(data); err != nil {
return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
if delta && !p.scanner.IsSeekable {
data := buf.Bytes()
p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data)
}
@ -265,28 +315,37 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
buf := &bytes.Buffer{}
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for _, obj := range p.oi {
buf.Reset()
buf.Grow(int(obj.Length))
err := p.get(obj, buf)
if err != nil {
return err
}
content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
}
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil {
return err
}
if !obj.IsDelta() && len(obj.Children) > 0 {
// Dealing with an io.ReaderAt object, means we can
// create it once and reuse across all children.
r := bytes.NewReader(buf.Bytes())
for _, child := range obj.Children {
if err := p.resolveObject(stdioutil.Discard, child, content); err != nil {
// Even though we are discarding the output, we still need to read it
// so that the scanner can advance to the next object and the SHA1 can
// be calculated.
if err := p.resolveObject(io.Discard, child, r); err != nil {
return err
}
p.resolveExternalRef(child)
}
// Remove the delta from the cache.
@ -299,6 +358,16 @@ func (p *Parser) resolveDeltas() error {
return nil
}
func (p *Parser) resolveExternalRef(o *objectInfo) {
if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef {
p.oiByHash[o.SHA1] = o
o.Children = ref.Children
for _, c := range o.Children {
c.Parent = o
}
}
}
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok := p.cache.Get(o.Offset)
@ -336,16 +405,15 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
if o.DiskType.IsDelta() {
b := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(b)
b.Reset()
b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b)
buf.Grow(int(o.Length))
err := p.get(o.Parent, b)
if err != nil {
return err
}
base := b.Bytes()
err = p.resolveObject(buf, o, base)
err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes()))
if err != nil {
return err
}
@ -356,6 +424,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
}
// If the scanner is seekable, caching this data into
// memory by offset seems wasteful.
// There is a trade-off to be considered here in terms
// of execution time vs memory consumption.
//
// TODO: improve seekable execution time, so that we can
// skip this cache.
if len(o.Children) > 0 {
data := make([]byte, buf.Len())
copy(data, buf.Bytes())
@ -364,41 +439,75 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
return nil
}
// resolveObject resolves an object from base, using information
// provided by o.
//
// This call has the side-effect of changing field values
// from the object info o:
// - Type: OFSDeltaObject may become the target type (e.g. Blob).
// - Size: The size may be updated with the target size.
// - Hash: Zero hashes will be calculated as part of the object
// resolution. Hence why this process can't be avoided even when w
// is an io.Discard.
//
// base must be an io.ReaderAt, which is a requirement from
// patchDeltaStream. The main reason being that reversing a
// delta object may lead to going back and forth within base,
// which is not supported by io.Reader.
func (p *Parser) resolveObject(
w io.Writer,
o *objectInfo,
base []byte,
base io.ReaderAt,
) error {
if !o.DiskType.IsDelta() {
return nil
}
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
err := p.readData(buf, o)
if err != nil {
return err
}
data := buf.Bytes()
data, err = applyPatchBase(o, data, base)
writers := []io.Writer{w}
var obj *plumbing.MemoryObject
var lwh objectHeaderWriter
if p.storage != nil {
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, wh, err := low.LazyWriter()
if err != nil {
return err
}
lwh = wh
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
ow, err := obj.Writer()
if err != nil {
return err
}
writers = append(writers, ow)
}
}
mw := io.MultiWriter(writers...)
err = applyPatchBase(o, base, buf, mw, lwh)
if err != nil {
return err
}
if p.storage != nil {
obj := new(plumbing.MemoryObject)
obj.SetSize(o.Size())
if obj != nil {
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
return err
}
obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase.
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
_, err = w.Write(data)
return err
}
@ -422,24 +531,31 @@ func (p *Parser) readData(w io.Writer, o *objectInfo) error {
return nil
}
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
patched, err := PatchDelta(base, data)
// applyPatchBase applies the patch to target.
//
// Note that ota will be updated based on the description in resolveObject.
func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error {
if target == nil {
return fmt.Errorf("cannot apply patch against nil target")
}
typ := ota.Type
if ota.SHA1 == plumbing.ZeroHash {
typ = ota.Parent.Type
}
sz, h, err := patchDeltaWriter(target, base, delta, typ, wh)
if err != nil {
return nil, err
return err
}
if ota.SHA1 == plumbing.ZeroHash {
ota.Type = ota.Parent.Type
sha1, err := getSHA1(ota.Type, patched)
if err != nil {
return nil, err
}
ota.SHA1 = sha1
ota.Length = int64(len(patched))
ota.Type = typ
ota.Length = int64(sz)
ota.SHA1 = h
}
return patched, nil
return nil
}
func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {

View File

@ -1,12 +1,16 @@
package packfile
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
@ -14,7 +18,40 @@ import (
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format.
const deltaSizeMin = 4
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
// maxPatchPreemptionSize defines the max number of bytes to be
// preemptively made available for a patch operation.
maxPatchPreemptionSize uint = 65536
// minDeltaSize defines the smallest size for a delta.
minDeltaSize = 4
)
type offset struct {
mask byte
shift uint
}
var offsets = []offset{
{mask: 0x01, shift: 0},
{mask: 0x02, shift: 8},
{mask: 0x04, shift: 16},
{mask: 0x08, shift: 24},
}
var sizes = []offset{
{mask: 0x10, shift: 0},
{mask: 0x20, shift: 8},
{mask: 0x40, shift: 16},
}
// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
@ -32,18 +69,16 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
defer ioutil.CheckClose(w, &err)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, err = buf.ReadFrom(r)
if err != nil {
return err
}
src := buf.Bytes()
dst := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(dst)
dst.Reset()
dst := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(dst)
err = patchDelta(dst, src, delta)
if err != nil {
return err
@ -51,21 +86,20 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
target.SetSize(int64(dst.Len()))
b := byteSlicePool.Get().([]byte)
_, err = io.CopyBuffer(w, dst, b)
byteSlicePool.Put(b)
b := sync.GetByteSlice()
_, err = io.CopyBuffer(w, dst, *b)
sync.PutByteSlice(b)
return err
}
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
// PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// An error will be returned if delta is corrupted (ErrInvalidDelta) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd).
func PatchDelta(src, delta []byte) ([]byte, error) {
if len(src) == 0 || len(delta) < minDeltaSize {
return nil, ErrInvalidDelta
}
b := &bytes.Buffer{}
if err := patchDelta(b, src, delta); err != nil {
return nil, err
@ -73,8 +107,137 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
return b.Bytes(), nil
}
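A worked example of the copy-from-source encoding (illustrative, not part of this diff): the command byte sets the high bit plus one offset mask (0x01) and one size mask (0x10), so exactly one offset byte and one size byte follow.

base := []byte("hello world")
delta := []byte{
	0x0b,       // LEB128 source size: 11
	0x05,       // LEB128 target size: 5
	0x91,       // 1001_0001: copy-from-src; offset byte 0 and size byte 0 follow
	0x06, 0x05, // offset 6, size 5 -> base[6:11]
}
out, err := packfile.PatchDelta(base, delta)
if err != nil {
	panic(err)
}
fmt.Println(string(out)) // world

Note the convention encoded in decodeSize further down: a copy command whose size decodes to zero means maxCopySize (64 KiB).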
func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
if srcSz != uint(base.Size()) {
return nil, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
remainingTargetSz := targetSz
dstRd, dstWr := io.Pipe()
go func() {
baseRd, err := base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
defer baseRd.Close()
baseBuf := bufio.NewReader(baseRd)
basePos := uint(0)
for {
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
switch {
case isCopyFromSrc(cmd):
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
discard := offset - basePos
if basePos > offset {
_ = baseRd.Close()
baseRd, err = base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
baseBuf.Reset(baseRd)
discard = offset
}
for discard > math.MaxInt32 {
n, err := baseBuf.Discard(math.MaxInt32)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
for discard > 0 {
n, err := baseBuf.Discard(int(discard))
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
basePos += sz
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
default:
_ = dstWr.CloseWithError(ErrDeltaCmd)
return
}
if remainingTargetSz <= 0 {
_ = dstWr.Close()
return
}
}
}()
return dstRd, nil
}
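ReaderFromDelta is the streaming counterpart of PatchDelta: the pipe above lets the target be consumed while the base is still being walked. A hedged usage sketch, where base is a plumbing.EncodedObject, deltaRC is the raw delta stream, and dst is any io.Writer:

rc, err := packfile.ReaderFromDelta(base, deltaRC)
if err != nil {
	return err
}
defer rc.Close()
if _, err := io.Copy(dst, rc); err != nil {
	return err
}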
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
if len(delta) < minCopySize {
return ErrInvalidDelta
}
@ -87,7 +250,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
remainingTargetSz := targetSz
var cmd byte
dst.Grow(int(targetSz))
growSz := min(targetSz, maxPatchPreemptionSize)
dst.Grow(int(growSz))
for {
if len(delta) == 0 {
return ErrInvalidDelta
@ -95,7 +260,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
cmd = delta[0]
delta = delta[1:]
if isCopyFromSrc(cmd) {
switch {
case isCopyFromSrc(cmd):
var offset, sz uint
var err error
offset, delta, err = decodeOffset(cmd, delta)
@ -114,7 +281,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
}
dst.Write(src[offset : offset+sz])
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return ErrInvalidDelta
@ -127,7 +295,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
dst.Write(delta[0:sz])
remainingTargetSz -= sz
delta = delta[sz:]
} else {
default:
return ErrDeltaCmd
}
@ -139,6 +308,107 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
return nil
}
func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) {
deltaBuf := bufio.NewReaderSize(delta, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
// If the header still needs to be written, the caller will provide
// a LazyObjectWriterHeader. This seems to be the case when
// dealing with thin-packs.
if writeHeader != nil {
err = writeHeader(typ, int64(targetSz))
if err != nil {
return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err)
}
}
remainingTargetSz := targetSz
hasher := plumbing.NewHasher(typ, int64(targetSz))
mw := io.MultiWriter(dst, hasher)
bufp := sync.GetByteSlice()
defer sync.PutByteSlice(bufp)
sr := io.NewSectionReader(base, int64(0), int64(srcSz))
// Keep both the io.LimitedReader types, so we can reset N.
baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
for {
buf := *bufp
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
if err != nil {
return 0, plumbing.ZeroHash, err
}
if isCopyFromSrc(cmd) {
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
return 0, plumbing.ZeroHash, err
}
baselr.N = int64(sz)
if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
deltalr.N = int64(sz)
if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else {
return 0, plumbing.ZeroHash, ErrDeltaCmd
}
if remainingTargetSz <= 0 {
break
}
}
return targetSz, hasher.Sum(), nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the
// stream.
@ -146,6 +416,10 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
// This must be called twice on the delta data buffer, first to get the
// expected source buffer size, and again to get the target buffer size.
func decodeLEB128(input []byte) (uint, []byte) {
if len(input) == 0 {
return 0, input
}
var num, sz uint
var b byte
for {
@ -161,78 +435,95 @@ func decodeLEB128(input []byte) (uint, []byte) {
return num, input[sz:]
}
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
var num, sz uint
for {
b, err := input.ReadByte()
if err != nil {
return 0, err
}
num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks
sz++
if uint(b)&continuation == 0 {
break
}
}
return num, nil
}
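A worked decoding (assuming in-package access to the unexported helpers, as a test would have): the low seven bits of each byte are payload, and the high bit signals that another byte follows.

// 0xAC = 1010_1100: payload 0x2C (44), continuation bit set.
// 0x02 = 0000_0010: payload 2, shifted left 7 bits = 256.
// 44 + 256 = 300.
n, rest := decodeLEB128([]byte{0xac, 0x02, 0xff})
fmt.Println(n, rest) // 300 [255]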
func isCopyFromSrc(cmd byte) bool {
return (cmd & 0x80) != 0
return (cmd & continuation) != 0
}
func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
return (cmd&continuation) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
for _, o := range offsets {
if (cmd & o.mask) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << o.shift
}
}
return offset, nil
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
for _, o := range offsets {
if (cmd & o.mask) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << o.shift
delta = delta[1:]
}
offset = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x02) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x04) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 16
delta = delta[1:]
}
if (cmd & 0x08) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 24
delta = delta[1:]
}
return offset, delta, nil
}
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
for _, s := range sizes {
if (cmd & s.mask) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << s.shift
}
}
if sz == 0 {
sz = maxCopySize
}
return sz, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
for _, s := range sizes {
if (cmd & s.mask) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << s.shift
delta = delta[1:]
}
sz = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x20) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x40) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 16
delta = delta[1:]
}
if sz == 0 {
sz = 0x10000
sz = maxCopySize
}
return sz, delta, nil

View File

@ -3,17 +3,15 @@ package packfile
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/utils/binary"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
var (
@ -114,7 +112,7 @@ func (s *Scanner) Header() (version, objects uint32, err error) {
return
}
// readSignature reads an returns the signature field in the packfile.
// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
@ -243,7 +241,7 @@ func (s *Scanner) discardObjectIfNeeded() error {
}
h := s.pendingObject
n, _, err := s.NextObject(stdioutil.Discard)
n, _, err := s.NextObject(io.Discard)
if err != nil {
return err
}
@ -320,29 +318,38 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
return
}
// ReadObject returns a reader for the object content and an error
func (s *Scanner) ReadObject() (io.ReadCloser, error) {
s.pendingObject = nil
zr, err := sync.GetZlibReader(s.r)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
// ReadRegularObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
zr := zlibReaderPool.Get().(io.ReadCloser)
defer zlibReaderPool.Put(zr)
zr, err := sync.GetZlibReader(s.r)
defer sync.PutZlibReader(zr)
if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
if err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
defer ioutil.CheckClose(zr.Reader, &err)
buf := sync.GetByteSlice()
n, err = io.CopyBuffer(w, zr.Reader, *buf)
sync.PutByteSlice(buf)
return
}
var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
@ -372,9 +379,10 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) {
// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
buf := sync.GetByteSlice()
_, err := io.CopyBuffer(io.Discard, s.r, *buf)
sync.PutByteSlice(buf)
return err
}
@ -384,13 +392,13 @@ func (s *Scanner) Flush() error {
}
// scannerReader has the following characteristics:
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penality for performing small writes
// to the crc32 hash writer.
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penalty for performing small writes
// to the crc32 hash writer.
type scannerReader struct {
reader io.Reader
crc io.Writer

View File

@ -7,6 +7,8 @@ import (
"errors"
"fmt"
"io"
"github.com/jesseduffield/go-git/v5/utils/trace"
)
// An Encoder writes pkt-lines to an output stream.
@ -43,6 +45,7 @@ func NewEncoder(w io.Writer) *Encoder {
// Flush encodes a flush-pkt to the output stream.
func (e *Encoder) Flush() error {
defer trace.Packet.Print("packet: > 0000")
_, err := e.w.Write(FlushPkt)
return err
}
@ -70,6 +73,7 @@ func (e *Encoder) encodeLine(p []byte) error {
}
n := len(p) + 4
defer trace.Packet.Printf("packet: > %04x %s", n, p)
if _, err := e.w.Write(asciiHex16(n)); err != nil {
return err
}

View File

@ -0,0 +1,51 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
)
var (
// ErrInvalidErrorLine is returned by Decode when the packet line is not an
// error line.
ErrInvalidErrorLine = errors.New("expected an error-line")
errPrefix = []byte("ERR ")
)
// ErrorLine is a packet line that contains an error message.
// Once this packet is sent by client or server, the data transfer process is
// terminated.
// See https://git-scm.com/docs/pack-protocol#_pkt_line_format
type ErrorLine struct {
Text string
}
// Error implements the error interface.
func (e *ErrorLine) Error() string {
return e.Text
}
// Encode encodes the ErrorLine into a packet line.
func (e *ErrorLine) Encode(w io.Writer) error {
p := NewEncoder(w)
return p.Encodef("%s%s\n", string(errPrefix), e.Text)
}
// Decode decodes a packet line into an ErrorLine.
func (e *ErrorLine) Decode(r io.Reader) error {
s := NewScanner(r)
if !s.Scan() {
return s.Err()
}
line := s.Bytes()
if !bytes.HasPrefix(line, errPrefix) {
return ErrInvalidErrorLine
}
e.Text = strings.TrimSpace(string(line[4:]))
return nil
}
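An illustrative round-trip (the message is made up). On the wire an error line is an ordinary pkt-line whose payload starts with "ERR "; given the Scanner change below, reading one back surfaces it through Scanner.Err() rather than as a regular payload:

var buf bytes.Buffer
e := &pktline.ErrorLine{Text: "access denied"}
_ = e.Encode(&buf)
fmt.Printf("%q\n", buf.String()) // "0016ERR access denied\n"

s := pktline.NewScanner(&buf)
if !s.Scan() {
	fmt.Println(s.Err()) // access denied (an *ErrorLine)
}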

View File

@ -1,8 +1,12 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
"github.com/jesseduffield/go-git/v5/utils/trace"
)
const (
@ -65,6 +69,14 @@ func (s *Scanner) Scan() bool {
return false
}
s.payload = s.payload[:l]
trace.Packet.Printf("packet: < %04x %s", l, s.payload)
if bytes.HasPrefix(s.payload, errPrefix) {
s.err = &ErrorLine{
Text: strings.TrimSpace(string(s.payload[4:])),
}
return false
}
return true
}
@ -128,6 +140,8 @@ func asciiHexToByte(b byte) (byte, error) {
return b - '0', nil
case b >= 'a' && b <= 'f':
return b - 'a' + 10, nil
case b >= 'A' && b <= 'F':
return b - 'A' + 10, nil
default:
return 0, ErrInvalidPktLen
}

View File

@ -2,15 +2,15 @@ package plumbing
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"hash"
"sort"
"strconv"
"github.com/jesseduffield/go-git/v5/plumbing/hash"
)
// Hash SHA1 hashed content
type Hash [20]byte
type Hash [hash.Size]byte
// ZeroHash is Hash with value zero
var ZeroHash Hash
@ -46,7 +46,7 @@ type Hasher struct {
}
func NewHasher(t ObjectType, size int64) Hasher {
h := Hasher{sha1.New()}
h := Hasher{hash.New(hash.CryptoType)}
h.Write(t.Bytes())
h.Write([]byte(" "))
h.Write([]byte(strconv.FormatInt(size, 10)))
@ -74,10 +74,11 @@ func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// IsHash returns true if the given string is a valid hash.
func IsHash(s string) bool {
if len(s) != 40 {
switch len(s) {
case hash.HexSize:
_, err := hex.DecodeString(s)
return err == nil
default:
return false
}
_, err := hex.DecodeString(s)
return err == nil
}

View File

@ -0,0 +1,60 @@
// Package hash provides a way of managing the
// underlying hash implementations used across go-git.
package hash
import (
"crypto"
"fmt"
"hash"
"github.com/pjbgf/sha1cd"
)
// algos is a map of hash algorithms.
var algos = map[crypto.Hash]func() hash.Hash{}
func init() {
reset()
}
// reset resets the default algos value. Can be used after running tests
// that registers new algorithms to avoid side effects.
func reset() {
algos[crypto.SHA1] = sha1cd.New
algos[crypto.SHA256] = crypto.SHA256.New
}
// RegisterHash allows the hash algorithm used to be overridden.
// This ensures that the hash selection for go-git is explicit when
// overriding the default value.
func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
if f == nil {
return fmt.Errorf("cannot register hash: f is nil")
}
switch h {
case crypto.SHA1:
algos[h] = f
case crypto.SHA256:
algos[h] = f
default:
return fmt.Errorf("unsupported hash function: %v", h)
}
return nil
}
// Hash is the same as hash.Hash. This allows consumers
// to avoid having to import this package alongside "hash".
type Hash interface {
hash.Hash
}
// New returns a new Hash for the given hash function.
// It panics if the hash function is not registered.
func New(h crypto.Hash) Hash {
hh, ok := algos[h]
if !ok {
panic(fmt.Sprintf("hash algorithm not registered: %v", h))
}
return hh()
}
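Typical use inside go-git simply asks for the compiled-in default; tests and benchmarks can swap implementations. A small sketch (assuming imports of crypto, crypto/sha1, and fmt; the stdlib swap is purely illustrative):

h := hash.New(hash.CryptoType) // sha1cd by default, SHA-256 with the sha256 build tag
h.Write([]byte("some bytes"))
fmt.Printf("%x\n", h.Sum(nil))

// Swap SHA-1 for the stdlib implementation, e.g. in a benchmark:
_ = hash.RegisterHash(crypto.SHA1, sha1.New)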

View File

@ -0,0 +1,15 @@
//go:build !sha256
// +build !sha256
package hash
import "crypto"
const (
// CryptoType defines what hash algorithm is being used.
CryptoType = crypto.SHA1
// Size defines the number of bytes the hash yields.
Size = 20
// HexSize defines the string size of the hash when represented in hexadecimal.
HexSize = 40
)

View File

@ -0,0 +1,15 @@
//go:build sha256
// +build sha256
package hash
import "crypto"
const (
// CryptoType defines what hash algorithm is being used.
CryptoType = crypto.SHA256
// Size defines the number of bytes the hash yields.
Size = 32
// HexSize defines the string size of the hash when represented in hexadecimal.
HexSize = 64
)

View File

@ -25,13 +25,13 @@ func (o *MemoryObject) Hash() Hash {
return o.h
}
// Type return the ObjectType
// Type returns the ObjectType
func (o *MemoryObject) Type() ObjectType { return o.t }
// SetType sets the ObjectType
func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
// Size return the size of the object
// Size returns the size of the object
func (o *MemoryObject) Size() int64 { return o.sz }
// SetSize set the object size, a content of the given size should be written

View File

@ -82,7 +82,7 @@ func (t ObjectType) Valid() bool {
return t >= CommitObject && t <= REFDeltaObject
}
// IsDelta returns true for any ObjectTyoe that represents a delta (i.e.
// IsDelta returns true for any ObjectType that represents a delta (i.e.
// REFDeltaObject or OFSDeltaObject).
func (t ObjectType) IsDelta() bool {
return t == REFDeltaObject || t == OFSDeltaObject

View File

@ -39,7 +39,7 @@ func (c *Change) Action() (merkletrie.Action, error) {
return merkletrie.Modify, nil
}
// Files return the files before and after a change.
// Files returns the files before and after a change.
// For insertions from will be nil. For deletions to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()

View File

@ -16,11 +16,11 @@ func newChange(c merkletrie.Change) (*Change, error) {
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
return nil, fmt.Errorf("from field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
return nil, fmt.Errorf("to field: %s", err)
}
return ret, nil

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
@ -9,22 +8,34 @@ import (
"io"
"strings"
"golang.org/x/crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
const (
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
headerencoding string = "encoding"
// https://github.com/git/git/blob/bcb6cae2966cc407ca1afc77413b3ef11103c175/Documentation/gitformat-signature.txt#L153
// When a merge commit is created from a signed tag, the tag is embedded in
// the commit with the "mergetag" header.
headermergetag string = "mergetag"
defaultUtf8CommitMessageEncoding MessageEncoding = "UTF-8"
)
// Hash represents the hash of an object
type Hash plumbing.Hash
// MessageEncoding represents the encoding of a commit
type MessageEncoding string
// Commit points to a single tree, marking it as what the project looked like
// at a certain point in time. It contains meta-information about that point
// in time, such as a timestamp, the author of the changes since the last
@ -38,6 +49,9 @@ type Commit struct {
// Committer is the one performing the commit, might be different from
// Author.
Committer Signature
// MergeTag is the embedded tag object when a merge commit is created by
// merging a signed tag.
MergeTag string
// PGPSignature is the PGP signature of the commit.
PGPSignature string
// Message is the commit message, contains arbitrary text.
@ -46,6 +60,8 @@ type Commit struct {
TreeHash plumbing.Hash
// ParentHashes are the hashes of the parent commits of the commit.
ParentHashes []plumbing.Hash
// Encoding is the encoding of the commit.
Encoding MessageEncoding
s storer.EncodedObjectStorer
}
@ -173,6 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
c.Hash = o.Hash()
c.Encoding = defaultUtf8CommitMessageEncoding
reader, err := o.Reader()
if err != nil {
@ -180,11 +197,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
var message bool
var mergetag bool
var pgpsig bool
var msgbuf bytes.Buffer
for {
@ -193,6 +210,16 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
return err
}
if mergetag {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
c.MergeTag += string(line)
continue
} else {
mergetag = false
}
}
if pgpsig {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
@ -226,6 +253,11 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
c.Author.Decode(data)
case "committer":
c.Committer.Decode(data)
case headermergetag:
c.MergeTag += string(data) + "\n"
mergetag = true
case headerencoding:
c.Encoding = MessageEncoding(data)
case headerpgp:
c.PGPSignature += string(data) + "\n"
pgpsig = true
@ -287,6 +319,28 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
return err
}
if c.MergeTag != "" {
if _, err = fmt.Fprint(w, "\n"+headermergetag+" "); err != nil {
return err
}
// Split tag information lines and re-write with a left padding and
// newline. Use join for this so it's clear that a newline should not be
// added after this section. The newline will be added either as part of
// the PGP signature or the commit message.
mergetag := strings.TrimSuffix(c.MergeTag, "\n")
lines := strings.Split(mergetag, "\n")
if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
return err
}
}
if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMessageEncoding {
if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil {
return err
}
}
if c.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
@ -374,7 +428,18 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
return nil, err
}
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
}
// Less defines a compare function to determine which commit is 'earlier' by:
// - First use Committer.When
// - If Committer.When are equal then use Author.When
// - If Author.When also equal then compare the string value of the hash
func (c *Commit) Less(rhs *Commit) bool {
return c.Committer.When.Before(rhs.Committer.When) ||
(c.Committer.When.Equal(rhs.Committer.When) &&
(c.Author.When.Before(rhs.Author.When) ||
(c.Author.When.Equal(rhs.Author.When) && bytes.Compare(c.Hash[:], rhs.Hash[:]) < 0)))
}
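Less gives commits a deterministic total order even when timestamps collide. Illustrative use with the standard library, where commits is an assumed []*object.Commit slice:

sort.Slice(commits, func(i, j int) bool {
	return commits[i].Less(commits[j])
})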
func indent(t string) string {

View File

@ -57,6 +57,8 @@ func (c *commitPathIter) Next() (*Commit, error) {
}
func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
var parentTree, currentTree *Tree
for {
// Parent-commit can be nil if the current-commit is the initial commit
parentCommit, parentCommitErr := c.sourceIter.Next()
@ -68,13 +70,17 @@ func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
parentCommit = nil
}
// Fetch the trees of the current and parent commits
currentTree, currTreeErr := c.currentCommit.Tree()
if currTreeErr != nil {
return nil, currTreeErr
if parentTree == nil {
var currTreeErr error
currentTree, currTreeErr = c.currentCommit.Tree()
if currTreeErr != nil {
return nil, currTreeErr
}
} else {
currentTree = parentTree
parentTree = nil
}
var parentTree *Tree
if parentCommit != nil {
var parentTreeErr error
parentTree, parentTreeErr = parentCommit.Tree()
@ -115,7 +121,8 @@ func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool {
// filename matches, now check if source iterator contains all commits (from all refs)
if c.checkParent {
if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
// Check if parent is beyond the initial commit
if parent == nil || isParentHash(parent.Hash, c.currentCommit) {
return true
}
continue

View File

@ -1,12 +0,0 @@
package object
import (
"bufio"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}

View File

@ -6,7 +6,7 @@ import (
"errors"
"fmt"
"io"
"math"
"strconv"
"strings"
"github.com/jesseduffield/go-git/v5/plumbing"
@ -96,10 +96,6 @@ func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, erro
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
@ -238,61 +234,56 @@ func (fileStats FileStats) String() string {
return printStat(fileStats)
}
// printStat prints the stats of changes in content of files.
// Original implementation: https://github.com/git/git/blob/1a87c842ece327d03d08096395969aca5e0a6996/diff.c#L2615
// Parts of the output:
// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
// example: " main.go | 10 +++++++--- "
func printStat(fileStats []FileStat) string {
padLength := float64(len(" "))
newlineLength := float64(len("\n"))
separatorLength := float64(len("|"))
// Soft line length limit. The text length calculation below excludes
// length of the change number. Adding that would take it closer to 80,
// but probably not more than 80, until it's a huge number.
lineLength := 72.0
maxGraphWidth := uint(53)
maxNameLen := 0
maxChangeLen := 0
// Get the longest filename and longest total change.
var longestLength float64
var longestTotalChange float64
for _, fs := range fileStats {
if int(longestLength) < len(fs.Name) {
longestLength = float64(len(fs.Name))
scaleLinear := func(it, width, max uint) uint {
if it == 0 || max == 0 {
return 0
}
totalChange := fs.Addition + fs.Deletion
if int(longestTotalChange) < totalChange {
longestTotalChange = float64(totalChange)
return 1 + (it * (width - 1) / max)
}
for _, fs := range fileStats {
if len(fs.Name) > maxNameLen {
maxNameLen = len(fs.Name)
}
changes := strconv.Itoa(fs.Addition + fs.Deletion)
if len(changes) > maxChangeLen {
maxChangeLen = len(changes)
}
}
// Parts of the output:
// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
// example: " main.go | 10 +++++++--- "
// <pad><filename><pad>
leftTextLength := padLength + longestLength + padLength
// <pad><number><pad><+++++/-----><newline>
// Excluding number length here.
rightTextLength := padLength + padLength + newlineLength
totalTextArea := leftTextLength + separatorLength + rightTextLength
heightOfHistogram := lineLength - totalTextArea
// Scale the histogram.
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
scaleFactor = longestTotalChange / heightOfHistogram
} else {
scaleFactor = 1.0
}
finalOutput := ""
result := ""
for _, fs := range fileStats {
addn := float64(fs.Addition)
deln := float64(fs.Deletion)
adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
}
add := uint(fs.Addition)
del := uint(fs.Deletion)
np := maxNameLen - len(fs.Name)
cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion))
return finalOutput
total := add + del
if total > maxGraphWidth {
add = scaleLinear(add, maxGraphWidth, total)
del = scaleLinear(del, maxGraphWidth, total)
}
adds := strings.Repeat("+", int(add))
dels := strings.Repeat("-", int(del))
namePad := strings.Repeat(" ", np)
changePad := strings.Repeat(" ", cp)
result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels)
}
return result
}
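A worked scaling example (numbers invented): a file with 90 additions and 30 deletions has a total of 120, which exceeds maxGraphWidth (53), so the bar shrinks to scaleLinear(90, 53, 120) = 1 + 90*52/120 = 40 pluses and scaleLinear(30, 53, 120) = 1 + 30*52/120 = 14 minuses, while the printed change count remains 120.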
func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
@ -313,8 +304,8 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
// File is deleted.
cs.Name = from.Path()
} else if from.Path() != to.Path() {
// File is renamed. Not supported.
// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
// File is renamed.
cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
} else {
cs.Name = from.Path()
}

View File

@ -403,10 +403,16 @@ func min(a, b int) int {
return b
}
const maxMatrixSize = 10000
func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
// Allocate for the worst-case scenario where every pair has a score
// that we need to consider. We might not need that many.
matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
matrixSize := len(srcs) * len(dsts)
if matrixSize > maxMatrixSize {
matrixSize = maxMatrixSize
}
matrix := make(similarityMatrix, 0, matrixSize)
srcSizes := make([]int64, len(srcs))
dstSizes := make([]int64, len(dsts))
dstTooLarge := make(map[int]bool)
@ -735,10 +741,7 @@ func (i *similarityIndex) add(key int, cnt uint64) error {
// It's the same key, so increment the counter.
var err error
i.hashes[j], err = newKeyCountPair(key, v.count()+cnt)
if err != nil {
return err
}
return nil
return err
} else if j+1 >= len(i.hashes) {
j = 0
} else {

View File

@ -0,0 +1,102 @@
package object
import "bytes"
const (
signatureTypeUnknown signatureType = iota
signatureTypeOpenPGP
signatureTypeX509
signatureTypeSSH
)
var (
// openPGPSignatureFormat is the format of an OpenPGP signature.
openPGPSignatureFormat = signatureFormat{
[]byte("-----BEGIN PGP SIGNATURE-----"),
[]byte("-----BEGIN PGP MESSAGE-----"),
}
// x509SignatureFormat is the format of an X509 signature, which is
// a PKCS#7 (S/MIME) signature.
x509SignatureFormat = signatureFormat{
[]byte("-----BEGIN CERTIFICATE-----"),
[]byte("-----BEGIN SIGNED MESSAGE-----"),
}
// sshSignatureFormat is the format of an SSH signature.
sshSignatureFormat = signatureFormat{
[]byte("-----BEGIN SSH SIGNATURE-----"),
}
)
var (
// knownSignatureFormats is a map of known signature formats, indexed by
// their signatureType.
knownSignatureFormats = map[signatureType]signatureFormat{
signatureTypeOpenPGP: openPGPSignatureFormat,
signatureTypeX509: x509SignatureFormat,
signatureTypeSSH: sshSignatureFormat,
}
)
// signatureType represents the type of the signature.
type signatureType int8
// signatureFormat represents the beginning of a signature.
type signatureFormat [][]byte
// typeForSignature returns the type of the signature based on its format.
func typeForSignature(b []byte) signatureType {
for t, i := range knownSignatureFormats {
for _, begin := range i {
if bytes.HasPrefix(b, begin) {
return t
}
}
}
return signatureTypeUnknown
}
// parseSignedBytes returns the position of the last signature block found in
// the given bytes. If no signature block is found, it returns -1.
//
// When multiple signature blocks are found, the position of the last one is
// returned. Any trailing bytes after this signature block start should be
// considered part of the signature.
//
// Given this, it would be safe to use the returned position to split the bytes
// into two parts: the first part containing the message, the second part
// containing the signature.
//
// Example:
//
// message := []byte(`Message with signature
//
// -----BEGIN SSH SIGNATURE-----
// ...`)
//
// var signature string
// if pos, _ := parseSignedBytes(message); pos != -1 {
// signature = string(message[pos:])
// message = message[:pos]
// }
//
// This logic is on par with git's gpg-interface.c:parse_signed_buffer().
// https://github.com/git/git/blob/7c2ef319c52c4997256f5807564523dfd4acdfc7/gpg-interface.c#L668
func parseSignedBytes(b []byte) (int, signatureType) {
var n, match = 0, -1
var t signatureType
for n < len(b) {
var i = b[n:]
if st := typeForSignature(i); st != signatureTypeUnknown {
match = n
t = st
}
if eol := bytes.IndexByte(i, '\n'); eol >= 0 {
n += eol + 1
continue
}
// If we reach this point, we've reached the end.
break
}
return match, t
}

View File

@ -1,18 +1,16 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
stdioutil "io/ioutil"
"strings"
"golang.org/x/crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
// Tag represents an annotated tag object. It points to a single git object of
@ -93,9 +91,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
var line []byte
line, err = r.ReadBytes('\n')
@ -128,40 +126,15 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
}
data, err := stdioutil.ReadAll(r)
data, err := io.ReadAll(r)
if err != nil {
return err
}
var pgpsig bool
// Check if data contains PGP signature.
if bytes.Contains(data, []byte(beginpgp)) {
// Split the lines at newline.
messageAndSig := bytes.Split(data, []byte("\n"))
for _, l := range messageAndSig {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
break
} else {
t.PGPSignature += string(l) + "\n"
}
continue
}
// Check if it's the beginning of a PGP signature.
if bytes.Contains(l, []byte(beginpgp)) {
t.PGPSignature += beginpgp + "\n"
pgpsig = true
continue
}
t.Message += string(l) + "\n"
}
} else {
t.Message = string(data)
if sm, _ := parseSignedBytes(data); sm >= 0 {
t.PGPSignature = string(data[sm:])
data = data[:sm]
}
t.Message = string(data)
return nil
}
@ -304,7 +277,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
return nil, err
}
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
}
// TagIter provides an iterator for a set of tags.

View File

@ -1,19 +1,20 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"path"
"path/filepath"
"sort"
"strings"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/filemode"
"github.com/jesseduffield/go-git/v5/plumbing/storer"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/utils/sync"
)
const (
@ -27,6 +28,7 @@ var (
ErrFileNotFound = errors.New("file not found")
ErrDirectoryNotFound = errors.New("directory not found")
ErrEntryNotFound = errors.New("entry not found")
ErrEntriesNotSorted = errors.New("entries in tree are not sorted")
)
// Tree is basically like a directory - it references a bunch of other trees
@ -230,9 +232,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
str, err := r.ReadString(' ')
if err != nil {
@ -270,7 +272,30 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
return nil
}
type TreeEntrySorter []TreeEntry
func (s TreeEntrySorter) Len() int {
return len(s)
}
func (s TreeEntrySorter) Less(i, j int) bool {
name1 := s[i].Name
name2 := s[j].Name
if s[i].Mode == filemode.Dir {
name1 += "/"
}
if s[j].Mode == filemode.Dir {
name2 += "/"
}
return name1 < name2
}
func (s TreeEntrySorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Encode transforms a Tree into a plumbing.EncodedObject.
// The tree entries must be sorted by name.
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.TreeObject)
w, err := o.Writer()
@ -279,7 +304,15 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(w, &err)
if !sort.IsSorted(TreeEntrySorter(t.Entries)) {
return ErrEntriesNotSorted
}
for _, entry := range t.Entries {
if strings.IndexByte(entry.Name, 0) != -1 {
return fmt.Errorf("malformed filename %q", entry.Name)
}
if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
return err
}
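
Since Encode now refuses unsorted trees with ErrEntriesNotSorted, callers that build trees by hand can sort them with the new TreeEntrySorter first. A hedged sketch with illustrative entries:

```go
package main

import (
	"sort"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/filemode"
	"github.com/jesseduffield/go-git/v5/plumbing/object"
)

func main() {
	// Illustrative entries; directories compare as "name/", matching git's
	// own tree ordering.
	entries := []object.TreeEntry{
		{Name: "b.txt", Mode: filemode.Regular, Hash: plumbing.ZeroHash},
		{Name: "a", Mode: filemode.Dir, Hash: plumbing.ZeroHash},
	}
	sort.Sort(object.TreeEntrySorter(entries))
	// A Tree built from these entries now passes Encode's sortedness check.
	_ = object.Tree{Entries: entries}
}
```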

View File

@ -38,6 +38,10 @@ func NewTreeRootNode(t *Tree) noder.Noder {
}
}
func (t *treeNoder) Skip() bool {
return false
}
func (t *treeNoder) isRoot() bool {
return t.name == ""
}
@ -84,7 +88,9 @@ func (t *treeNoder) Children() ([]noder.Noder, error) {
}
}
return transformChildren(parent)
var err error
t.children, err = transformChildren(parent)
return t.children, err
}
// Returns the children of a tree as treenoders.

View File

@ -57,7 +57,7 @@ func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
switch r.Type() {
case plumbing.SymbolicReference:
v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
a.Capabilities.Add(capability.SymRef, v)
return a.Capabilities.Add(capability.SymRef, v)
case plumbing.HashReference:
a.References[r.Name().String()] = r.Hash()
default:
@ -96,12 +96,12 @@ func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
//
// Git versions prior to 1.8.4.3 have a special procedure to get
// the reference that HEAD is pointing to:
// - Check if a reference called master exists. If exists and it
// has the same hash as HEAD hash, we can say that HEAD is pointing to master
// - If master does not exists or does not have the same hash as HEAD,
// order references and check in that order if that reference has the same
// hash than HEAD. If yes, set HEAD pointing to that branch hash
// - If no reference is found, throw an error
// - Check if a reference called master exists. If it exists and
// has the same hash as HEAD, we can say that HEAD is pointing to master
// - If master does not exist or does not have the same hash as HEAD,
// order the references and check in that order whether a reference has
// the same hash as HEAD. If yes, set HEAD pointing to that branch hash
// - If no reference is found, return an error
func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
if a.Head == nil {
return nil

View File

@ -133,6 +133,7 @@ func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
return nil
}
// TODO: Use object-format (when available) for hash size. Git 2.41+
if len(p.line) < hashSize {
p.error("cannot read hash, pkt-line too short")
return nil

View File

@ -1,6 +1,11 @@
// Package capability defines the server and client capabilities.
package capability
import (
"fmt"
"os"
)
// Capability describes a server or client capability.
type Capability string
@ -230,9 +235,23 @@ const (
PushCert Capability = "push-cert"
// SymRef symbolic reference support for better negotiation.
SymRef Capability = "symref"
// ObjectFormat takes a hash algorithm as an argument, indicates that the
// server supports the given hash algorithms.
ObjectFormat Capability = "object-format"
// Filter, if present, means fetch-pack may send "filter" commands to request
// a partial clone or partial fetch, asking the server to omit various objects
// from the packfile.
Filter Capability = "filter"
)
const DefaultAgent = "go-git/4.x"
const userAgent = "go-git/5.x"
// DefaultAgent provides the user agent string.
func DefaultAgent() string {
if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok {
return fmt.Sprintf("%s %s", userAgent, envUserAgent)
}
return userAgent
}
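
A minimal sketch of the new agent hook; the extra value is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	fmt.Println(capability.DefaultAgent()) // "go-git/5.x" (if the env var is unset)

	os.Setenv("GO_GIT_USER_AGENT_EXTRA", "lazygit") // illustrative value
	fmt.Println(capability.DefaultAgent())          // "go-git/5.x lazygit"
}
```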
var known = map[Capability]bool{
MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
@ -241,10 +260,11 @@ var known = map[Capability]bool{
NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
ObjectFormat: true, Filter: true,
}
var requiresArgument = map[Capability]bool{
Agent: true, PushCert: true, SymRef: true,
Agent: true, PushCert: true, SymRef: true, ObjectFormat: true,
}
var multipleArgument = map[Capability]bool{

View File

@ -86,7 +86,9 @@ func (l *List) Get(capability Capability) []string {
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
delete(l.m, capability)
if _, ok := l.m[capability]; ok {
l.m[capability].Values = l.m[capability].Values[:0]
}
return l.Add(capability, values...)
}

View File

@ -19,7 +19,6 @@ var (
// common
sp = []byte(" ")
eol = []byte("\n")
eq = []byte{'='}
// advertised-refs
null = []byte("\x00")
@ -49,6 +48,11 @@ func isFlush(payload []byte) bool {
return len(payload) == 0
}
var (
// ErrNilWriter is returned when a nil writer is passed to the encoder.
ErrNilWriter = fmt.Errorf("nil writer")
)
// ErrUnexpectedData represents unexpected data found while decoding a message.
type ErrUnexpectedData struct {
Msg string

View File

@ -0,0 +1,76 @@
package packp
import (
"errors"
"fmt"
"github.com/jesseduffield/go-git/v5/plumbing"
"net/url"
"strings"
)
var ErrUnsupportedObjectFilterType = errors.New("unsupported object filter type")
// Filter values enable the partial clone capability which causes
// the server to omit objects that match the filter.
//
// See [Git's documentation] for more details.
//
// [Git's documentation]: https://github.com/git/git/blob/e02ecfcc534e2021aae29077a958dd11c3897e4c/Documentation/rev-list-options.txt#L948
type Filter string
type BlobLimitPrefix string
const (
BlobLimitPrefixNone BlobLimitPrefix = ""
BlobLimitPrefixKibi BlobLimitPrefix = "k"
BlobLimitPrefixMebi BlobLimitPrefix = "m"
BlobLimitPrefixGibi BlobLimitPrefix = "g"
)
// FilterBlobNone omits all blobs.
func FilterBlobNone() Filter {
return "blob:none"
}
// FilterBlobLimit omits blobs of size at least n bytes (when prefix is
// BlobLimitPrefixNone), n kibibytes (when prefix is BlobLimitPrefixKibi),
// n mebibytes (when prefix is BlobLimitPrefixMebi) or n gibibytes (when
// prefix is BlobLimitPrefixGibi). n can be zero, in which case all blobs
// will be omitted.
func FilterBlobLimit(n uint64, prefix BlobLimitPrefix) Filter {
return Filter(fmt.Sprintf("blob:limit=%d%s", n, prefix))
}
// FilterTreeDepth omits all blobs and trees whose depth from the root tree
// is larger or equal to depth.
func FilterTreeDepth(depth uint64) Filter {
return Filter(fmt.Sprintf("tree:%d", depth))
}
// FilterObjectType omits all objects which are not of the requested type t.
// Supported types are TagObject, CommitObject, TreeObject and BlobObject.
func FilterObjectType(t plumbing.ObjectType) (Filter, error) {
switch t {
case plumbing.TagObject:
fallthrough
case plumbing.CommitObject:
fallthrough
case plumbing.TreeObject:
fallthrough
case plumbing.BlobObject:
return Filter(fmt.Sprintf("object:type=%s", t.String())), nil
default:
return "", fmt.Errorf("%w: %s", ErrUnsupportedObjectFilterType, t.String())
}
}
// FilterCombine combines multiple Filter values together.
func FilterCombine(filters ...Filter) Filter {
var escapedFilters []string
for _, filter := range filters {
escapedFilters = append(escapedFilters, url.QueryEscape(string(filter)))
}
return Filter(fmt.Sprintf("combine:%s", strings.Join(escapedFilters, "+")))
}
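
A hedged sketch of composing these helpers; the limits are illustrative:

```go
package main

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp"
)

func main() {
	// Omit blobs of 1 MiB or more, plus everything below the root tree.
	f := packp.FilterCombine(
		packp.FilterBlobLimit(1, packp.BlobLimitPrefixMebi), // "blob:limit=1m"
		packp.FilterTreeDepth(1),                            // "tree:1"
	)
	// Sub-filters are query-escaped and joined with "+".
	fmt.Println(f) // combine:blob%3Alimit%3D1m+tree%3A1
}
```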

View File

@ -0,0 +1,120 @@
package packp
import (
"fmt"
"io"
"strings"
"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)
var (
// ErrInvalidGitProtoRequest is returned by Decode if the input is not a
// valid git protocol request.
ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
)
// GitProtoRequest is a command request for the git protocol.
// It is used to send the command, endpoint, and extra parameters to the
// remote.
// See https://git-scm.com/docs/pack-protocol#_git_transport
type GitProtoRequest struct {
RequestCommand string
Pathname string
// Optional
Host string
// Optional
ExtraParams []string
}
// validate validates the request.
func (g *GitProtoRequest) validate() error {
if g.RequestCommand == "" {
return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
}
if g.Pathname == "" {
return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
}
return nil
}
// Encode encodes the request into the writer.
func (g *GitProtoRequest) Encode(w io.Writer) error {
if w == nil {
return ErrNilWriter
}
if err := g.validate(); err != nil {
return err
}
p := pktline.NewEncoder(w)
req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname)
if host := g.Host; host != "" {
req += fmt.Sprintf("host=%s\x00", host)
}
if len(g.ExtraParams) > 0 {
req += "\x00"
for _, param := range g.ExtraParams {
req += param + "\x00"
}
}
if err := p.Encode([]byte(req)); err != nil {
return err
}
return nil
}
// Decode decodes the request from the reader.
func (g *GitProtoRequest) Decode(r io.Reader) error {
s := pktline.NewScanner(r)
if !s.Scan() {
err := s.Err()
if err == nil {
return ErrInvalidGitProtoRequest
}
return err
}
line := string(s.Bytes())
if len(line) == 0 {
return io.EOF
}
if line[len(line)-1] != 0 {
return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
}
parts := strings.SplitN(line, " ", 2)
if len(parts) != 2 {
return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
}
g.RequestCommand = parts[0]
params := strings.Split(parts[1], string(null))
if len(params) < 1 {
return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
}
g.Pathname = params[0]
if len(params) > 1 {
g.Host = strings.TrimPrefix(params[1], "host=")
}
if len(params) > 2 {
for _, param := range params[2:] {
if param != "" {
g.ExtraParams = append(g.ExtraParams, param)
}
}
}
return nil
}
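
A minimal sketch of the round trip, assuming an illustrative command, path, and host:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp"
)

func main() {
	var buf bytes.Buffer
	req := packp.GitProtoRequest{
		RequestCommand: "git-upload-pack",
		Pathname:       "/project.git",
		Host:           "example.com", // optional
	}
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}
	// Payload: "git-upload-pack /project.git\x00host=example.com\x00",
	// wrapped in a pkt-line with its 4-hex-digit length prefix.
	fmt.Printf("%q\n", buf.String())

	var decoded packp.GitProtoRequest
	if err := decoded.Decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(decoded.RequestCommand, decoded.Pathname, decoded.Host)
}
```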

View File

@ -114,7 +114,7 @@ func (d *Demuxer) nextPackData() ([]byte, error) {
size := len(content)
if size == 0 {
return nil, nil
return nil, io.EOF
} else if size > d.max {
return nil, ErrMaxPackedExceeded
}

View File

@ -21,11 +21,6 @@ type ServerResponse struct {
// Decode decodes the response into the struct; isMultiACK should be true if
// the request was made with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
// TODO: implement support for multi_ack or multi_ack_detailed responses
if isMultiACK {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
s := pktline.NewScanner(reader)
for s.Scan() {
@ -48,7 +43,23 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
}
}
return s.Err()
// isMultiACK is true when the remote server advertises the related
// capabilities and they are not in transport.UnsupportedCapabilities.
//
// Users may decide to remove multi_ack and multi_ack_detailed from the
// unsupported capabilities list, which allows them to do initial clones
// from Azure DevOps.
//
// Follow-up fetches may error, so errors are wrapped with additional
// information highlighting that these capabilities are not supported by go-git.
//
// TODO: Implement support for multi_ack or multi_ack_detailed responses.
err := s.Err()
if err != nil && isMultiACK {
return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
}
return err
}
// stopReading detects when a valid command such as ACK or NAK is found to be
@ -90,12 +101,14 @@ func (r *ServerResponse) decodeLine(line []byte) error {
return fmt.Errorf("unexpected flush")
}
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if len(line) >= 3 {
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if bytes.Equal(line[0:3], nak) {
return nil
if bytes.Equal(line[0:3], nak) {
return nil
}
}
return fmt.Errorf("unexpected content %q", string(line))
@ -107,14 +120,18 @@ func (r *ServerResponse) decodeACKLine(line []byte) error {
}
sp := bytes.Index(line, []byte(" "))
if sp+41 > len(line) {
return fmt.Errorf("malformed ACK %q", line)
}
h := plumbing.NewHash(string(line[sp+1 : sp+41]))
r.ACKs = append(r.ACKs, h)
return nil
}
// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer) error {
if len(r.ACKs) > 1 {
func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
if len(r.ACKs) > 1 && !isMultiACK {
// For further information, refer to comments in the Decode func above.
return errors.New("multi_ack and multi_ack_detailed are not supported")
}

View File

@ -17,6 +17,7 @@ type UploadRequest struct {
Wants []plumbing.Hash
Shallows []plumbing.Hash
Depth Depth
Filter Filter
}
// Depth values store the desired depth of the requested packfile: see
@ -95,7 +96,7 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
}
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
return r

View File

@ -43,7 +43,7 @@ func (d *ulReqDecoder) Decode(v *UploadRequest) error {
return d.err
}
// fills out the parser stiky error
// fills out the parser sticky error
func (d *ulReqDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,

View File

@ -132,6 +132,17 @@ func (e *ulReqEncoder) encodeDepth() stateFn {
return nil
}
return e.encodeFilter
}
func (e *ulReqEncoder) encodeFilter() stateFn {
if filter := e.data.Filter; filter != "" {
if err := e.pe.Encodef("filter %s\n", filter); err != nil {
e.err = fmt.Errorf("encoding filter %s: %s", filter, err)
return nil
}
}
return e.encodeFlush
}

View File

@ -19,6 +19,7 @@ var (
type ReferenceUpdateRequest struct {
Capabilities *capability.List
Commands []*Command
Options []*Option
Shallow *plumbing.Hash
// Packfile contains an optional packfile reader.
Packfile io.ReadCloser
@ -58,7 +59,7 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU
r := NewReferenceUpdateRequest()
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
if adv.Supports(capability.ReportStatus) {
@ -86,9 +87,9 @@ type Action string
const (
Create Action = "create"
Update = "update"
Delete = "delete"
Invalid = "invalid"
Update Action = "update"
Delete Action = "delete"
Invalid Action = "invalid"
)
type Command struct {
@ -120,3 +121,8 @@ func (c *Command) validate() error {
return nil
}
type Option struct {
Key string
Value string
}

View File

@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
@ -81,7 +80,7 @@ func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
var ok bool
rc, ok = r.(io.ReadCloser)
if !ok {
rc = ioutil.NopCloser(r)
rc = io.NopCloser(r)
}
d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}

View File

@ -9,10 +9,6 @@ import (
"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)
var (
zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := req.validate(); err != nil {
@ -29,6 +25,12 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
return err
}
if req.Capabilities.Supports(capability.PushOptions) {
if err := req.encodeOptions(e, req.Options); err != nil {
return err
}
}
if req.Packfile != nil {
if _, err := io.Copy(w, req.Packfile); err != nil {
return err
@ -73,3 +75,15 @@ func formatCommand(cmd *Command) string {
n := cmd.New.String()
return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}
func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
opts []*Option) error {
for _, opt := range opts {
if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil {
return err
}
}
return e.Flush()
}
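
A hedged sketch of attaching push options to a request; the keys and values are illustrative, and the remote must advertise push-options for them to be encoded:

```go
package main

import (
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	req := packp.NewReferenceUpdateRequest()
	// Options are only written when the push-options capability is set.
	_ = req.Capabilities.Set(capability.PushOptions)
	// Each option is sent as "key=value" after the commands.
	req.Options = []*packp.Option{
		{Key: "ci.skip", Value: "true"},
	}
}
```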

View File

@ -38,10 +38,10 @@ func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackReque
}
}
// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants
// length is zero
// IsEmpty returns whether a request is empty - it is empty if Haves are contained
// in the Wants, or if Wants length is zero, and we don't have any shallows
func (r *UploadPackRequest) IsEmpty() bool {
return isSubset(r.Wants, r.Haves)
return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0
}
func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {

View File

@ -24,7 +24,6 @@ type UploadPackResponse struct {
r io.ReadCloser
isShallow bool
isMultiACK bool
isOk bool
}
// NewUploadPackResponse create a new UploadPackResponse instance, the request
@ -79,7 +78,7 @@ func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
}
}
if err := r.ServerResponse.Encode(w); err != nil {
if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
return err
}

View File

@ -3,6 +3,7 @@ package plumbing
import (
"errors"
"fmt"
"regexp"
"strings"
)
@ -15,10 +16,11 @@ const (
symrefPrefix = "ref: "
)
// RefRevParseRules are a set of rules to parse references into short names.
// These are the same rules as used by git in shorten_unambiguous_ref.
// RefRevParseRules are a set of rules to parse references into short names, or expand into a full reference.
// These are the same rules as used by git in shorten_unambiguous_ref and expand_ref.
// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
var RefRevParseRules = []string{
"%s",
"refs/%s",
"refs/tags/%s",
"refs/heads/%s",
@ -28,6 +30,9 @@ var RefRevParseRules = []string{
var (
ErrReferenceNotFound = errors.New("reference not found")
// ErrInvalidReferenceName is returned when a reference name is invalid.
ErrInvalidReferenceName = errors.New("invalid reference name")
)
// ReferenceType reference type's
@ -113,7 +118,7 @@ func (r ReferenceName) String() string {
func (r ReferenceName) Short() string {
s := string(r)
res := s
for _, format := range RefRevParseRules {
for _, format := range RefRevParseRules[1:] {
_, err := fmt.Sscanf(s, format, &res)
if err == nil {
continue
@ -123,9 +128,95 @@ func (r ReferenceName) Short() string {
return res
}
var (
ctrlSeqs = regexp.MustCompile(`[\000-\037\177]`)
)
// Validate validates a reference name.
// This follows the git-check-ref-format rules.
// See https://git-scm.com/docs/git-check-ref-format
//
// It is important to note that this function does not check if the reference
// exists in the repository.
// It only checks if the reference name is valid.
// This function does not support the --refspec-pattern, --normalize, and
// --allow-onelevel options.
//
// Git imposes the following rules on how references are named:
//
// 1. They can include slash / for hierarchical (directory) grouping, but no
// slash-separated component can begin with a dot . or end with the
// sequence .lock.
// 2. They must contain at least one /. This enforces the presence of a
// category like heads/, tags/ etc. but the actual names are not
// restricted. If the --allow-onelevel option is used, this rule is
// waived.
// 3. They cannot have two consecutive dots .. anywhere.
// 4. They cannot have ASCII control characters (i.e. bytes whose values are
// lower than \040, or \177 DEL), space, tilde ~, caret ^, or colon :
// anywhere.
// 5. They cannot have question-mark ?, asterisk *, or open bracket [
// anywhere. See the --refspec-pattern option below for an exception to this
// rule.
// 6. They cannot begin or end with a slash / or contain multiple consecutive
// slashes (see the --normalize option below for an exception to this rule).
// 7. They cannot end with a dot ..
// 8. They cannot contain a sequence @{.
// 9. They cannot be the single character @.
// 10. They cannot contain a \.
func (r ReferenceName) Validate() error {
s := string(r)
if len(s) == 0 {
return ErrInvalidReferenceName
}
// HEAD is a special case
if r == HEAD {
return nil
}
// rule 7
if strings.HasSuffix(s, ".") {
return ErrInvalidReferenceName
}
// rule 2
parts := strings.Split(s, "/")
if len(parts) < 2 {
return ErrInvalidReferenceName
}
isBranch := r.IsBranch()
isTag := r.IsTag()
for i, part := range parts {
// rule 6
if len(part) == 0 {
return ErrInvalidReferenceName
}
if strings.HasPrefix(part, ".") || // rule 1
strings.Contains(part, "..") || // rule 3
ctrlSeqs.MatchString(part) || // rule 4
strings.ContainsAny(part, "~^:?*[ \t\n") || // rule 4 & 5
strings.Contains(part, "@{") || // rule 8
part == "@" || // rule 9
strings.Contains(part, "\\") || // rule 10
strings.HasSuffix(part, ".lock") { // rule 1
return ErrInvalidReferenceName
}
if (isBranch || isTag) && strings.HasPrefix(part, "-") && (i == 2) { // branches & tags can't start with -
return ErrInvalidReferenceName
}
}
return nil
}
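
A minimal sketch of the new validation; the names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
)

func main() {
	fmt.Println(plumbing.ReferenceName("refs/heads/feature/x").Validate()) // <nil>
	fmt.Println(plumbing.ReferenceName("refs/heads/a..b").Validate())      // invalid reference name (rule 3)
	fmt.Println(plumbing.ReferenceName("refs/heads/a.lock").Validate())    // invalid reference name (rule 1)
}
```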
const (
HEAD ReferenceName = "HEAD"
Master ReferenceName = "refs/heads/master"
Main ReferenceName = "refs/heads/main"
)
// Reference is a representation of git reference
@ -168,22 +259,22 @@ func NewHashReference(n ReferenceName, h Hash) *Reference {
}
}
// Type return the type of a reference
// Type returns the type of a reference
func (r *Reference) Type() ReferenceType {
return r.t
}
// Name return the name of a reference
// Name returns the name of a reference
func (r *Reference) Name() ReferenceName {
return r.n
}
// Hash return the hash of a hash reference
// Hash returns the hash of a hash reference
func (r *Reference) Hash() Hash {
return r.h
}
// Target return the target of a symbolic reference
// Target returns the target of a symbolic reference
func (r *Reference) Target() ReferenceName {
return r.target
}
@ -204,6 +295,21 @@ func (r *Reference) Strings() [2]string {
}
func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
ref := ""
switch r.Type() {
case HashReference:
ref = r.Hash().String()
case SymbolicReference:
ref = symrefPrefix + r.Target().String()
default:
return ""
}
name := r.Name().String()
var v strings.Builder
v.Grow(len(ref) + len(name) + 1)
v.WriteString(ref)
v.WriteString(" ")
v.WriteString(name)
return v.String()
}

View File

@ -42,6 +42,7 @@ type EncodedObjectStorer interface {
HasEncodedObject(plumbing.Hash) error
// EncodedObjectSize returns the plaintext size of the encoded object.
EncodedObjectSize(plumbing.Hash) (int64, error)
AddAlternate(remote string) error
}
// DeltaObjectStorer is an EncodedObjectStorer that can return delta
@ -52,8 +53,8 @@ type DeltaObjectStorer interface {
DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
}
// Transactioner is a optional method for ObjectStorer, it enable transaction
// base write and read operations in the storage
// Transactioner is an optional method for ObjectStorer; it enables
// transactional read and write operations.
type Transactioner interface {
// Begin starts a transaction.
Begin() Transaction
@ -87,8 +88,8 @@ type PackedObjectStorer interface {
DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
}
// PackfileWriter is a optional method for ObjectStorer, it enable direct write
// of packfile to the storage
// PackfileWriter is an optional method for ObjectStorer, it enables directly writing
// a packfile to storage.
type PackfileWriter interface {
// PackfileWriter returns a writer for writing a packfile to the storage
//

View File

@ -35,6 +35,10 @@ func InstallProtocol(scheme string, c transport.Transport) {
// http://, https://, ssh:// and file://.
// See `InstallProtocol` to add or modify protocols.
func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
return getTransport(endpoint)
}
func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) {
f, ok := Protocols[endpoint.Protocol]
if !ok {
return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)
@ -43,6 +47,5 @@ func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
if f == nil {
return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol)
}
return f, nil
}

View File

@ -19,6 +19,7 @@ import (
"fmt"
"io"
"net/url"
"path/filepath"
"strconv"
"strings"
@ -58,6 +59,11 @@ type Session interface {
// If the repository does not exist, returns ErrRepositoryNotFound.
// If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
AdvertisedReferences() (*packp.AdvRefs, error)
// AdvertisedReferencesContext retrieves the advertised references for a
// repository.
// If the repository does not exist, returns ErrRepositoryNotFound.
// If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
AdvertisedReferencesContext(context.Context) (*packp.AdvRefs, error)
io.Closer
}
@ -103,10 +109,45 @@ type Endpoint struct {
// Host is the host.
Host string
// Port is the port to connect, if 0 the default port for the given protocol
// wil be used.
// will be used.
Port int
// Path is the repository path.
Path string
// InsecureSkipTLS skips SSL verification when the protocol is https.
InsecureSkipTLS bool
// CaBundle specifies an additional CA bundle to use with the system cert pool.
CaBundle []byte
// Proxy provides info required for connecting to a proxy.
Proxy ProxyOptions
}
type ProxyOptions struct {
URL string
Username string
Password string
}
func (o *ProxyOptions) Validate() error {
if o.URL != "" {
_, err := url.Parse(o.URL)
return err
}
return nil
}
func (o *ProxyOptions) FullURL() (*url.URL, error) {
proxyURL, err := url.Parse(o.URL)
if err != nil {
return nil, err
}
if o.Username != "" {
if o.Password != "" {
proxyURL.User = url.UserPassword(o.Username, o.Password)
} else {
proxyURL.User = url.User(o.Username)
}
}
return proxyURL, nil
}
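
A hedged sketch of the new per-endpoint proxy support; the URLs and credentials are illustrative:

```go
package main

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing/transport"
)

func main() {
	ep, err := transport.NewEndpoint("https://example.com/project.git")
	if err != nil {
		panic(err)
	}
	ep.Proxy = transport.ProxyOptions{
		URL:      "http://proxy.internal:8080",
		Username: "user",
		Password: "secret",
	}
	// FullURL attaches the credentials to the parsed proxy URL.
	proxyURL, err := ep.Proxy.FullURL()
	if err != nil {
		panic(err)
	}
	fmt.Println(proxyURL) // http://user:secret@proxy.internal:8080
}
```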
var defaultPorts = map[string]int{
@ -187,11 +228,17 @@ func parseURL(endpoint string) (*Endpoint, error) {
pass, _ = u.User.Password()
}
host := u.Hostname()
if strings.Contains(host, ":") {
// IPv6 address
host = "[" + host + "]"
}
return &Endpoint{
Protocol: u.Scheme,
User: user,
Password: pass,
Host: u.Hostname(),
Host: host,
Port: getPort(u),
Path: getPath(u),
}, nil
@ -249,7 +296,11 @@ func parseFile(endpoint string) (*Endpoint, bool) {
return nil, false
}
path := endpoint
path, err := filepath.Abs(endpoint)
if err != nil {
return nil, false
}
return &Endpoint{
Protocol: "file",
Path: path,

View File

@ -6,12 +6,13 @@ import (
"errors"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"github.com/jesseduffield/go-git/v5/plumbing/transport"
"github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common"
"golang.org/x/sys/execabs"
)
// DefaultClient is the default local client.
@ -36,7 +37,7 @@ func NewClient(uploadPackBin, receivePackBin string) transport.Transport {
func prefixExecPath(cmd string) (string, error) {
// Use `git --exec-path` to find the exec path.
execCmd := exec.Command("git", "--exec-path")
execCmd := execabs.Command("git", "--exec-path")
stdout, err := execCmd.StdoutPipe()
if err != nil {
@ -54,7 +55,7 @@ func prefixExecPath(cmd string) (string, error) {
return "", err
}
if isPrefix {
return "", errors.New("Couldn't read exec-path line all at once")
return "", errors.New("couldn't read exec-path line all at once")
}
err = execCmd.Wait()
@ -66,7 +67,7 @@ func prefixExecPath(cmd string) (string, error) {
cmd = filepath.Join(execPath, cmd)
// Make sure it actually exists.
_, err = exec.LookPath(cmd)
_, err = execabs.LookPath(cmd)
if err != nil {
return "", err
}
@ -83,9 +84,9 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth
cmd = r.ReceivePackBin
}
_, err := exec.LookPath(cmd)
_, err := execabs.LookPath(cmd)
if err != nil {
if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
if e, ok := err.(*execabs.Error); ok && e.Err == execabs.ErrNotFound {
cmd, err = prefixExecPath(cmd)
if err != nil {
return nil, err
@ -95,11 +96,27 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth
}
}
return &command{cmd: exec.Command(cmd, ep.Path)}, nil
return &command{cmd: execabs.Command(cmd, adjustPathForWindows(ep.Path))}, nil
}
func isDriveLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// On Windows, the path that results from a file: URL has a leading slash. This
// has to be removed if there's a drive letter
func adjustPathForWindows(p string) string {
if runtime.GOOS != "windows" {
return p
}
if len(p) >= 3 && p[0] == '/' && isDriveLetter(p[1]) && p[2] == ':' {
return p[1:]
}
return p
}
type command struct {
cmd *exec.Cmd
cmd *execabs.Cmd
stderrCloser io.Closer
closed bool
}
@ -148,7 +165,7 @@ func (c *command) Close() error {
}
// When a repository does not exist, the command exits with code 128.
if _, ok := err.(*exec.ExitError); ok {
if _, ok := err.(*execabs.ExitError); ok {
return nil
}

View File

@ -2,11 +2,11 @@
package git
import (
"fmt"
"io"
"net"
"strconv"
"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp"
"github.com/jesseduffield/go-git/v5/plumbing/transport"
"github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
@ -41,10 +41,18 @@ type command struct {
// Start executes the command sending the required message to the TCP connection
func (c *command) Start() error {
cmd := endpointToCommand(c.command, c.endpoint)
req := packp.GitProtoRequest{
RequestCommand: c.command,
Pathname: c.endpoint.Path,
}
host := c.endpoint.Host
if c.endpoint.Port != DefaultPort {
host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port))
}
e := pktline.NewEncoder(c.conn)
return e.Encode([]byte(cmd))
req.Host = host
return req.Encode(c.conn)
}
func (c *command) connect() error {
@ -69,7 +77,7 @@ func (c *command) getHostWithPort() string {
port = DefaultPort
}
return fmt.Sprintf("%s:%d", host, port)
return net.JoinHostPort(host, strconv.Itoa(port))
}
// StderrPipe git protocol doesn't have any dedicated error channel
@ -77,27 +85,18 @@ func (c *command) StderrPipe() (io.Reader, error) {
return nil, nil
}
// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent
// StdinPipe returns the underlying connection as WriteCloser, wrapped to prevent
// call to the Close function from the connection, a command execution in git
// protocol can't be closed or killed
func (c *command) StdinPipe() (io.WriteCloser, error) {
return ioutil.WriteNopCloser(c.conn), nil
}
// StdoutPipe return the underlying connection as Reader
// StdoutPipe returns the underlying connection as Reader
func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}
func endpointToCommand(cmd string, ep *transport.Endpoint) string {
host := ep.Host
if ep.Port != DefaultPort {
host = fmt.Sprintf("%s:%d", ep.Host, ep.Port)
}
return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
}
// Close closes the TCP connection and connection.
func (c *command) Close() error {
if !c.connected {

View File

@ -3,21 +3,29 @@ package http
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"sync"
"github.com/jesseduffield/go-git/v5/plumbing"
"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp"
"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
"github.com/jesseduffield/go-git/v5/plumbing/transport"
"github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/golang/groupcache/lru"
)
// it requires a bytes.Buffer, because we need to know the length
func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) {
req.Header.Add("User-Agent", "git/1.0")
req.Header.Add("User-Agent", capability.DefaultAgent())
req.Header.Add("Host", host) // host:port
if content == nil {
@ -32,7 +40,7 @@ func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string
const infoRefsPath = "/info/refs"
func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) {
func advertisedReferences(ctx context.Context, s *session, serviceName string) (ref *packp.AdvRefs, err error) {
url := fmt.Sprintf(
"%s%s?service=%s",
s.endpoint.String(), infoRefsPath, serviceName,
@ -45,7 +53,7 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e
s.ApplyAuthToRequest(req)
applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
res, err := s.client.Do(req)
res, err := s.client.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
@ -66,6 +74,17 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e
return nil, err
}
// Git 2.41+ returns a zero-id plus capabilities when an empty
// repository is being cloned. This skips the existing logic within
// advrefs_decode.decodeFirstHash, which expects a flush-pkt instead.
//
// This logic aligns with plumbing/transport/internal/common/common.go.
if ar.IsEmpty() &&
// Empty repositories are valid for git-receive-pack.
transport.ReceivePackServiceName != serviceName {
return nil, transport.ErrEmptyRemoteRepository
}
transport.FilterUnsupportedCapabilities(ar.Capabilities)
s.advRefs = ar
@ -73,40 +92,83 @@ func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, e
}
type client struct {
c *http.Client
client *http.Client
transports *lru.Cache
mutex sync.RWMutex
}
// DefaultClient is the default HTTP client, which uses `http.DefaultClient`.
var DefaultClient = NewClient(nil)
// ClientOptions holds user configurable options for the client.
type ClientOptions struct {
// CacheMaxEntries is the maximum number of entries that the transport
// objects cache will hold at any given point in time. It must be a positive
// integer. Calling `client.addTransport()` after the cache has reached the
// specified size will result in the least recently used transport being
// deleted before the provided transport is added to the cache.
CacheMaxEntries int
}
var (
// defaultTransportCacheSize is the default capacity of the transport objects cache.
// Its value is 0 because transport caching is turned off by default and is an
// opt-in feature.
defaultTransportCacheSize = 0
// DefaultClient is the default HTTP client, which uses a net/http client configured
// with http.DefaultTransport.
DefaultClient = NewClient(nil)
)
// NewClient creates a new client with a custom net/http client.
// See `InstallProtocol` to install and override default http client.
// Unless a properly initialized client is given, it will fall back into
// `http.DefaultClient`.
// If the net/http client is nil or empty, it will use a net/http client configured
// with http.DefaultTransport.
//
// Note that the HTTP client cannot distinguish between private and
// nonexistent repositories on GitHub, so it returns `ErrAuthorizationRequired`
// for both.
func NewClient(c *http.Client) transport.Transport {
if c == nil {
return &client{http.DefaultClient}
c = &http.Client{
Transport: http.DefaultTransport,
}
}
return NewClientWithOptions(c, &ClientOptions{
CacheMaxEntries: defaultTransportCacheSize,
})
}
// NewClientWithOptions returns a new client configured with the provided net/http client
// and other custom options specific to the client.
// If the net/http client is nil or empty, it will use a net/http client configured
// with http.DefaultTransport.
func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transport {
if c == nil {
c = &http.Client{
Transport: http.DefaultTransport,
}
}
cl := &client{
client: c,
}
return &client{
c: c,
if opts != nil {
if opts.CacheMaxEntries > 0 {
cl.transports = lru.New(opts.CacheMaxEntries)
}
}
return cl
}
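
A hedged sketch of enabling the opt-in transport cache and installing the resulting client; the cache size is illustrative:

```go
package main

import (
	"net/http"

	"github.com/jesseduffield/go-git/v5/plumbing/transport/client"
	githttp "github.com/jesseduffield/go-git/v5/plumbing/transport/http"
)

func main() {
	// With CacheMaxEntries > 0, per-endpoint transports (proxy, CA bundle,
	// InsecureSkipTLS) are reused across sessions instead of being rebuilt.
	tr := githttp.NewClientWithOptions(
		&http.Client{Transport: http.DefaultTransport},
		&githttp.ClientOptions{CacheMaxEntries: 32},
	)
	client.InstallProtocol("https", tr)
}
```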
func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.UploadPackSession, error) {
return newUploadPackSession(c.c, ep, auth)
return newUploadPackSession(c, ep, auth)
}
func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.ReceivePackSession, error) {
return newReceivePackSession(c.c, ep, auth)
return newReceivePackSession(c, ep, auth)
}
type session struct {
@ -116,10 +178,106 @@ type session struct {
advRefs *packp.AdvRefs
}
func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
func transportWithInsecureTLS(transport *http.Transport) {
if transport.TLSClientConfig == nil {
transport.TLSClientConfig = &tls.Config{}
}
transport.TLSClientConfig.InsecureSkipVerify = true
}
func transportWithCABundle(transport *http.Transport, caBundle []byte) error {
rootCAs, err := x509.SystemCertPool()
if err != nil {
return err
}
if rootCAs == nil {
rootCAs = x509.NewCertPool()
}
rootCAs.AppendCertsFromPEM(caBundle)
if transport.TLSClientConfig == nil {
transport.TLSClientConfig = &tls.Config{}
}
transport.TLSClientConfig.RootCAs = rootCAs
return nil
}
func transportWithProxy(transport *http.Transport, proxyURL *url.URL) {
transport.Proxy = http.ProxyURL(proxyURL)
}
func configureTransport(transport *http.Transport, ep *transport.Endpoint) error {
if len(ep.CaBundle) > 0 {
if err := transportWithCABundle(transport, ep.CaBundle); err != nil {
return err
}
}
if ep.InsecureSkipTLS {
transportWithInsecureTLS(transport)
}
if ep.Proxy.URL != "" {
proxyURL, err := ep.Proxy.FullURL()
if err != nil {
return err
}
transportWithProxy(transport, proxyURL)
}
return nil
}
func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
var httpClient *http.Client
// We need to configure the http transport if there are transport specific
// options present in the endpoint.
if len(ep.CaBundle) > 0 || ep.InsecureSkipTLS || ep.Proxy.URL != "" {
var transport *http.Transport
// if the client wasn't configured to have a cache for transports then just configure
// the transport and use it directly, otherwise try to use the cache.
if c.transports == nil {
tr, ok := c.client.Transport.(*http.Transport)
if !ok {
return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s",
reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport))
}
transport = tr.Clone()
configureTransport(transport, ep)
} else {
transportOpts := transportOptions{
caBundle: string(ep.CaBundle),
insecureSkipTLS: ep.InsecureSkipTLS,
}
if ep.Proxy.URL != "" {
proxyURL, err := ep.Proxy.FullURL()
if err != nil {
return nil, err
}
transportOpts.proxyURL = *proxyURL
}
var found bool
transport, found = c.fetchTransport(transportOpts)
if !found {
transport = c.client.Transport.(*http.Transport).Clone()
configureTransport(transport, ep)
c.addTransport(transportOpts, transport)
}
}
httpClient = &http.Client{
Transport: transport,
CheckRedirect: c.client.CheckRedirect,
Jar: c.client.Jar,
Timeout: c.client.Timeout,
}
} else {
httpClient = c.client
}
s := &session{
auth: basicAuthFromEndpoint(ep),
client: c,
client: httpClient,
endpoint: ep,
}
if auth != nil {
@ -249,24 +407,38 @@ func (a *TokenAuth) String() string {
// Err is a dedicated error to return errors based on status code
type Err struct {
Response *http.Response
Reason string
}
// NewErr returns a new Err based on a http response
// NewErr returns a new Err based on an http response; it closes the response
// body if needed.
func NewErr(r *http.Response) error {
if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
return nil
}
switch r.StatusCode {
case http.StatusUnauthorized:
return transport.ErrAuthenticationRequired
case http.StatusForbidden:
return transport.ErrAuthorizationFailed
case http.StatusNotFound:
return transport.ErrRepositoryNotFound
var reason string
// If a response message is present, add it to error
var messageBuffer bytes.Buffer
if r.Body != nil {
messageLength, _ := messageBuffer.ReadFrom(r.Body)
if messageLength > 0 {
reason = messageBuffer.String()
}
_ = r.Body.Close()
}
return plumbing.NewUnexpectedError(&Err{r})
switch r.StatusCode {
case http.StatusUnauthorized:
return fmt.Errorf("%w: %s", transport.ErrAuthenticationRequired, reason)
case http.StatusForbidden:
return fmt.Errorf("%w: %s", transport.ErrAuthorizationFailed, reason)
case http.StatusNotFound:
return fmt.Errorf("%w: %s", transport.ErrRepositoryNotFound, reason)
}
return plumbing.NewUnexpectedError(&Err{r, reason})
}
// StatusCode returns the status code of the response

View File

@ -19,13 +19,17 @@ type rpSession struct {
*session
}
func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
func newReceivePackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
s, err := newSession(c, ep, auth)
return &rpSession{s}, err
}
func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
return advertisedReferences(s.session, transport.ReceivePackServiceName)
return advertisedReferences(context.TODO(), s.session, transport.ReceivePackServiceName)
}
func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
return advertisedReferences(ctx, s.session, transport.ReceivePackServiceName)
}
func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (
@ -98,7 +102,6 @@ func (s *rpSession) doRequest(
}
if err := NewErr(res); err != nil {
_ = res.Body.Close()
return nil, err
}

View File

@ -0,0 +1,40 @@
package http
import (
"net/http"
"net/url"
)
// transportOptions contains transport specific configuration.
type transportOptions struct {
insecureSkipTLS bool
// []byte is not comparable.
caBundle string
proxyURL url.URL
}
func (c *client) addTransport(opts transportOptions, transport *http.Transport) {
c.mutex.Lock()
c.transports.Add(opts, transport)
c.mutex.Unlock()
}
func (c *client) removeTransport(opts transportOptions) {
c.mutex.Lock()
c.transports.Remove(opts)
c.mutex.Unlock()
}
func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) {
c.mutex.RLock()
t, ok := c.transports.Get(opts)
c.mutex.RUnlock()
if !ok {
return nil, false
}
transport, ok := t.(*http.Transport)
if !ok {
return nil, false
}
return transport, true
}

View File

@ -19,13 +19,17 @@ type upSession struct {
*session
}
func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
func newUploadPackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) {
s, err := newSession(c, ep, auth)
return &upSession{s}, err
}
func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) {
return advertisedReferences(s.session, transport.UploadPackServiceName)
return advertisedReferences(context.TODO(), s.session, transport.UploadPackServiceName)
}
func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
return advertisedReferences(ctx, s.session, transport.UploadPackServiceName)
}
func (s *upSession) UploadPack(
@ -96,7 +100,6 @@ func (s *upSession) doRequest(
}
if err := NewErr(res); err != nil {
_ = res.Body.Close()
return nil, err
}

View File

@ -11,7 +11,7 @@ import (
"errors"
"fmt"
"io"
stdioutil "io/ioutil"
"regexp"
"strings"
"time"
@ -29,6 +29,10 @@ const (
var (
ErrTimeoutExceeded = errors.New("timeout exceeded")
// stdErrSkipPattern is used for skipping lines from a command's stderr output.
// Any line matching this pattern will be skipped from further
// processing and not be returned to calling code.
stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$")
)
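
For reference, a short sketch of what the skip pattern does and does not match; the inputs are illustrative and the regexp is mirrored here because stdErrSkipPattern is unexported:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	p := regexp.MustCompile("^remote:( =*){0,1}$")
	fmt.Println(p.MatchString("remote:"))       // true: blank banner line, skipped
	fmt.Println(p.MatchString("remote: =====")) // true: separator line, skipped
	fmt.Println(p.MatchString("remote: fatal")) // false: real message, kept
}
```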
// Commander creates Command instances. This is the main entry point for
@ -150,26 +154,37 @@ func (c *client) listenFirstError(r io.Reader) chan string {
errLine := make(chan string, 1)
go func() {
s := bufio.NewScanner(r)
if s.Scan() {
errLine <- s.Text()
} else {
close(errLine)
for {
if s.Scan() {
line := s.Text()
if !stdErrSkipPattern.MatchString(line) {
errLine <- line
break
}
} else {
close(errLine)
break
}
}
_, _ = io.Copy(stdioutil.Discard, r)
_, _ = io.Copy(io.Discard, r)
}()
return errLine
}
// AdvertisedReferences retrieves the advertised references from the server.
func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) {
return s.AdvertisedReferencesContext(context.TODO())
}
// AdvertisedReferences retrieves the advertised references from the server.
func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
if s.advRefs != nil {
return s.advRefs, nil
}
ar := packp.NewAdvRefs()
if err := ar.Decode(s.Stdout); err != nil {
if err := ar.Decode(s.StdoutContext(ctx)); err != nil {
if err := s.handleAdvRefDecodeError(err); err != nil {
return nil, err
}
@ -188,9 +203,22 @@ func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) {
}
func (s *session) handleAdvRefDecodeError(err error) error {
var errLine *pktline.ErrorLine
if errors.As(err, &errLine) {
if isRepoNotFoundError(errLine.Text) {
return transport.ErrRepositoryNotFound
}
return errLine
}
// If repository is not found, we get empty stdout and server writes an
// error to stderr.
if err == packp.ErrEmptyInput {
if errors.Is(err, packp.ErrEmptyInput) {
// TODO:(v6): handle this error in a better way.
// Instead of checking the stderr output for a specific error message,
// define an ExitError and embed the stderr output and exit (if one
// exists) in the error struct. Just like exec.ExitError.
s.finished = true
if err := s.checkNotFoundError(); err != nil {
return err
@ -230,6 +258,12 @@ func (s *session) handleAdvRefDecodeError(err error) error {
// returned with the packfile content. The reader must be closed after reading.
func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
if req.IsEmpty() {
// XXX: IsEmpty means the haves are a subset of the wants; in that case
// we already have everything we asked for. Close the connection and return nil.
if err := s.finish(); err != nil {
return nil, err
}
// TODO:(v6) return nil here
return nil, transport.ErrEmptyUploadPackRequest
}
@ -237,7 +271,7 @@ func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest)
return nil, err
}
if _, err := s.AdvertisedReferences(); err != nil {
if _, err := s.AdvertisedReferencesContext(ctx); err != nil {
return nil, err
}
@ -370,7 +404,7 @@ func (s *session) checkNotFoundError() error {
case <-t.C:
return ErrTimeoutExceeded
case line, ok := <-s.firstErrLine:
if !ok {
if !ok || len(line) == 0 {
return nil
}
@ -378,59 +412,43 @@ func (s *session) checkNotFoundError() error {
return transport.ErrRepositoryNotFound
}
// TODO:(v6): return server error just as it is without a prefix
return fmt.Errorf("unknown error: %s", line)
}
}
var (
githubRepoNotFoundErr = "ERROR: Repository not found."
bitbucketRepoNotFoundErr = "conq: repository does not exist."
const (
githubRepoNotFoundErr = "Repository not found."
bitbucketRepoNotFoundErr = "repository does not exist."
localRepoNotFoundErr = "does not appear to be a git repository"
gitProtocolNotFoundErr = "ERR \n Repository not found."
gitProtocolNoSuchErr = "ERR no such repository"
gitProtocolAccessDeniedErr = "ERR access denied"
gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access"
gitProtocolNotFoundErr = "Repository not found."
gitProtocolNoSuchErr = "no such repository"
gitProtocolAccessDeniedErr = "access denied"
gogsAccessDeniedErr = "Repository does not exist or you do not have access"
gitlabRepoNotFoundErr = "The project you were looking for could not be found"
)
func isRepoNotFoundError(s string) bool {
if strings.HasPrefix(s, githubRepoNotFoundErr) {
return true
}
if strings.HasPrefix(s, bitbucketRepoNotFoundErr) {
return true
}
if strings.HasSuffix(s, localRepoNotFoundErr) {
return true
}
if strings.HasPrefix(s, gitProtocolNotFoundErr) {
return true
}
if strings.HasPrefix(s, gitProtocolNoSuchErr) {
return true
}
if strings.HasPrefix(s, gitProtocolAccessDeniedErr) {
return true
}
if strings.HasPrefix(s, gogsAccessDeniedErr) {
return true
for _, err := range []string{
githubRepoNotFoundErr,
bitbucketRepoNotFoundErr,
localRepoNotFoundErr,
gitProtocolNotFoundErr,
gitProtocolNoSuchErr,
gitProtocolAccessDeniedErr,
gogsAccessDeniedErr,
gitlabRepoNotFoundErr,
} {
if strings.Contains(s, err) {
return true
}
}
return false
}
var (
nak = []byte("NAK")
eol = []byte("\n")
)
// uploadPack implements the git-upload-pack protocol.
func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error {
// TODO support multi_ack mode
// TODO support multi_ack_detailed mode
// TODO support acks for common objects

View File

@ -0,0 +1,46 @@
package common
import (
"bytes"
"io"
gogitioutil "github.com/jesseduffield/go-git/v5/utils/ioutil"
"github.com/jesseduffield/go-git/v5/plumbing/transport"
)
type MockCommand struct {
stdin bytes.Buffer
stdout bytes.Buffer
stderr bytes.Buffer
}
func (c MockCommand) StderrPipe() (io.Reader, error) {
return &c.stderr, nil
}
func (c MockCommand) StdinPipe() (io.WriteCloser, error) {
return gogitioutil.WriteNopCloser(&c.stdin), nil
}
func (c MockCommand) StdoutPipe() (io.Reader, error) {
return &c.stdout, nil
}
func (c MockCommand) Start() error {
return nil
}
func (c MockCommand) Close() error {
panic("not implemented")
}
type MockCommander struct {
stderr string
}
func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) {
return &MockCommand{
stderr: *bytes.NewBufferString(c.stderr),
}, nil
}

View File

@ -40,8 +40,16 @@ func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
return nil, err
}
if _, err := fs.Stat("config"); err != nil {
return nil, transport.ErrRepositoryNotFound
var bare bool
if _, err := fs.Stat("config"); err == nil {
bare = true
}
if !bare {
// do not use git.GitDirName due to import cycle
if _, err := fs.Stat(".git"); err != nil {
return nil, transport.ErrRepositoryNotFound
}
}
return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil

View File

@ -108,6 +108,10 @@ type upSession struct {
}
func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) {
return s.AdvertisedReferencesContext(context.TODO())
}
func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
ar := packp.NewAdvRefs()
if err := s.setSupportedCapabilities(ar.Capabilities); err != nil {
@ -185,7 +189,7 @@ func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Ha
}
func (*upSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}
@ -204,6 +208,10 @@ type rpSession struct {
}
func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
return s.AdvertisedReferencesContext(context.TODO())
}
func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) {
ar := packp.NewAdvRefs()
if err := s.setSupportedCapabilities(ar.Capabilities); err != nil {
@ -347,7 +355,7 @@ func (s *rpSession) reportStatus() *packp.ReportStatus {
}
func (*rpSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}

View File

@ -1,21 +1,17 @@
package ssh
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"github.com/jesseduffield/go-git/v5/plumbing/transport"
"github.com/mitchellh/go-homedir"
"github.com/skeema/knownhosts"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)
const DefaultUsername = "git"
@ -121,27 +117,15 @@ type PublicKeys struct {
// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An
// encryption password should be given if the pemBytes contains a password
// encrypted PEM block otherwise password should be empty. It supports RSA
// (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
// (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("invalid PEM data")
}
if x509.IsEncryptedPEMBlock(block) {
key, err := x509.DecryptPEMBlock(block, []byte(password))
if err != nil {
return nil, err
}
block = &pem.Block{Type: block.Type, Bytes: key}
pemBytes = pem.EncodeToMemory(block)
}
signer, err := ssh.ParsePrivateKey(pemBytes)
if _, ok := err.(*ssh.PassphraseMissingError); ok {
signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(password))
}
if err != nil {
return nil, err
}
return &PublicKeys{User: user, Signer: signer}, nil
}
@ -149,7 +133,7 @@ func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys,
// encoded private key. An encryption password should be given if the pemBytes
// contains a password encrypted PEM block otherwise password should be empty.
func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) {
bytes, err := ioutil.ReadFile(pemFile)
bytes, err := os.ReadFile(pemFile)
if err != nil {
return nil, err
}
@ -238,12 +222,19 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
//
// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS
// environment variable, example:
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// If SSH_KNOWN_HOSTS is not set the following file locations will be used:
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
//
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
kh, err := newKnownHosts(files...)
return ssh.HostKeyCallback(kh), err
}
func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) {
var err error
if len(files) == 0 {
@ -265,7 +256,7 @@ func getDefaultKnownHostsFiles() ([]string, error) {
return files, nil
}
homeDirPath, err := homedir.Dir()
homeDirPath, err := os.UserHomeDir()
if err != nil {
return nil, err
}
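
A hedged sketch of wiring the knownhosts-based verification into public key auth; the username and key path are illustrative:

```go
package main

import (
	gitssh "github.com/jesseduffield/go-git/v5/plumbing/transport/ssh"
)

func main() {
	auth, err := gitssh.NewPublicKeysFromFile("git", "/home/user/.ssh/id_ed25519", "")
	if err != nil {
		panic(err)
	}

	// Reads SSH_KNOWN_HOSTS, falling back to ~/.ssh/known_hosts and
	// /etc/ssh/ssh_known_hosts.
	cb, err := gitssh.NewKnownHostsCallback()
	if err != nil {
		panic(err)
	}
	auth.HostKeyCallback = cb
}
```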

Some files were not shown because too many files have changed in this diff.