Bump github.com/containerd/stargz-snapshotter/estargz (#464)
Bumps [github.com/containerd/stargz-snapshotter/estargz](https://github.com/containerd/stargz-snapshotter) from 0.8.0 to 0.9.0.
- [Release notes](https://github.com/containerd/stargz-snapshotter/releases)
- [Commits](https://github.com/containerd/stargz-snapshotter/compare/v0.8.0...v0.9.0)

---
updated-dependencies:
- dependency-name: github.com/containerd/stargz-snapshotter/estargz
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
This commit is contained in:
parent 6014fcda9a
commit 7477a29d40
go.mod (3 changes)

@@ -4,7 +4,7 @@ go 1.16

require (
github.com/containerd/containerd v1.5.7 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.8.0
github.com/containerd/stargz-snapshotter/estargz v0.9.0
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/docker/docker v20.10.8+incompatible
github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960
@@ -13,7 +13,6 @@ require (
github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b
github.com/google/go-cmp v0.5.6
github.com/google/go-containerregistry v0.6.0
github.com/klauspost/compress v1.13.6 // indirect
github.com/mattmoor/dep-notify v0.0.0-20190205035814-a45dec370a17
github.com/mattn/go-isatty v0.0.13 // indirect
github.com/opencontainers/image-spec v1.0.2-0.20210730191737-8e42a01fb1b7
go.sum (8 changes)

@@ -200,8 +200,8 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
github.com/containerd/stargz-snapshotter/estargz v0.8.0 h1:oA1wx8kTFfImfsT5bScbrZd8gK+WtQnn15q82Djvm0Y=
github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
github.com/containerd/stargz-snapshotter/estargz v0.9.0 h1:PkB6BSTfOKX23erT2GkoUKkJEcXfNcyKskIViK770v8=
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -510,7 +510,6 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -765,6 +764,9 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go (131 changes, generated, vendored)
@ -23,7 +23,6 @@
|
||||
package estargz
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
@ -42,6 +41,7 @@ import (
|
||||
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/vbatts/tar-split/archive/tar"
|
||||
)
|
||||
|
||||
// A Reader permits random access reads from a stargz file.
|
||||
@ -95,10 +95,10 @@ func WithTelemetry(telemetry *Telemetry) OpenOption {
|
||||
}
|
||||
}
|
||||
|
||||
// A func which takes start time and records the diff
|
||||
// MeasureLatencyHook is a func which takes start time and records the diff
|
||||
type MeasureLatencyHook func(time.Time)
|
||||
|
||||
// A struct which defines telemetry hooks. By implementing these hooks you should be able to record
|
||||
// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record
|
||||
// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...)
|
||||
type Telemetry struct {
|
||||
GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
|
||||
@ -146,7 +146,7 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
|
||||
fSize := d.FooterSize()
|
||||
fOffset := positive(int64(len(footer)) - fSize)
|
||||
maybeTocBytes := footer[:fOffset]
|
||||
tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
|
||||
_, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
|
||||
if err != nil {
|
||||
allErr = append(allErr, err)
|
||||
continue
|
||||
@ -187,7 +187,7 @@ func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr e
|
||||
for _, d := range []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} {
|
||||
fSize := d.FooterSize()
|
||||
fOffset := positive(int64(len(footer)) - fSize)
|
||||
tocOffset, _, err := d.ParseFooter(footer[fOffset:])
|
||||
_, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
|
||||
if err == nil {
|
||||
return tocOffset, fSize, err
|
||||
}
|
||||
@ -326,6 +326,10 @@ func (r *Reader) getOrCreateDir(d string) *TOCEntry {
|
||||
return e
|
||||
}
|
||||
|
||||
func (r *Reader) TOCDigest() digest.Digest {
|
||||
return r.tocDigest
|
||||
}
|
||||
|
||||
// VerifyTOC checks that the TOC JSON in the passed blob matches the
|
||||
// passed digests and that the TOC JSON contains digests for all chunks
|
||||
// contained in the blob. If the verification succceeds, this function
|
||||
@ -335,7 +339,12 @@ func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
|
||||
if r.tocDigest != tocDigest {
|
||||
return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
|
||||
}
|
||||
return r.Verifiers()
|
||||
}
|
||||
|
||||
// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases
|
||||
// because this doesn't verify TOC.
|
||||
func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
|
||||
chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
|
||||
regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest
|
||||
var chunkDigestMapIncomplete bool
|
||||
@ -591,6 +600,11 @@ type currentCompressionWriter struct{ w *Writer }
|
||||
|
||||
func (ccw currentCompressionWriter) Write(p []byte) (int, error) {
|
||||
ccw.w.diffHash.Write(p)
|
||||
if ccw.w.gz == nil {
|
||||
if err := ccw.w.condOpenGz(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return ccw.w.gz.Write(p)
|
||||
}
|
||||
|
||||
@ -601,6 +615,25 @@ func (w *Writer) chunkSize() int {
|
||||
return w.ChunkSize
|
||||
}
|
||||
|
||||
// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob.
|
||||
// TOC JSON and footer are removed.
|
||||
func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
|
||||
footerSize := c.FooterSize()
|
||||
if sr.Size() < footerSize {
|
||||
return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize)
|
||||
}
|
||||
footerOffset := sr.Size() - footerSize
|
||||
footer := make([]byte, footerSize)
|
||||
if _, err := sr.ReadAt(footer, footerOffset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobPayloadSize, _, _, err := c.ParseFooter(footer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse footer")
|
||||
}
|
||||
return c.Reader(io.LimitReader(sr, blobPayloadSize))
|
||||
}
|
||||
|
||||
// NewWriter returns a new stargz writer (gzip-based) writing to w.
|
||||
//
|
||||
// The writer must be closed to write its trailing table of contents.
|
||||
@ -616,7 +649,7 @@ func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
|
||||
return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel))
|
||||
}
|
||||
|
||||
// NewWriterLevel returns a new stargz writer writing to w.
|
||||
// NewWriterWithCompressor returns a new stargz writer writing to w.
|
||||
// The compression method is configurable.
|
||||
//
|
||||
// The writer must be closed to write its trailing table of contents.
|
||||
@ -696,29 +729,71 @@ func (w *Writer) condOpenGz() (err error) {
|
||||
// each of its contents to w.
|
||||
//
|
||||
// The input r can optionally be gzip compressed but the output will
|
||||
// always be gzip compressed.
|
||||
// always be compressed by the specified compressor.
|
||||
func (w *Writer) AppendTar(r io.Reader) error {
|
||||
return w.appendTar(r, false)
|
||||
}
|
||||
|
||||
// AppendTarLossLess reads the tar or tar.gz file from r and appends
|
||||
// each of its contents to w.
|
||||
//
|
||||
// The input r can optionally be gzip compressed but the output will
|
||||
// always be compressed by the specified compressor.
|
||||
//
|
||||
// The difference of this func with AppendTar is that this writes
|
||||
// the input tar stream into w without any modification (e.g. to header bytes).
|
||||
//
|
||||
// Note that if the input tar stream already contains TOC JSON, this returns
|
||||
// error because w cannot overwrite the TOC JSON to the one generated by w without
|
||||
// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
|
||||
// you shoud decompress it and remove TOC JSON in advance.
|
||||
func (w *Writer) AppendTarLossLess(r io.Reader) error {
|
||||
return w.appendTar(r, true)
|
||||
}
|
||||
|
||||
func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
||||
var src io.Reader
|
||||
br := bufio.NewReader(r)
|
||||
var tr *tar.Reader
|
||||
if isGzip(br) {
|
||||
// NewReader can't fail if isGzip returned true.
|
||||
zr, _ := gzip.NewReader(br)
|
||||
tr = tar.NewReader(zr)
|
||||
src = zr
|
||||
} else {
|
||||
tr = tar.NewReader(br)
|
||||
src = io.Reader(br)
|
||||
}
|
||||
dst := currentCompressionWriter{w}
|
||||
var tw *tar.Writer
|
||||
if !lossless {
|
||||
tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
|
||||
}
|
||||
tr := tar.NewReader(src)
|
||||
if lossless {
|
||||
tr.RawAccounting = true
|
||||
}
|
||||
for {
|
||||
h, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
if lossless {
|
||||
if remain := tr.RawBytes(); len(remain) > 0 {
|
||||
// Collect the remaining null bytes.
|
||||
// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
|
||||
if _, err := dst.Write(remain); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
|
||||
}
|
||||
if h.Name == TOCTarName {
|
||||
if cleanEntryName(h.Name) == TOCTarName {
|
||||
// It is possible for a layer to be "stargzified" twice during the
|
||||
// distribution lifecycle. So we reserve "TOCTarName" here to avoid
|
||||
// duplicated entries in the resulting layer.
|
||||
if lossless {
|
||||
// We cannot handle this in lossless way.
|
||||
return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
@ -744,9 +819,14 @@ func (w *Writer) AppendTar(r io.Reader) error {
|
||||
if err := w.condOpenGz(); err != nil {
|
||||
return err
|
||||
}
|
||||
tw := tar.NewWriter(currentCompressionWriter{w})
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
if tw != nil {
|
||||
if err := tw.WriteHeader(h); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if _, err := dst.Write(tr.RawBytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
switch h.Typeflag {
|
||||
case tar.TypeLink:
|
||||
@ -808,7 +888,13 @@ func (w *Writer) AppendTar(r io.Reader) error {
|
||||
}
|
||||
|
||||
teeChunk := io.TeeReader(tee, chunkDigest.Hash())
|
||||
if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
|
||||
var out io.Writer
|
||||
if tw != nil {
|
||||
out = tw
|
||||
} else {
|
||||
out = dst
|
||||
}
|
||||
if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil {
|
||||
return fmt.Errorf("error copying %q: %v", h.Name, err)
|
||||
}
|
||||
ent.ChunkDigest = chunkDigest.Digest().String()
|
||||
@ -825,11 +911,18 @@ func (w *Writer) AppendTar(r io.Reader) error {
|
||||
if payloadDigest != nil {
|
||||
regFileEntry.Digest = payloadDigest.Digest().String()
|
||||
}
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
if tw != nil {
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
remainDest := ioutil.Discard
|
||||
if lossless {
|
||||
remainDest = dst // Preserve the remaining bytes in lossless mode
|
||||
}
|
||||
_, err := io.Copy(remainDest, src)
|
||||
return err
|
||||
}
|
||||
|
||||
// DiffID returns the SHA-256 of the uncompressed tar bytes.
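Not part of the upstream diff: the main additions in this file are the lossless write path (AppendTarLossLess) and the matching Unpack helper. Below is a minimal round-trip sketch of those two APIs, assuming an in-memory tar stream and the default gzip-based writer; the variable names are illustrative only.

package main

import (
	"bytes"
	"io"
	"log"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	var tarBytes []byte // assumed to hold an uncompressed tar stream

	// Append the tar without rewriting its header bytes (lossless mode),
	// then let Close emit the TOC JSON and the footer.
	buf := new(bytes.Buffer)
	w := estargz.NewWriter(buf) // gzip-based writer
	if err := w.AppendTarLossLess(bytes.NewReader(tarBytes)); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Unpack strips the TOC and footer again and returns the tar payload.
	blob := buf.Bytes()
	sr := io.NewSectionReader(bytes.NewReader(blob), 0, int64(len(blob)))
	rc, err := estargz.Unpack(sr, new(estargz.GzipDecompressor))
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	restored, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	_ = restored // should reproduce the original tar bytes
}

In lossless mode the unpacked stream is expected to match the input byte for byte, which is what the new digest comparison in testutil.go below checks.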
vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod (3 changes, generated, vendored)

@@ -3,8 +3,9 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16

require (
github.com/klauspost/compress v1.13.5
github.com/klauspost/compress v1.13.6
github.com/opencontainers/go-digest v1.0.0
github.com/pkg/errors v0.9.1
github.com/vbatts/tar-split v0.11.2
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
)
vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum (18 changes, generated, vendored)

@@ -1,8 +1,22 @@
github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go (30 changes, generated, vendored)
@ -124,31 +124,31 @@ func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Dig
|
||||
return parseTOCEStargz(r)
|
||||
}
|
||||
|
||||
func (gz *GzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
|
||||
func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
|
||||
if len(p) != FooterSize {
|
||||
return 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
|
||||
return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
defer zr.Close()
|
||||
extra := zr.Header.Extra
|
||||
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
|
||||
if si1 != 'S' || si2 != 'G' {
|
||||
return 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
|
||||
return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
|
||||
}
|
||||
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
|
||||
return 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
|
||||
return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
|
||||
}
|
||||
if string(subfield[16:]) != "STARGZ" {
|
||||
return 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
|
||||
return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
|
||||
}
|
||||
tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
}
|
||||
return tocOffset, 0, nil
|
||||
return tocOffset, tocOffset, 0, nil
|
||||
}
|
||||
|
||||
func (gz *GzipDecompressor) FooterSize() int64 {
|
||||
@ -165,27 +165,27 @@ func (gz *legacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst dige
|
||||
return parseTOCEStargz(r)
|
||||
}
|
||||
|
||||
func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (tocOffset, tocSize int64, err error) {
|
||||
func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
|
||||
if len(p) != legacyFooterSize {
|
||||
return 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
|
||||
return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
|
||||
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
|
||||
}
|
||||
defer zr.Close()
|
||||
extra := zr.Header.Extra
|
||||
if len(extra) != 16+len("STARGZ") {
|
||||
return 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
|
||||
return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
|
||||
}
|
||||
if string(extra[16:]) != "STARGZ" {
|
||||
return 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
|
||||
return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
|
||||
}
|
||||
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
|
||||
}
|
||||
return tocOffset, 0, nil
|
||||
return tocOffset, tocOffset, 0, nil
|
||||
}
|
||||
|
||||
func (gz *legacyGzipDecompressor) FooterSize() int64 {
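Not part of the upstream diff: both gzip decompressors above now report blobPayloadSize from ParseFooter. A small caller-side sketch of the before/after signatures, using GzipDecompressor directly; layer.estargz is a placeholder path, not something from this commit.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	f, err := os.Open("layer.estargz") // hypothetical gzip-based eStargz blob
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	d := new(estargz.GzipDecompressor)
	fSize := d.FooterSize()
	footer := make([]byte, fSize)
	if _, err := f.ReadAt(footer, fi.Size()-fSize); err != nil {
		log.Fatal(err)
	}

	// estargz v0.8.0: tocOffset, tocSize, err := d.ParseFooter(footer)
	// estargz v0.9.0 additionally returns blobPayloadSize, the number of
	// bytes preceding the TOC, which Unpack uses to bound its reads.
	blobPayloadSize, tocOffset, tocSize, err := d.ParseFooter(footer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(blobPayloadSize, tocOffset, tocSize)
}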
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go (494 changes, generated, vendored)
@ -148,93 +148,96 @@ func testBuild(t *testing.T, controllers ...TestingController) {
|
||||
srcCompression := srcCompression
|
||||
for _, cl := range controllers {
|
||||
cl := cl
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q-src=%d", cl, prefix, srcCompression), func(t *testing.T) {
|
||||
tarBlob := buildTarStatic(t, tt.in, prefix)
|
||||
// Test divideEntries()
|
||||
entries, err := sortEntries(tarBlob, nil, nil) // identical order
|
||||
if err != nil {
|
||||
t.Fatalf("faield to parse tar: %v", err)
|
||||
}
|
||||
var merged []*entry
|
||||
for _, part := range divideEntries(entries, 4) {
|
||||
merged = append(merged, part...)
|
||||
}
|
||||
if !reflect.DeepEqual(entries, merged) {
|
||||
for _, e := range entries {
|
||||
t.Logf("Original: %v", e.header)
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
|
||||
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
|
||||
// Test divideEntries()
|
||||
entries, err := sortEntries(tarBlob, nil, nil) // identical order
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse tar: %v", err)
|
||||
}
|
||||
for _, e := range merged {
|
||||
t.Logf("Merged: %v", e.header)
|
||||
var merged []*entry
|
||||
for _, part := range divideEntries(entries, 4) {
|
||||
merged = append(merged, part...)
|
||||
}
|
||||
if !reflect.DeepEqual(entries, merged) {
|
||||
for _, e := range entries {
|
||||
t.Logf("Original: %v", e.header)
|
||||
}
|
||||
for _, e := range merged {
|
||||
t.Logf("Merged: %v", e.header)
|
||||
}
|
||||
t.Errorf("divided entries couldn't be merged")
|
||||
return
|
||||
}
|
||||
t.Errorf("divided entries couldn't be merged")
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare sample data
|
||||
wantBuf := new(bytes.Buffer)
|
||||
sw := NewWriterWithCompressor(wantBuf, cl)
|
||||
sw.ChunkSize = tt.chunkSize
|
||||
if err := sw.AppendTar(tarBlob); err != nil {
|
||||
t.Fatalf("faield to append tar to want stargz: %v", err)
|
||||
}
|
||||
if _, err := sw.Close(); err != nil {
|
||||
t.Fatalf("faield to prepare want stargz: %v", err)
|
||||
}
|
||||
wantData := wantBuf.Bytes()
|
||||
want, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(wantData), 0, int64(len(wantData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the want stargz: %v", err)
|
||||
}
|
||||
// Prepare sample data
|
||||
wantBuf := new(bytes.Buffer)
|
||||
sw := NewWriterWithCompressor(wantBuf, cl)
|
||||
sw.ChunkSize = tt.chunkSize
|
||||
if err := sw.AppendTar(tarBlob); err != nil {
|
||||
t.Fatalf("failed to append tar to want stargz: %v", err)
|
||||
}
|
||||
if _, err := sw.Close(); err != nil {
|
||||
t.Fatalf("failed to prepare want stargz: %v", err)
|
||||
}
|
||||
wantData := wantBuf.Bytes()
|
||||
want, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(wantData), 0, int64(len(wantData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the want stargz: %v", err)
|
||||
}
|
||||
|
||||
// Prepare testing data
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(tt.chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("faield to build stargz: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
gotBuf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(gotBuf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
gotData := gotBuf.Bytes()
|
||||
got, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the got stargz: %v", err)
|
||||
}
|
||||
// Prepare testing data
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(tt.chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build stargz: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
gotBuf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(gotBuf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
gotData := gotBuf.Bytes()
|
||||
got, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the got stargz: %v", err)
|
||||
}
|
||||
|
||||
// Check DiffID is properly calculated
|
||||
rc.Close()
|
||||
diffID := rc.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, gotData)
|
||||
if diffID.String() != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
// Check DiffID is properly calculated
|
||||
rc.Close()
|
||||
diffID := rc.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, gotData)
|
||||
if diffID.String() != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
|
||||
// Compare as stargz
|
||||
if !isSameVersion(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz hasn't same json")
|
||||
return
|
||||
}
|
||||
if !isSameEntries(t, want, got) {
|
||||
t.Errorf("built stargz isn't same as the original")
|
||||
return
|
||||
}
|
||||
// Compare as stargz
|
||||
if !isSameVersion(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz hasn't same json")
|
||||
return
|
||||
}
|
||||
if !isSameEntries(t, want, got) {
|
||||
t.Errorf("built stargz isn't same as the original")
|
||||
return
|
||||
}
|
||||
|
||||
// Compare as tar.gz
|
||||
if !isSameTarGz(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz isn't same tar.gz")
|
||||
return
|
||||
}
|
||||
})
|
||||
// Compare as tar.gz
|
||||
if !isSameTarGz(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz isn't same tar.gz")
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -526,7 +529,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
dir("test2/"), // modified
|
||||
), allowedPrefix[0])),
|
||||
},
|
||||
@ -544,7 +547,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", ""),
|
||||
file("foo.txt", "M"), // modified
|
||||
dir("test/"),
|
||||
@ -567,7 +570,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified
|
||||
file("foo.txt", "a"),
|
||||
dir("test/"),
|
||||
@ -593,7 +596,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
checkVerifyInvalidStargzFail(buildTarStatic(t, tarOf(
|
||||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", "bazbazbazbazbazbazbaz"),
|
||||
file("foo.txt", "a"),
|
||||
symlink("barlink", "test/bar.txt"),
|
||||
@ -615,30 +618,33 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
cl := cl
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
|
||||
// Get original tar file and chunk digests
|
||||
dgstMap := make(map[string]digest.Digest)
|
||||
tarBlob := buildTarStatic(t, tt.tarInit(t, dgstMap), prefix)
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
|
||||
// Get original tar file and chunk digests
|
||||
dgstMap := make(map[string]digest.Digest)
|
||||
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
|
||||
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to convert stargz: %v", err)
|
||||
}
|
||||
tocDigest := rc.TOCDigest()
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(buf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
newStargz := buf.Bytes()
|
||||
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
|
||||
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to convert stargz: %v", err)
|
||||
}
|
||||
tocDigest := rc.TOCDigest()
|
||||
defer rc.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(buf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
newStargz := buf.Bytes()
|
||||
// NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
|
||||
dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
|
||||
|
||||
for _, check := range tt.checks {
|
||||
check(t, newStargz, tocDigest, dgstMap, cl)
|
||||
}
|
||||
})
|
||||
for _, check := range tt.checks {
|
||||
check(t, newStargz, tocDigest, dgstMap, cl)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1058,7 +1064,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
|
||||
if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
|
||||
return nil, 0, errors.Wrap(err, "error reading footer")
|
||||
}
|
||||
tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
|
||||
_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed to parse footer")
|
||||
}
|
||||
@ -1085,11 +1091,15 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
in []tarEntry
|
||||
want []stargzCheck
|
||||
wantNumGz int // expected number of streams
|
||||
|
||||
wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
|
||||
wantFailOnLossLess bool
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
in: tarOf(),
|
||||
wantNumGz: 2, // TOC + footer
|
||||
name: "empty",
|
||||
in: tarOf(),
|
||||
wantNumGz: 2, // empty tar + TOC + footer
|
||||
wantNumGzLossLess: 3, // empty tar + TOC + footer
|
||||
want: checks(
|
||||
numTOCEntries(0),
|
||||
),
|
||||
@ -1224,26 +1234,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
{
|
||||
name: "block_char_fifo",
|
||||
in: tarOf(
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Name: prefix + "b",
|
||||
Typeflag: tar.TypeBlock,
|
||||
Devmajor: 123,
|
||||
Devminor: 456,
|
||||
Format: format,
|
||||
})
|
||||
}),
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Name: prefix + "c",
|
||||
Typeflag: tar.TypeChar,
|
||||
Devmajor: 111,
|
||||
Devminor: 222,
|
||||
Format: format,
|
||||
})
|
||||
}),
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Name: prefix + "f",
|
||||
Typeflag: tar.TypeFifo,
|
||||
Format: format,
|
||||
})
|
||||
}),
|
||||
),
|
||||
@ -1278,6 +1291,29 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
hasMode("foo3/bar5", os.FileMode(0755)),
|
||||
),
|
||||
},
|
||||
{
|
||||
name: "lossy",
|
||||
in: tarOf(
|
||||
dir("bar/", sampleOwner),
|
||||
dir("foo/", sampleOwner),
|
||||
file("foo/bar.txt", content, sampleOwner),
|
||||
file(TOCTarName, "dummy"), // ignored by the writer. (lossless write returns error)
|
||||
),
|
||||
wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer
|
||||
want: checks(
|
||||
numTOCEntries(3),
|
||||
hasDir("bar/"),
|
||||
hasDir("foo/"),
|
||||
hasFileLen("foo/bar.txt", len(content)),
|
||||
entryHasChildren("", "bar", "foo"),
|
||||
entryHasChildren("foo", "bar.txt"),
|
||||
hasChunkEntries("foo/bar.txt", 1),
|
||||
hasEntryOwner("bar/", sampleOwner),
|
||||
hasEntryOwner("foo/", sampleOwner),
|
||||
hasEntryOwner("foo/bar.txt", sampleOwner),
|
||||
),
|
||||
wantFailOnLossLess: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@ -1285,47 +1321,90 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
|
||||
cl := cl
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v-prefix=%q", cl, prefix), func(t *testing.T) {
|
||||
tr, cancel := buildTar(t, tt.in, prefix)
|
||||
defer cancel()
|
||||
var stargzBuf bytes.Buffer
|
||||
w := NewWriterWithCompressor(&stargzBuf, cl)
|
||||
w.ChunkSize = tt.chunkSize
|
||||
if err := w.AppendTar(tr); err != nil {
|
||||
t.Fatalf("Append: %v", err)
|
||||
}
|
||||
if _, err := w.Close(); err != nil {
|
||||
t.Fatalf("Writer.Close: %v", err)
|
||||
}
|
||||
b := stargzBuf.Bytes()
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
for _, lossless := range []bool{true, false} {
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
|
||||
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
|
||||
origTarDgstr := digest.Canonical.Digester()
|
||||
tr = io.TeeReader(tr, origTarDgstr.Hash())
|
||||
var stargzBuf bytes.Buffer
|
||||
w := NewWriterWithCompressor(&stargzBuf, cl)
|
||||
w.ChunkSize = tt.chunkSize
|
||||
if lossless {
|
||||
err := w.AppendTarLossLess(tr)
|
||||
if tt.wantFailOnLossLess {
|
||||
if err != nil {
|
||||
return // expected to fail
|
||||
}
|
||||
t.Fatalf("Append wanted to fail on lossless")
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Append(lossless): %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := w.AppendTar(tr); err != nil {
|
||||
t.Fatalf("Append: %v", err)
|
||||
}
|
||||
}
|
||||
if _, err := w.Close(); err != nil {
|
||||
t.Fatalf("Writer.Close: %v", err)
|
||||
}
|
||||
b := stargzBuf.Bytes()
|
||||
|
||||
diffID := w.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, b)
|
||||
if diffID != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
if lossless {
|
||||
// Check if the result blob reserves original tar metadata
|
||||
rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
|
||||
if err != nil {
|
||||
t.Errorf("failed to decompress blob: %v", err)
|
||||
return
|
||||
}
|
||||
defer rc.Close()
|
||||
resultDgstr := digest.Canonical.Digester()
|
||||
if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
|
||||
t.Errorf("failed to read result decompressed blob: %v", err)
|
||||
return
|
||||
}
|
||||
if resultDgstr.Digest() != origTarDgstr.Digest() {
|
||||
t.Errorf("lossy compression occurred: digest=%v; want %v",
|
||||
resultDgstr.Digest(), origTarDgstr.Digest())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
got := cl.CountStreams(t, b)
|
||||
if got != tt.wantNumGz {
|
||||
t.Errorf("number of streams = %d; want %d", got, tt.wantNumGz)
|
||||
}
|
||||
diffID := w.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, b)
|
||||
if diffID != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
|
||||
telemetry, checkCalled := newCalledTelemetry()
|
||||
r, err := Open(
|
||||
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
|
||||
WithDecompressors(cl),
|
||||
WithTelemetry(telemetry),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("stargz.Open: %v", err)
|
||||
got := cl.CountStreams(t, b)
|
||||
wantNumGz := tt.wantNumGz
|
||||
if lossless && tt.wantNumGzLossLess > 0 {
|
||||
wantNumGz = tt.wantNumGzLossLess
|
||||
}
|
||||
if got != wantNumGz {
|
||||
t.Errorf("number of streams = %d; want %d", got, wantNumGz)
|
||||
}
|
||||
|
||||
telemetry, checkCalled := newCalledTelemetry()
|
||||
r, err := Open(
|
||||
io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
|
||||
WithDecompressors(cl),
|
||||
WithTelemetry(telemetry),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("stargz.Open: %v", err)
|
||||
}
|
||||
if err := checkCalled(); err != nil {
|
||||
t.Errorf("telemetry failure: %v", err)
|
||||
}
|
||||
for _, want := range tt.want {
|
||||
want.check(t, r)
|
||||
}
|
||||
})
|
||||
}
|
||||
if err := checkCalled(); err != nil {
|
||||
t.Errorf("telemetry failure: %v", err)
|
||||
}
|
||||
for _, want := range tt.want {
|
||||
want.check(t, r)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1655,49 +1734,41 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
|
||||
func tarOf(s ...tarEntry) []tarEntry { return s }
|
||||
|
||||
type tarEntry interface {
|
||||
appendTar(tw *tar.Writer, prefix string) error
|
||||
appendTar(tw *tar.Writer, prefix string, format tar.Format) error
|
||||
}
|
||||
|
||||
type tarEntryFunc func(*tar.Writer, string) error
|
||||
type tarEntryFunc func(*tar.Writer, string, tar.Format) error
|
||||
|
||||
func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string) error { return f(tw, prefix) }
|
||||
|
||||
func buildTar(t *testing.T, ents []tarEntry, prefix string) (r io.Reader, cancel func()) {
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
tw := tar.NewWriter(pw)
|
||||
for _, ent := range ents {
|
||||
if err := ent.appendTar(tw, prefix); err != nil {
|
||||
t.Errorf("building input tar: %v", err)
|
||||
pw.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Errorf("closing write of input tar: %v", err)
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return pr, func() { go pr.Close(); go pw.Close() }
|
||||
func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
return f(tw, prefix, format)
|
||||
}
|
||||
|
||||
func buildTarStatic(t *testing.T, ents []tarEntry, prefix string) *io.SectionReader {
|
||||
func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
|
||||
format := tar.FormatUnknown
|
||||
for _, opt := range opts {
|
||||
switch v := opt.(type) {
|
||||
case tar.Format:
|
||||
format = v
|
||||
default:
|
||||
panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
|
||||
}
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
for _, ent := range ents {
|
||||
if err := ent.appendTar(tw, prefix); err != nil {
|
||||
if err := ent.appendTar(tw, prefix, format); err != nil {
|
||||
t.Fatalf("building input tar: %v", err)
|
||||
}
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Errorf("closing write of input tar: %v", err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works
|
||||
return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
|
||||
}
|
||||
|
||||
func dir(name string, opts ...interface{}) tarEntry {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
var o owner
|
||||
mode := os.FileMode(0755)
|
||||
for _, opt := range opts {
|
||||
@ -1723,6 +1794,7 @@ func dir(name string, opts ...interface{}) tarEntry {
|
||||
Mode: tm,
|
||||
Uid: o.uid,
|
||||
Gid: o.gid,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
@ -1737,7 +1809,7 @@ type owner struct {
|
||||
}
|
||||
|
||||
func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
var xattrs xAttr
|
||||
var o owner
|
||||
mode := os.FileMode(0644)
|
||||
@ -1760,6 +1832,9 @@ func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(xattrs) > 0 {
|
||||
format = tar.FormatPAX // only PAX supports xattrs
|
||||
}
|
||||
if err := tw.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: prefix + name,
|
||||
@ -1768,6 +1843,7 @@ func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
Size: int64(len(contents)),
|
||||
Uid: o.uid,
|
||||
Gid: o.gid,
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1777,78 +1853,76 @@ func file(name, contents string, opts ...interface{}) tarEntry {
|
||||
}
|
||||
|
||||
func symlink(name, target string) tarEntry {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
|
||||
return tw.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Name: prefix + name,
|
||||
Linkname: target,
|
||||
Mode: 0644,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func link(name string, linkname string) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeLink,
|
||||
Name: prefix + name,
|
||||
Linkname: linkname,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeLink,
|
||||
Name: prefix + name,
|
||||
Linkname: linkname,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func chardev(name string, major, minor int64) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeChar,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeChar,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func blockdev(name string, major, minor int64) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeBlock,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeBlock,
|
||||
Name: prefix + name,
|
||||
Devmajor: major,
|
||||
Devminor: minor,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
func fifo(name string) tarEntry {
|
||||
now := time.Now()
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
return w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeFifo,
|
||||
Name: prefix + name,
|
||||
ModTime: now,
|
||||
AccessTime: now,
|
||||
ChangeTime: now,
|
||||
Typeflag: tar.TypeFifo,
|
||||
Name: prefix + name,
|
||||
ModTime: now,
|
||||
Format: format,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func prefetchLandmark() tarEntry {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
if err := w.WriteHeader(&tar.Header{
|
||||
Name: PrefetchLandmark,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len([]byte{landmarkContents})),
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1861,11 +1935,12 @@ func prefetchLandmark() tarEntry {
|
||||
}
|
||||
|
||||
func noPrefetchLandmark() tarEntry {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
if err := w.WriteHeader(&tar.Header{
|
||||
Name: NoPrefetchLandmark,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len([]byte{landmarkContents})),
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1899,11 +1974,12 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin
|
||||
n += size
|
||||
}
|
||||
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string) error {
|
||||
return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error {
|
||||
if err := w.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: prefix + name,
|
||||
Size: int64(len(content)),
|
||||
Format: format,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
vendor/github.com/containerd/stargz-snapshotter/estargz/types.go (6 changes, generated, vendored)

@@ -290,7 +290,7 @@ type Compressor interface {
WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
}

// Deompressor represents the helper mothods to be used for parsing eStargz.
// Decompressor represents the helper mothods to be used for parsing eStargz.
type Decompressor interface {
// Reader returns ReadCloser to be used for decompressing file payload.
Reader(r io.Reader) (io.ReadCloser, error)
@@ -299,10 +299,12 @@ type Decompressor interface {
FooterSize() int64

// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
// the top until the TOC JSON).
//
// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
ParseFooter(p []byte) (tocOffset, tocSize int64, err error)
ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)

// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
// of the underlying blob that has the range specified by ParseFooter method.
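Not part of the upstream diff: this interface change is the incompatibility a library consumer has to absorb when moving from 0.8.0 to 0.9.0, since any custom Decompressor must now also report blobPayloadSize. A skeletal, non-functional sketch of the interface as it appears in this diff, assuming the four methods shown here are the complete method set:

package main

import (
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

// stubDecompressor only illustrates the v0.9.0 method set; a real
// implementation would understand its own footer and TOC encoding.
type stubDecompressor struct{}

func (stubDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
	return io.NopCloser(r), nil
}

func (stubDecompressor) FooterSize() int64 { return 0 }

// ParseFooter gains the leading blobPayloadSize return value in v0.9.0.
func (stubDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
	return 0, 0, 0, fmt.Errorf("not implemented")
}

func (stubDecompressor) ParseTOC(r io.Reader) (toc *estargz.JTOC, tocDgst digest.Digest, err error) {
	return nil, "", fmt.Errorf("not implemented")
}

var _ estargz.Decompressor = stubDecompressor{}

func main() {}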
vendor/github.com/vbatts/tar-split/LICENSE (28 lines, generated, vendored, new file)

@@ -0,0 +1,28 @@
Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/vbatts/tar-split/archive/tar/common.go (723 lines, generated, vendored, new file)
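Not part of the upstream diff: tar-split's fork of archive/tar is vendored below because the lossless writer in estargz.go relies on its raw-byte accounting, which the standard library reader does not expose. A tiny sketch of that feature in isolation; the empty input buffer is only for illustration.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	tar "github.com/vbatts/tar-split/archive/tar"
)

func main() {
	var src bytes.Buffer // assumed to hold a tar stream; empty here

	tr := tar.NewReader(&src)
	// RawAccounting keeps the exact header and padding bytes around so they
	// can be copied through unchanged, as estargz's lossless mode does.
	tr.RawAccounting = true
	for {
		if _, err := tr.Next(); err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		fmt.Printf("raw bytes consumed for this header: %d\n", len(tr.RawBytes()))
	}
}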
@ -0,0 +1,723 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tar implements access to tar archives.
|
||||
//
|
||||
// Tape archives (tar) are a file format for storing a sequence of files that
|
||||
// can be read and written in a streaming manner.
|
||||
// This package aims to cover most variations of the format,
|
||||
// including those produced by GNU and BSD tar tools.
|
||||
package tar
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
|
||||
// architectures. If a large value is encountered when decoding, the result
|
||||
// stored in Header will be the truncated version.
|
||||
|
||||
var (
|
||||
ErrHeader = errors.New("archive/tar: invalid tar header")
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errMissData = errors.New("archive/tar: sparse file references non-existent data")
|
||||
errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
|
||||
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
|
||||
)
|
||||
|
||||
type headerError []string
|
||||
|
||||
func (he headerError) Error() string {
|
||||
const prefix = "archive/tar: cannot encode header"
|
||||
var ss []string
|
||||
for _, s := range he {
|
||||
if s != "" {
|
||||
ss = append(ss, s)
|
||||
}
|
||||
}
|
||||
if len(ss) == 0 {
|
||||
return prefix
|
||||
}
|
||||
return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and "))
|
||||
}
|
||||
|
||||
// Type flags for Header.Typeflag.
|
||||
const (
|
||||
// Type '0' indicates a regular file.
|
||||
TypeReg = '0'
|
||||
TypeRegA = '\x00' // Deprecated: Use TypeReg instead.
|
||||
|
||||
// Type '1' to '6' are header-only flags and may not have a data body.
|
||||
TypeLink = '1' // Hard link
|
||||
TypeSymlink = '2' // Symbolic link
|
||||
TypeChar = '3' // Character device node
|
||||
TypeBlock = '4' // Block device node
|
||||
TypeDir = '5' // Directory
|
||||
TypeFifo = '6' // FIFO node
|
||||
|
||||
// Type '7' is reserved.
|
||||
TypeCont = '7'
|
||||
|
||||
// Type 'x' is used by the PAX format to store key-value records that
|
||||
// are only relevant to the next file.
|
||||
// This package transparently handles these types.
|
||||
TypeXHeader = 'x'
|
||||
|
||||
// Type 'g' is used by the PAX format to store key-value records that
|
||||
// are relevant to all subsequent files.
|
||||
// This package only supports parsing and composing such headers,
|
||||
// but does not currently support persisting the global state across files.
|
||||
TypeXGlobalHeader = 'g'
|
||||
|
||||
// Type 'S' indicates a sparse file in the GNU format.
|
||||
TypeGNUSparse = 'S'
|
||||
|
||||
// Types 'L' and 'K' are used by the GNU format for a meta file
|
||||
// used to store the path or link name for the next file.
|
||||
// This package transparently handles these types.
|
||||
TypeGNULongName = 'L'
|
||||
TypeGNULongLink = 'K'
|
||||
)
|
||||
|
||||
// Keywords for PAX extended header records.
|
||||
const (
|
||||
paxNone = "" // Indicates that no PAX key is suitable
|
||||
paxPath = "path"
|
||||
paxLinkpath = "linkpath"
|
||||
paxSize = "size"
|
||||
paxUid = "uid"
|
||||
paxGid = "gid"
|
||||
paxUname = "uname"
|
||||
paxGname = "gname"
|
||||
paxMtime = "mtime"
|
||||
paxAtime = "atime"
|
||||
paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid
|
||||
paxCharset = "charset" // Currently unused
|
||||
paxComment = "comment" // Currently unused
|
||||
|
||||
paxSchilyXattr = "SCHILY.xattr."
|
||||
|
||||
// Keywords for GNU sparse files in a PAX extended header.
|
||||
paxGNUSparse = "GNU.sparse."
|
||||
paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
|
||||
paxGNUSparseOffset = "GNU.sparse.offset"
|
||||
paxGNUSparseNumBytes = "GNU.sparse.numbytes"
|
||||
paxGNUSparseMap = "GNU.sparse.map"
|
||||
paxGNUSparseName = "GNU.sparse.name"
|
||||
paxGNUSparseMajor = "GNU.sparse.major"
|
||||
paxGNUSparseMinor = "GNU.sparse.minor"
|
||||
paxGNUSparseSize = "GNU.sparse.size"
|
||||
paxGNUSparseRealSize = "GNU.sparse.realsize"
|
||||
)
|
||||
|
||||
// basicKeys is a set of the PAX keys for which we have built-in support.
|
||||
// This does not contain "charset" or "comment", which are both PAX-specific,
|
||||
// so adding them as first-class features of Header is unlikely.
|
||||
// Users can use the PAXRecords field to set it themselves.
|
||||
var basicKeys = map[string]bool{
|
||||
paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
|
||||
paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
|
||||
}
|
||||
|
||||
// A Header represents a single header in a tar archive.
|
||||
// Some fields may not be populated.
|
||||
//
|
||||
// For forward compatibility, users that retrieve a Header from Reader.Next,
|
||||
// mutate it in some ways, and then pass it back to Writer.WriteHeader
|
||||
// should do so by creating a new Header and copying the fields
|
||||
// that they are interested in preserving.
|
||||
type Header struct {
|
||||
// Typeflag is the type of header entry.
|
||||
// The zero value is automatically promoted to either TypeReg or TypeDir
|
||||
// depending on the presence of a trailing slash in Name.
|
||||
Typeflag byte
|
||||
|
||||
Name string // Name of file entry
|
||||
Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
|
||||
|
||||
Size int64 // Logical file size in bytes
|
||||
Mode int64 // Permission and mode bits
|
||||
Uid int // User ID of owner
|
||||
Gid int // Group ID of owner
|
||||
Uname string // User name of owner
|
||||
Gname string // Group name of owner
|
||||
|
||||
// If the Format is unspecified, then Writer.WriteHeader rounds ModTime
|
||||
// to the nearest second and ignores the AccessTime and ChangeTime fields.
|
||||
//
|
||||
// To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
|
||||
// To use sub-second resolution, specify the Format as PAX.
|
||||
ModTime time.Time // Modification time
|
||||
AccessTime time.Time // Access time (requires either PAX or GNU support)
|
||||
ChangeTime time.Time // Change time (requires either PAX or GNU support)
|
||||
|
||||
Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
|
||||
Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
|
||||
|
||||
// Xattrs stores extended attributes as PAX records under the
|
||||
// "SCHILY.xattr." namespace.
|
||||
//
|
||||
// The following are semantically equivalent:
|
||||
// h.Xattrs[key] = value
|
||||
// h.PAXRecords["SCHILY.xattr."+key] = value
|
||||
//
|
||||
// When Writer.WriteHeader is called, the contents of Xattrs will take
|
||||
// precedence over those in PAXRecords.
|
||||
//
|
||||
// Deprecated: Use PAXRecords instead.
|
||||
Xattrs map[string]string
|
||||
|
||||
// PAXRecords is a map of PAX extended header records.
|
||||
//
|
||||
// User-defined records should have keys of the following form:
|
||||
// VENDOR.keyword
|
||||
// Where VENDOR is some namespace in all uppercase, and keyword may
|
||||
// not contain the '=' character (e.g., "GOLANG.pkg.version").
|
||||
// The key and value should be non-empty UTF-8 strings.
|
||||
//
|
||||
// When Writer.WriteHeader is called, PAX records derived from the
|
||||
// other fields in Header take precedence over PAXRecords.
|
||||
PAXRecords map[string]string
|
||||
|
||||
// Format specifies the format of the tar header.
|
||||
//
|
||||
// This is set by Reader.Next as a best-effort guess at the format.
|
||||
// Since the Reader liberally reads some non-compliant files,
|
||||
// it is possible for this to be FormatUnknown.
|
||||
//
|
||||
// If the format is unspecified when Writer.WriteHeader is called,
|
||||
// then it uses the first format (in the order of USTAR, PAX, GNU)
|
||||
// capable of encoding this Header (see Format).
|
||||
Format Format
|
||||
}
|
||||
|
||||
// sparseEntry represents a Length-sized fragment at Offset in the file.
|
||||
type sparseEntry struct{ Offset, Length int64 }
|
||||
|
||||
func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length }
|
||||
|
||||
// A sparse file can be represented as either a sparseDatas or a sparseHoles.
|
||||
// As long as the total size is known, they are equivalent and one can be
|
||||
// converted to the other form and back. The various tar formats with sparse
|
||||
// file support represent sparse files in the sparseDatas form. That is, they
|
||||
// specify the fragments in the file that have data, and treat everything else as
|
||||
// having zero bytes. As such, the encoding and decoding logic in this package
|
||||
// deals with sparseDatas.
|
||||
//
|
||||
// However, the external API uses sparseHoles instead of sparseDatas because the
|
||||
// zero value of sparseHoles logically represents a normal file (i.e., there are
|
||||
// no holes in it). On the other hand, the zero value of sparseDatas implies
|
||||
// that the file has no data in it, which is rather odd.
|
||||
//
|
||||
// As an example, if the underlying raw file contains the 8-byte data:
|
||||
// var compactFile = "abcdefgh"
|
||||
//
|
||||
// And the sparse map has the following entries:
|
||||
// var spd sparseDatas = []sparseEntry{
|
||||
// {Offset: 2, Length: 5}, // Data fragment for 2..6
|
||||
// {Offset: 18, Length: 3}, // Data fragment for 18..20
|
||||
// }
|
||||
// var sph sparseHoles = []sparseEntry{
|
||||
// {Offset: 0, Length: 2}, // Hole fragment for 0..1
|
||||
// {Offset: 7, Length: 11}, // Hole fragment for 7..17
|
||||
// {Offset: 21, Length: 4}, // Hole fragment for 21..24
|
||||
// }
|
||||
//
|
||||
// Then the content of the resulting sparse file with a Header.Size of 25 is:
|
||||
// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
|
||||
type (
|
||||
sparseDatas []sparseEntry
|
||||
sparseHoles []sparseEntry
|
||||
)
|
||||
|
||||
// validateSparseEntries reports whether sp is a valid sparse map.
|
||||
// It does not matter whether sp represents data fragments or hole fragments.
|
||||
func validateSparseEntries(sp []sparseEntry, size int64) bool {
|
||||
// Validate all sparse entries. These are the same checks as performed by
|
||||
// the BSD tar utility.
|
||||
if size < 0 {
|
||||
return false
|
||||
}
|
||||
var pre sparseEntry
|
||||
for _, cur := range sp {
|
||||
switch {
|
||||
case cur.Offset < 0 || cur.Length < 0:
|
||||
return false // Negative values are never okay
|
||||
case cur.Offset > math.MaxInt64-cur.Length:
|
||||
return false // Integer overflow with large length
|
||||
case cur.endOffset() > size:
|
||||
return false // Region extends beyond the actual size
|
||||
case pre.endOffset() > cur.Offset:
|
||||
return false // Regions cannot overlap and must be in order
|
||||
}
|
||||
pre = cur
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// alignSparseEntries mutates src and returns dst where each fragment's
|
||||
// starting offset is aligned up to the nearest block edge, and each
|
||||
// ending offset is aligned down to the nearest block edge.
|
||||
//
|
||||
// Even though the Go tar Reader and the BSD tar utility can handle entries
|
||||
// with arbitrary offsets and lengths, the GNU tar utility can only handle
|
||||
// offsets and lengths that are multiples of blockSize.
|
||||
func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
|
||||
dst := src[:0]
|
||||
for _, s := range src {
|
||||
pos, end := s.Offset, s.endOffset()
|
||||
pos += blockPadding(+pos) // Round-up to nearest blockSize
|
||||
if end != size {
|
||||
end -= blockPadding(-end) // Round-down to nearest blockSize
|
||||
}
|
||||
if pos < end {
|
||||
dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// invertSparseEntries converts a sparse map from one form to the other.
|
||||
// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
|
||||
// The input must have been already validated.
|
||||
//
|
||||
// This function mutates src and returns a normalized map where:
|
||||
// * adjacent fragments are coalesced together
|
||||
// * only the last fragment may be empty
|
||||
// * the endOffset of the last fragment is the total size
|
||||
func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
|
||||
dst := src[:0]
|
||||
var pre sparseEntry
|
||||
for _, cur := range src {
|
||||
if cur.Length == 0 {
|
||||
continue // Skip empty fragments
|
||||
}
|
||||
pre.Length = cur.Offset - pre.Offset
|
||||
if pre.Length > 0 {
|
||||
dst = append(dst, pre) // Only add non-empty fragments
|
||||
}
|
||||
pre.Offset = cur.endOffset()
|
||||
}
|
||||
pre.Length = size - pre.Offset // Possibly the only empty fragment
|
||||
return append(dst, pre)
|
||||
}
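As a check on the example in the type comment above, a small in-package sketch (illustrative only, not part of the vendored file) converting that sparseDatas map into the equivalent sparseHoles:

// Illustrative sketch: the sparseDatas example from the comment on the type
// declaration above, inverted into the matching sparseHoles for Size = 25.
func exampleInvertSparseEntries() {
	spd := sparseDatas{
		{Offset: 2, Length: 5},  // data for bytes 2..6
		{Offset: 18, Length: 3}, // data for bytes 18..20
	}
	if !validateSparseEntries(spd, 25) {
		panic("example sparse map should validate")
	}
	// invertSparseEntries mutates its input, so pass a copy.
	sph := invertSparseEntries(append(sparseDatas(nil), spd...), 25)
	fmt.Println(sph)
	// [{0 2} {7 11} {21 4}]
}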
|
||||
|
||||
// fileState tracks the number of logical (includes sparse holes) and physical
|
||||
// (actual in tar archive) bytes remaining for the current file.
|
||||
//
|
||||
// Invariant: LogicalRemaining >= PhysicalRemaining
|
||||
type fileState interface {
|
||||
LogicalRemaining() int64
|
||||
PhysicalRemaining() int64
|
||||
}
|
||||
|
||||
// allowedFormats determines which formats can be used.
|
||||
// The value returned is the logical OR of multiple possible formats.
|
||||
// If the value is FormatUnknown, then the input Header cannot be encoded
|
||||
// and an error is returned explaining why.
|
||||
//
|
||||
// As a by-product of checking the fields, this function returns paxHdrs, which
|
||||
// contain all fields that could not be directly encoded.
|
||||
// A value receiver ensures that this method does not mutate the source Header.
|
||||
func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
|
||||
format = FormatUSTAR | FormatPAX | FormatGNU
|
||||
paxHdrs = make(map[string]string)
|
||||
|
||||
var whyNoUSTAR, whyNoPAX, whyNoGNU string
|
||||
var preferPAX bool // Prefer PAX over USTAR
|
||||
verifyString := func(s string, size int, name, paxKey string) {
|
||||
// NUL-terminator is optional for path and linkpath.
|
||||
// Technically, it is required for uname and gname,
|
||||
// but neither GNU nor BSD tar checks for it.
|
||||
tooLong := len(s) > size
|
||||
allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
|
||||
if hasNUL(s) || (tooLong && !allowLongGNU) {
|
||||
whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
|
||||
format.mustNotBe(FormatGNU)
|
||||
}
|
||||
if !isASCII(s) || tooLong {
|
||||
canSplitUSTAR := paxKey == paxPath
|
||||
if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
|
||||
whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
|
||||
format.mustNotBe(FormatUSTAR)
|
||||
}
|
||||
if paxKey == paxNone {
|
||||
whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
|
||||
format.mustNotBe(FormatPAX)
|
||||
} else {
|
||||
paxHdrs[paxKey] = s
|
||||
}
|
||||
}
|
||||
if v, ok := h.PAXRecords[paxKey]; ok && v == s {
|
||||
paxHdrs[paxKey] = v
|
||||
}
|
||||
}
|
||||
verifyNumeric := func(n int64, size int, name, paxKey string) {
|
||||
if !fitsInBase256(size, n) {
|
||||
whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
|
||||
format.mustNotBe(FormatGNU)
|
||||
}
|
||||
if !fitsInOctal(size, n) {
|
||||
whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
|
||||
format.mustNotBe(FormatUSTAR)
|
||||
if paxKey == paxNone {
|
||||
whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
|
||||
format.mustNotBe(FormatPAX)
|
||||
} else {
|
||||
paxHdrs[paxKey] = strconv.FormatInt(n, 10)
|
||||
}
|
||||
}
|
||||
if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
|
||||
paxHdrs[paxKey] = v
|
||||
}
|
||||
}
|
||||
verifyTime := func(ts time.Time, size int, name, paxKey string) {
|
||||
if ts.IsZero() {
|
||||
return // Always okay
|
||||
}
|
||||
if !fitsInBase256(size, ts.Unix()) {
|
||||
whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
|
||||
format.mustNotBe(FormatGNU)
|
||||
}
|
||||
isMtime := paxKey == paxMtime
|
||||
fitsOctal := fitsInOctal(size, ts.Unix())
|
||||
if (isMtime && !fitsOctal) || !isMtime {
|
||||
whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
|
||||
format.mustNotBe(FormatUSTAR)
|
||||
}
|
||||
needsNano := ts.Nanosecond() != 0
|
||||
if !isMtime || !fitsOctal || needsNano {
|
||||
preferPAX = true // USTAR may truncate sub-second measurements
|
||||
if paxKey == paxNone {
|
||||
whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
|
||||
format.mustNotBe(FormatPAX)
|
||||
} else {
|
||||
paxHdrs[paxKey] = formatPAXTime(ts)
|
||||
}
|
||||
}
|
||||
if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
|
||||
paxHdrs[paxKey] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Check basic fields.
|
||||
var blk block
|
||||
v7 := blk.V7()
|
||||
ustar := blk.USTAR()
|
||||
gnu := blk.GNU()
|
||||
verifyString(h.Name, len(v7.Name()), "Name", paxPath)
|
||||
verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
|
||||
verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
|
||||
verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
|
||||
verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
|
||||
verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
|
||||
verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
|
||||
verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
|
||||
verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
|
||||
verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
|
||||
verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
|
||||
verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
|
||||
verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
|
||||
|
||||
// Check for header-only types.
|
||||
var whyOnlyPAX, whyOnlyGNU string
|
||||
switch h.Typeflag {
|
||||
case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
|
||||
// Exclude TypeLink and TypeSymlink, since they may reference directories.
|
||||
if strings.HasSuffix(h.Name, "/") {
|
||||
return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
|
||||
}
|
||||
case TypeXHeader, TypeGNULongName, TypeGNULongLink:
|
||||
return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
|
||||
case TypeXGlobalHeader:
|
||||
h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
|
||||
if !reflect.DeepEqual(h, h2) {
|
||||
return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
|
||||
}
|
||||
whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
|
||||
format.mayOnlyBe(FormatPAX)
|
||||
}
|
||||
if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
|
||||
return FormatUnknown, nil, headerError{"negative size on header-only type"}
|
||||
}
|
||||
|
||||
// Check PAX records.
|
||||
if len(h.Xattrs) > 0 {
|
||||
for k, v := range h.Xattrs {
|
||||
paxHdrs[paxSchilyXattr+k] = v
|
||||
}
|
||||
whyOnlyPAX = "only PAX supports Xattrs"
|
||||
format.mayOnlyBe(FormatPAX)
|
||||
}
|
||||
if len(h.PAXRecords) > 0 {
|
||||
for k, v := range h.PAXRecords {
|
||||
switch _, exists := paxHdrs[k]; {
|
||||
case exists:
|
||||
continue // Do not overwrite existing records
|
||||
case h.Typeflag == TypeXGlobalHeader:
|
||||
paxHdrs[k] = v // Copy all records
|
||||
case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
|
||||
paxHdrs[k] = v // Ignore local records that may conflict
|
||||
}
|
||||
}
|
||||
whyOnlyPAX = "only PAX supports PAXRecords"
|
||||
format.mayOnlyBe(FormatPAX)
|
||||
}
|
||||
for k, v := range paxHdrs {
|
||||
if !validPAXRecord(k, v) {
|
||||
return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(dsnet): Re-enable this when adding sparse support.
|
||||
// See https://golang.org/issue/22735
|
||||
/*
|
||||
// Check sparse files.
|
||||
if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
|
||||
if isHeaderOnlyType(h.Typeflag) {
|
||||
return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
|
||||
}
|
||||
if !validateSparseEntries(h.SparseHoles, h.Size) {
|
||||
return FormatUnknown, nil, headerError{"invalid sparse holes"}
|
||||
}
|
||||
if h.Typeflag == TypeGNUSparse {
|
||||
whyOnlyGNU = "only GNU supports TypeGNUSparse"
|
||||
format.mayOnlyBe(FormatGNU)
|
||||
} else {
|
||||
whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
|
||||
format.mustNotBe(FormatGNU)
|
||||
}
|
||||
whyNoUSTAR = "USTAR does not support sparse files"
|
||||
format.mustNotBe(FormatUSTAR)
|
||||
}
|
||||
*/
|
||||
|
||||
// Check desired format.
|
||||
if wantFormat := h.Format; wantFormat != FormatUnknown {
|
||||
if wantFormat.has(FormatPAX) && !preferPAX {
|
||||
wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
|
||||
}
|
||||
format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
|
||||
}
|
||||
if format == FormatUnknown {
|
||||
switch h.Format {
|
||||
case FormatUSTAR:
|
||||
err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
|
||||
case FormatPAX:
|
||||
err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
|
||||
case FormatGNU:
|
||||
err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
|
||||
default:
|
||||
err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
|
||||
}
|
||||
}
|
||||
return format, paxHdrs, err
|
||||
}
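A hedged in-package sketch (illustrative, not part of the vendored file) of the effect of these checks: a 200-byte Name with no slash to split on cannot be stored by USTAR, while PAX carries it as a "path" record and GNU uses its long-name mechanism:

// Illustrative sketch: a long, unsplittable Name rules out USTAR but is fine
// for PAX (via a "path" record) and GNU (via a long-name meta entry).
func exampleAllowedFormatsLongName() {
	h := Header{
		Typeflag: TypeReg,
		Name:     strings.Repeat("a", 200), // no '/' to split into USTAR prefix+name
		Mode:     0644,
	}
	format, paxHdrs, err := h.allowedFormats()
	fmt.Println(format.has(FormatUSTAR), format.has(FormatPAX), format.has(FormatGNU), err)
	// false true true <nil>
	fmt.Println(paxHdrs[paxPath] == h.Name)
	// true
}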
|
||||
|
||||
// FileInfo returns an os.FileInfo for the Header.
|
||||
func (h *Header) FileInfo() os.FileInfo {
|
||||
return headerFileInfo{h}
|
||||
}
|
||||
|
||||
// headerFileInfo implements os.FileInfo.
|
||||
type headerFileInfo struct {
|
||||
h *Header
|
||||
}
|
||||
|
||||
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
|
||||
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
|
||||
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
|
||||
func (fi headerFileInfo) Sys() interface{} { return fi.h }
|
||||
|
||||
// Name returns the base name of the file.
|
||||
func (fi headerFileInfo) Name() string {
|
||||
if fi.IsDir() {
|
||||
return path.Base(path.Clean(fi.h.Name))
|
||||
}
|
||||
return path.Base(fi.h.Name)
|
||||
}
|
||||
|
||||
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||
func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||
// Set file permission bits.
|
||||
mode = os.FileMode(fi.h.Mode).Perm()
|
||||
|
||||
// Set setuid, setgid and sticky bits.
|
||||
if fi.h.Mode&c_ISUID != 0 {
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if fi.h.Mode&c_ISGID != 0 {
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if fi.h.Mode&c_ISVTX != 0 {
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
|
||||
// Set file mode bits; clear perm, setuid, setgid, and sticky bits.
|
||||
switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
|
||||
case c_ISDIR:
|
||||
mode |= os.ModeDir
|
||||
case c_ISFIFO:
|
||||
mode |= os.ModeNamedPipe
|
||||
case c_ISLNK:
|
||||
mode |= os.ModeSymlink
|
||||
case c_ISBLK:
|
||||
mode |= os.ModeDevice
|
||||
case c_ISCHR:
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
case c_ISSOCK:
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
|
||||
switch fi.h.Typeflag {
|
||||
case TypeSymlink:
|
||||
mode |= os.ModeSymlink
|
||||
case TypeChar:
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
case TypeBlock:
|
||||
mode |= os.ModeDevice
|
||||
case TypeDir:
|
||||
mode |= os.ModeDir
|
||||
case TypeFifo:
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
|
||||
return mode
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi os.FileInfo, h *Header) error
|
||||
|
||||
const (
|
||||
// Mode constants from the USTAR spec:
|
||||
// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
|
||||
// Common Unix mode constants; these are not defined in any common tar standard.
|
||||
// Header.FileInfo understands these, but FileInfoHeader will never produce these.
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
// FileInfoHeader creates a partially-populated Header from fi.
|
||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||
// If fi describes a directory, a slash is appended to the name.
|
||||
//
|
||||
// Since os.FileInfo's Name method only returns the base name of
|
||||
// the file it describes, it may be necessary to modify Header.Name
|
||||
// to provide the full path name of the file.
|
||||
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("archive/tar: FileInfo is nil")
|
||||
}
|
||||
fm := fi.Mode()
|
||||
h := &Header{
|
||||
Name: fi.Name(),
|
||||
ModTime: fi.ModTime(),
|
||||
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||
}
|
||||
switch {
|
||||
case fm.IsRegular():
|
||||
h.Typeflag = TypeReg
|
||||
h.Size = fi.Size()
|
||||
case fi.IsDir():
|
||||
h.Typeflag = TypeDir
|
||||
h.Name += "/"
|
||||
case fm&os.ModeSymlink != 0:
|
||||
h.Typeflag = TypeSymlink
|
||||
h.Linkname = link
|
||||
case fm&os.ModeDevice != 0:
|
||||
if fm&os.ModeCharDevice != 0 {
|
||||
h.Typeflag = TypeChar
|
||||
} else {
|
||||
h.Typeflag = TypeBlock
|
||||
}
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
h.Typeflag = TypeFifo
|
||||
case fm&os.ModeSocket != 0:
|
||||
return nil, fmt.Errorf("archive/tar: sockets not supported")
|
||||
default:
|
||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||
}
|
||||
if fm&os.ModeSetuid != 0 {
|
||||
h.Mode |= c_ISUID
|
||||
}
|
||||
if fm&os.ModeSetgid != 0 {
|
||||
h.Mode |= c_ISGID
|
||||
}
|
||||
if fm&os.ModeSticky != 0 {
|
||||
h.Mode |= c_ISVTX
|
||||
}
|
||||
// If possible, populate additional fields from OS-specific
|
||||
// FileInfo fields.
|
||||
if sys, ok := fi.Sys().(*Header); ok {
|
||||
// This FileInfo came from a Header (not the OS). Use the
|
||||
// original Header to populate all remaining fields.
|
||||
h.Uid = sys.Uid
|
||||
h.Gid = sys.Gid
|
||||
h.Uname = sys.Uname
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
if sys.PAXRecords != nil {
|
||||
h.PAXRecords = make(map[string]string)
|
||||
for k, v := range sys.PAXRecords {
|
||||
h.PAXRecords[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
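A short usage sketch (hypothetical values, written as it might appear in an in-package test) of the round trip enabled by the fi.Sys().(*Header) branch above; note that only the base name survives, as the doc comment warns:

// Illustrative sketch: ownership and timestamps survive a FileInfo round trip
// because headerFileInfo.Sys returns the original *Header. Only the base name
// is kept, so callers may need to restore the full path afterwards.
func exampleFileInfoRoundTrip() {
	orig := &Header{
		Typeflag: TypeReg,
		Name:     "dir/file.txt",
		Size:     42,
		Mode:     0640,
		Uid:      1000,
		Uname:    "builder",
		ModTime:  time.Unix(1600000000, 0),
	}
	copied, err := FileInfoHeader(orig.FileInfo(), "")
	if err != nil {
		panic(err)
	}
	fmt.Println(copied.Name, copied.Size, copied.Uid, copied.Uname)
	// file.txt 42 1000 builder
}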
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
switch flag {
|
||||
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func min(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
303	vendor/github.com/vbatts/tar-split/archive/tar/format.go  (generated, vendored, normal file)
@@ -0,0 +1,303 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import "strings"
|
||||
|
||||
// Format represents the tar archive format.
|
||||
//
|
||||
// The original tar format was introduced in Unix V7.
|
||||
// Since then, there have been multiple competing formats attempting to
|
||||
// standardize or extend the V7 format to overcome its limitations.
|
||||
// The most common formats are the USTAR, PAX, and GNU formats,
|
||||
// each with their own advantages and limitations.
|
||||
//
|
||||
// The following table captures the capabilities of each format:
|
||||
//
|
||||
// | USTAR | PAX | GNU
|
||||
// ------------------+--------+-----------+----------
|
||||
// Name | 256B | unlimited | unlimited
|
||||
// Linkname | 100B | unlimited | unlimited
|
||||
// Size | uint33 | unlimited | uint89
|
||||
// Mode | uint21 | uint21 | uint57
|
||||
// Uid/Gid | uint21 | unlimited | uint57
|
||||
// Uname/Gname | 32B | unlimited | 32B
|
||||
// ModTime | uint33 | unlimited | int89
|
||||
// AccessTime | n/a | unlimited | int89
|
||||
// ChangeTime | n/a | unlimited | int89
|
||||
// Devmajor/Devminor | uint21 | uint21 | uint57
|
||||
// ------------------+--------+-----------+----------
|
||||
// string encoding | ASCII | UTF-8 | binary
|
||||
// sub-second times | no | yes | no
|
||||
// sparse files | no | yes | yes
|
||||
//
|
||||
// The table's upper portion shows the Header fields, where each format reports
|
||||
// the maximum number of bytes allowed for each string field and
|
||||
// the integer type used to store each numeric field
|
||||
// (where timestamps are stored as the number of seconds since the Unix epoch).
|
||||
//
|
||||
// The table's lower portion shows specialized features of each format,
|
||||
// such as supported string encodings, support for sub-second timestamps,
|
||||
// or support for sparse files.
|
||||
//
|
||||
// The Writer currently provides no support for sparse files.
|
||||
type Format int
|
||||
|
||||
// Constants to identify various tar formats.
|
||||
const (
|
||||
// Deliberately hide the meaning of constants from public API.
|
||||
_ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
|
||||
|
||||
// FormatUnknown indicates that the format is unknown.
|
||||
FormatUnknown
|
||||
|
||||
// The format of the original Unix V7 tar tool prior to standardization.
|
||||
formatV7
|
||||
|
||||
// FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
|
||||
//
|
||||
// While this format is compatible with most tar readers,
|
||||
// the format has several limitations making it unsuitable for some usages.
|
||||
// Most notably, it cannot support sparse files, files larger than 8GiB,
|
||||
// filenames larger than 256 characters, and non-ASCII filenames.
|
||||
//
|
||||
// Reference:
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
|
||||
FormatUSTAR
|
||||
|
||||
// FormatPAX represents the PAX header format defined in POSIX.1-2001.
|
||||
//
|
||||
// PAX extends USTAR by writing a special file with Typeflag TypeXHeader
|
||||
// preceding the original header. This file contains a set of key-value
|
||||
// records, which are used to overcome USTAR's shortcomings, in addition to
|
||||
// providing the ability to have sub-second resolution for timestamps.
|
||||
//
|
||||
// Some newer formats add their own extensions to PAX by defining their
|
||||
// own keys and assigning certain semantic meaning to the associated values.
|
||||
// For example, sparse file support in PAX is implemented using keys
|
||||
// defined by the GNU manual (e.g., "GNU.sparse.map").
|
||||
//
|
||||
// Reference:
|
||||
// http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
|
||||
FormatPAX
|
||||
|
||||
// FormatGNU represents the GNU header format.
|
||||
//
|
||||
// The GNU header format is older than the USTAR and PAX standards and
|
||||
// is not compatible with them. The GNU format supports
|
||||
// arbitrary file sizes, filenames of arbitrary encoding and length,
|
||||
// sparse files, and other features.
|
||||
//
|
||||
// It is recommended that PAX be chosen over GNU unless the target
|
||||
// application can only parse GNU formatted archives.
|
||||
//
|
||||
// Reference:
|
||||
// https://www.gnu.org/software/tar/manual/html_node/Standard.html
|
||||
FormatGNU
|
||||
|
||||
// Schily's tar format, which is incompatible with USTAR.
|
||||
// This does not cover STAR extensions to the PAX format; these fall under
|
||||
// the PAX format.
|
||||
formatSTAR
|
||||
|
||||
formatMax
|
||||
)
|
||||
|
||||
func (f Format) has(f2 Format) bool { return f&f2 != 0 }
|
||||
func (f *Format) mayBe(f2 Format) { *f |= f2 }
|
||||
func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
|
||||
func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
|
||||
|
||||
var formatNames = map[Format]string{
|
||||
formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
|
||||
}
|
||||
|
||||
func (f Format) String() string {
|
||||
var ss []string
|
||||
for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
|
||||
if f.has(f2) {
|
||||
ss = append(ss, formatNames[f2])
|
||||
}
|
||||
}
|
||||
switch len(ss) {
|
||||
case 0:
|
||||
return "<unknown>"
|
||||
case 1:
|
||||
return ss[0]
|
||||
default:
|
||||
return "(" + strings.Join(ss, " | ") + ")"
|
||||
}
|
||||
}
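A small sketch, as it might sit in an in-package test file (fmt assumed to be imported there), showing the bit-set behaviour of Format together with String:

// Illustrative sketch: Format values are bit sets. The helpers above widen or
// narrow them, and String prints multi-bit sets in parentheses.
func exampleFormatBits() {
	f := FormatUSTAR | FormatPAX | FormatGNU
	f.mustNotBe(FormatGNU)             // drop GNU
	f.mayOnlyBe(FormatPAX | FormatGNU) // intersect; only PAX survives
	fmt.Println(f, FormatUSTAR|FormatPAX, FormatUnknown)
	// PAX (USTAR | PAX) <unknown>
}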
|
||||
|
||||
// Magics used to identify various formats.
|
||||
const (
|
||||
magicGNU, versionGNU = "ustar ", " \x00"
|
||||
magicUSTAR, versionUSTAR = "ustar\x00", "00"
|
||||
trailerSTAR = "tar\x00"
|
||||
)
|
||||
|
||||
// Size constants from various tar specifications.
|
||||
const (
|
||||
blockSize = 512 // Size of each block in a tar stream
|
||||
nameSize = 100 // Max length of the name field in USTAR format
|
||||
prefixSize = 155 // Max length of the prefix field in USTAR format
|
||||
)
|
||||
|
||||
// blockPadding computes the number of bytes needed to pad offset up to the
|
||||
// nearest block edge where 0 <= n < blockSize.
|
||||
func blockPadding(offset int64) (n int64) {
|
||||
return -offset & (blockSize - 1)
|
||||
}
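A few concrete values for the bit trick above, assuming blockSize = 512 (illustrative in-package sketch):

// Illustrative sketch: padding needed to reach the next 512-byte boundary.
func exampleBlockPadding() {
	fmt.Println(blockPadding(0), blockPadding(100), blockPadding(512), blockPadding(513))
	// 0 412 0 511
}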
|
||||
|
||||
var zeroBlock block
|
||||
|
||||
type block [blockSize]byte
|
||||
|
||||
// Convert block to any number of formats.
|
||||
func (b *block) V7() *headerV7 { return (*headerV7)(b) }
|
||||
func (b *block) GNU() *headerGNU { return (*headerGNU)(b) }
|
||||
func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) }
|
||||
func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
|
||||
func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
|
||||
|
||||
// GetFormat checks that the block is a valid tar header based on the checksum.
|
||||
// It then attempts to guess the specific format based on magic values.
|
||||
// If the checksum fails, then FormatUnknown is returned.
|
||||
func (b *block) GetFormat() Format {
|
||||
// Verify checksum.
|
||||
var p parser
|
||||
value := p.parseOctal(b.V7().Chksum())
|
||||
chksum1, chksum2 := b.ComputeChecksum()
|
||||
if p.err != nil || (value != chksum1 && value != chksum2) {
|
||||
return FormatUnknown
|
||||
}
|
||||
|
||||
// Guess the magic values.
|
||||
magic := string(b.USTAR().Magic())
|
||||
version := string(b.USTAR().Version())
|
||||
trailer := string(b.STAR().Trailer())
|
||||
switch {
|
||||
case magic == magicUSTAR && trailer == trailerSTAR:
|
||||
return formatSTAR
|
||||
case magic == magicUSTAR:
|
||||
return FormatUSTAR | FormatPAX
|
||||
case magic == magicGNU && version == versionGNU:
|
||||
return FormatGNU
|
||||
default:
|
||||
return formatV7
|
||||
}
|
||||
}
|
||||
|
||||
// SetFormat writes the magic values necessary for specified format
|
||||
// and then updates the checksum accordingly.
|
||||
func (b *block) SetFormat(format Format) {
|
||||
// Set the magic values.
|
||||
switch {
|
||||
case format.has(formatV7):
|
||||
// Do nothing.
|
||||
case format.has(FormatGNU):
|
||||
copy(b.GNU().Magic(), magicGNU)
|
||||
copy(b.GNU().Version(), versionGNU)
|
||||
case format.has(formatSTAR):
|
||||
copy(b.STAR().Magic(), magicUSTAR)
|
||||
copy(b.STAR().Version(), versionUSTAR)
|
||||
copy(b.STAR().Trailer(), trailerSTAR)
|
||||
case format.has(FormatUSTAR | FormatPAX):
|
||||
copy(b.USTAR().Magic(), magicUSTAR)
|
||||
copy(b.USTAR().Version(), versionUSTAR)
|
||||
default:
|
||||
panic("invalid format")
|
||||
}
|
||||
|
||||
// Update checksum.
|
||||
// This field is special in that it is terminated by a NULL then space.
|
||||
var f formatter
|
||||
field := b.V7().Chksum()
|
||||
chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
|
||||
f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
|
||||
field[7] = ' '
|
||||
}
|
||||
|
||||
// ComputeChecksum computes the checksum for the header block.
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
|
||||
// signed byte values.
|
||||
// We compute and return both.
|
||||
func (b *block) ComputeChecksum() (unsigned, signed int64) {
|
||||
for i, c := range b {
|
||||
if 148 <= i && i < 156 {
|
||||
c = ' ' // Treat the checksum field itself as all spaces.
|
||||
}
|
||||
unsigned += int64(c)
|
||||
signed += int64(int8(c))
|
||||
}
|
||||
return unsigned, signed
|
||||
}
|
||||
|
||||
// Reset clears the block with all zeros.
|
||||
func (b *block) Reset() {
|
||||
*b = block{}
|
||||
}
|
||||
|
||||
type headerV7 [blockSize]byte
|
||||
|
||||
func (h *headerV7) Name() []byte { return h[000:][:100] }
|
||||
func (h *headerV7) Mode() []byte { return h[100:][:8] }
|
||||
func (h *headerV7) UID() []byte { return h[108:][:8] }
|
||||
func (h *headerV7) GID() []byte { return h[116:][:8] }
|
||||
func (h *headerV7) Size() []byte { return h[124:][:12] }
|
||||
func (h *headerV7) ModTime() []byte { return h[136:][:12] }
|
||||
func (h *headerV7) Chksum() []byte { return h[148:][:8] }
|
||||
func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
|
||||
func (h *headerV7) LinkName() []byte { return h[157:][:100] }
|
||||
|
||||
type headerGNU [blockSize]byte
|
||||
|
||||
func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerGNU) Magic() []byte { return h[257:][:6] }
|
||||
func (h *headerGNU) Version() []byte { return h[263:][:2] }
|
||||
func (h *headerGNU) UserName() []byte { return h[265:][:32] }
|
||||
func (h *headerGNU) GroupName() []byte { return h[297:][:32] }
|
||||
func (h *headerGNU) DevMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerGNU) DevMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerGNU) AccessTime() []byte { return h[345:][:12] }
|
||||
func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] }
|
||||
func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) }
|
||||
func (h *headerGNU) RealSize() []byte { return h[483:][:12] }
|
||||
|
||||
type headerSTAR [blockSize]byte
|
||||
|
||||
func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerSTAR) Magic() []byte { return h[257:][:6] }
|
||||
func (h *headerSTAR) Version() []byte { return h[263:][:2] }
|
||||
func (h *headerSTAR) UserName() []byte { return h[265:][:32] }
|
||||
func (h *headerSTAR) GroupName() []byte { return h[297:][:32] }
|
||||
func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerSTAR) Prefix() []byte { return h[345:][:131] }
|
||||
func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
|
||||
func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
|
||||
func (h *headerSTAR) Trailer() []byte { return h[508:][:4] }
|
||||
|
||||
type headerUSTAR [blockSize]byte
|
||||
|
||||
func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) }
|
||||
func (h *headerUSTAR) Magic() []byte { return h[257:][:6] }
|
||||
func (h *headerUSTAR) Version() []byte { return h[263:][:2] }
|
||||
func (h *headerUSTAR) UserName() []byte { return h[265:][:32] }
|
||||
func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
|
||||
func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] }
|
||||
func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] }
|
||||
func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] }
|
||||
|
||||
type sparseArray []byte
|
||||
|
||||
func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) }
|
||||
func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] }
|
||||
func (s sparseArray) MaxEntries() int { return len(s) / 24 }
|
||||
|
||||
type sparseElem []byte
|
||||
|
||||
func (s sparseElem) Offset() []byte { return s[00:][:12] }
|
||||
func (s sparseElem) Length() []byte { return s[12:][:12] }
|
923	vendor/github.com/vbatts/tar-split/archive/tar/reader.go  (generated, vendored, normal file)
@@ -0,0 +1,923 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Reader provides sequential access to the contents of a tar archive.
|
||||
// Reader.Next advances to the next file in the archive (including the first),
|
||||
// and then Reader can be treated as an io.Reader to access the file's data.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
pad int64 // Amount of padding (ignored) after current file entry
|
||||
curr fileReader // Reader for current file entry
|
||||
blk block // Buffer to use as temporary local storage
|
||||
|
||||
// err is a persistent error.
|
||||
// It is only the responsibility of every exported method of Reader to
|
||||
// ensure that this error is sticky.
|
||||
err error
|
||||
|
||||
RawAccounting bool // Whether to enable the accounting needed to reassemble the tar from raw bytes; incurs some performance/memory overhead.
|
||||
rawBytes *bytes.Buffer // last raw bits
|
||||
}
|
||||
|
||||
type fileReader interface {
|
||||
io.Reader
|
||||
fileState
|
||||
|
||||
WriteTo(io.Writer) (int64, error)
|
||||
}
|
||||
|
||||
// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
|
||||
// This includes the header and padding.
|
||||
//
|
||||
// This call resets the current rawbytes buffer
|
||||
//
|
||||
// Only works when RawAccounting is enabled; otherwise this returns nil.
|
||||
func (tr *Reader) RawBytes() []byte {
|
||||
if !tr.RawAccounting {
|
||||
return nil
|
||||
}
|
||||
if tr.rawBytes == nil {
|
||||
tr.rawBytes = bytes.NewBuffer(nil)
|
||||
}
|
||||
defer tr.rawBytes.Reset() // if we've read them, then flush them.
|
||||
|
||||
return tr.rawBytes.Bytes()
|
||||
|
||||
}
|
||||
|
||||
// NewReader creates a new Reader reading from r.
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{r: r, curr: &regFileReader{r, 0}}
|
||||
}
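A hedged usage sketch of the Reader loop described in the comments above, including tar-split's RawAccounting/RawBytes extension; the helper name and error handling are illustrative only:

// Illustrative sketch: iterate an archive, collecting entry names and, with
// RawAccounting enabled, the raw header/padding bytes for each entry.
func exampleReadArchive(r io.Reader) ([]string, error) {
	tr := NewReader(r)
	tr.RawAccounting = true // opt in to raw byte accounting
	var names []string
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return names, nil // end of archive
		}
		if err != nil {
			return names, err
		}
		_ = tr.RawBytes() // raw header+padding bytes for this entry; resets the buffer
		names = append(names, hdr.Name)
		// The Reader now acts as an io.Reader limited to this entry's data.
		if _, err := io.Copy(ioutil.Discard, tr); err != nil {
			return names, err
		}
	}
}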
|
||||
|
||||
// Next advances to the next entry in the tar archive.
|
||||
// The Header.Size determines how many bytes can be read for the next file.
|
||||
// Any remaining data in the current file is automatically discarded.
|
||||
//
|
||||
// io.EOF is returned at the end of the input.
|
||||
func (tr *Reader) Next() (*Header, error) {
|
||||
if tr.err != nil {
|
||||
return nil, tr.err
|
||||
}
|
||||
hdr, err := tr.next()
|
||||
tr.err = err
|
||||
return hdr, err
|
||||
}
|
||||
|
||||
func (tr *Reader) next() (*Header, error) {
|
||||
var paxHdrs map[string]string
|
||||
var gnuLongName, gnuLongLink string
|
||||
|
||||
if tr.RawAccounting {
|
||||
if tr.rawBytes == nil {
|
||||
tr.rawBytes = bytes.NewBuffer(nil)
|
||||
} else {
|
||||
tr.rawBytes.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Externally, Next iterates through the tar archive as if it is a series of
|
||||
// files. Internally, the tar format often uses fake "files" to add meta
|
||||
// data that describes the next file. These meta data "files" should not
|
||||
// normally be visible to the outside. As such, this loop iterates through
|
||||
// one or more "header files" until it finds a "normal file".
|
||||
format := FormatUSTAR | FormatPAX | FormatGNU
|
||||
for {
|
||||
// Discard the remainder of the file and any padding.
|
||||
if err := discard(tr, tr.curr.PhysicalRemaining()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n, err := tryReadFull(tr.r, tr.blk[:tr.pad])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tr.RawAccounting {
|
||||
tr.rawBytes.Write(tr.blk[:n])
|
||||
}
|
||||
tr.pad = 0
|
||||
|
||||
hdr, rawHdr, err := tr.readHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := tr.handleRegularFile(hdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
format.mayOnlyBe(hdr.Format)
|
||||
|
||||
// Check for PAX/GNU special headers and files.
|
||||
switch hdr.Typeflag {
|
||||
case TypeXHeader, TypeXGlobalHeader:
|
||||
format.mayOnlyBe(FormatPAX)
|
||||
paxHdrs, err = parsePAX(tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hdr.Typeflag == TypeXGlobalHeader {
|
||||
mergePAX(hdr, paxHdrs)
|
||||
return &Header{
|
||||
Name: hdr.Name,
|
||||
Typeflag: hdr.Typeflag,
|
||||
Xattrs: hdr.Xattrs,
|
||||
PAXRecords: hdr.PAXRecords,
|
||||
Format: format,
|
||||
}, nil
|
||||
}
|
||||
continue // This is a meta header affecting the next header
|
||||
case TypeGNULongName, TypeGNULongLink:
|
||||
format.mayOnlyBe(FormatGNU)
|
||||
realname, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tr.RawAccounting {
|
||||
tr.rawBytes.Write(realname)
|
||||
}
|
||||
|
||||
var p parser
|
||||
switch hdr.Typeflag {
|
||||
case TypeGNULongName:
|
||||
gnuLongName = p.parseString(realname)
|
||||
case TypeGNULongLink:
|
||||
gnuLongLink = p.parseString(realname)
|
||||
}
|
||||
continue // This is a meta header affecting the next header
|
||||
default:
|
||||
// The old GNU sparse format is handled here since it is technically
|
||||
// just a regular file with additional attributes.
|
||||
|
||||
if err := mergePAX(hdr, paxHdrs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if gnuLongName != "" {
|
||||
hdr.Name = gnuLongName
|
||||
}
|
||||
if gnuLongLink != "" {
|
||||
hdr.Linkname = gnuLongLink
|
||||
}
|
||||
if hdr.Typeflag == TypeRegA {
|
||||
if strings.HasSuffix(hdr.Name, "/") {
|
||||
hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
|
||||
} else {
|
||||
hdr.Typeflag = TypeReg
|
||||
}
|
||||
}
|
||||
|
||||
// The extended headers may have updated the size.
|
||||
// Thus, setup the regFileReader again after merging PAX headers.
|
||||
if err := tr.handleRegularFile(hdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Sparse formats rely on being able to read from the logical data
|
||||
// section; there must be a preceding call to handleRegularFile.
|
||||
if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the final guess at the format.
|
||||
if format.has(FormatUSTAR) && format.has(FormatPAX) {
|
||||
format.mayOnlyBe(FormatUSTAR)
|
||||
}
|
||||
hdr.Format = format
|
||||
return hdr, nil // This is a file, so stop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleRegularFile sets up the current file reader and padding such that it
|
||||
// can only read the following logical data section. It will properly handle
|
||||
// special headers that contain no data section.
|
||||
func (tr *Reader) handleRegularFile(hdr *Header) error {
|
||||
nb := hdr.Size
|
||||
if isHeaderOnlyType(hdr.Typeflag) {
|
||||
nb = 0
|
||||
}
|
||||
if nb < 0 {
|
||||
return ErrHeader
|
||||
}
|
||||
|
||||
tr.pad = blockPadding(nb)
|
||||
tr.curr = &regFileReader{r: tr.r, nb: nb}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleSparseFile checks if the current file is a sparse format of any type
|
||||
// and sets the curr reader appropriately.
|
||||
func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
|
||||
var spd sparseDatas
|
||||
var err error
|
||||
if hdr.Typeflag == TypeGNUSparse {
|
||||
spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
|
||||
} else {
|
||||
spd, err = tr.readGNUSparsePAXHeaders(hdr)
|
||||
}
|
||||
|
||||
// If sp is non-nil, then this is a sparse file.
|
||||
// Note that it is possible for len(sp) == 0.
|
||||
if err == nil && spd != nil {
|
||||
if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
|
||||
return ErrHeader
|
||||
}
|
||||
sph := invertSparseEntries(spd, hdr.Size)
|
||||
tr.curr = &sparseFileReader{tr.curr, sph, 0}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
|
||||
// If they are found, then this function reads the sparse map and returns it.
|
||||
// This assumes that 0.0 headers have already been converted to 0.1 headers
|
||||
// by the PAX header parsing logic.
|
||||
func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
|
||||
// Identify the version of GNU headers.
|
||||
var is1x0 bool
|
||||
major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
|
||||
switch {
|
||||
case major == "0" && (minor == "0" || minor == "1"):
|
||||
is1x0 = false
|
||||
case major == "1" && minor == "0":
|
||||
is1x0 = true
|
||||
case major != "" || minor != "":
|
||||
return nil, nil // Unknown GNU sparse PAX version
|
||||
case hdr.PAXRecords[paxGNUSparseMap] != "":
|
||||
is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
|
||||
default:
|
||||
return nil, nil // Not a PAX format GNU sparse file.
|
||||
}
|
||||
hdr.Format.mayOnlyBe(FormatPAX)
|
||||
|
||||
// Update hdr from GNU sparse PAX headers.
|
||||
if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
|
||||
hdr.Name = name
|
||||
}
|
||||
size := hdr.PAXRecords[paxGNUSparseSize]
|
||||
if size == "" {
|
||||
size = hdr.PAXRecords[paxGNUSparseRealSize]
|
||||
}
|
||||
if size != "" {
|
||||
n, err := strconv.ParseInt(size, 10, 64)
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
hdr.Size = n
|
||||
}
|
||||
|
||||
// Read the sparse map according to the appropriate format.
|
||||
if is1x0 {
|
||||
return readGNUSparseMap1x0(tr.curr)
|
||||
}
|
||||
return readGNUSparseMap0x1(hdr.PAXRecords)
|
||||
}
|
||||
|
||||
// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
|
||||
func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
|
||||
for k, v := range paxHdrs {
|
||||
if v == "" {
|
||||
continue // Keep the original USTAR value
|
||||
}
|
||||
var id64 int64
|
||||
switch k {
|
||||
case paxPath:
|
||||
hdr.Name = v
|
||||
case paxLinkpath:
|
||||
hdr.Linkname = v
|
||||
case paxUname:
|
||||
hdr.Uname = v
|
||||
case paxGname:
|
||||
hdr.Gname = v
|
||||
case paxUid:
|
||||
id64, err = strconv.ParseInt(v, 10, 64)
|
||||
hdr.Uid = int(id64) // Integer overflow possible
|
||||
case paxGid:
|
||||
id64, err = strconv.ParseInt(v, 10, 64)
|
||||
hdr.Gid = int(id64) // Integer overflow possible
|
||||
case paxAtime:
|
||||
hdr.AccessTime, err = parsePAXTime(v)
|
||||
case paxMtime:
|
||||
hdr.ModTime, err = parsePAXTime(v)
|
||||
case paxCtime:
|
||||
hdr.ChangeTime, err = parsePAXTime(v)
|
||||
case paxSize:
|
||||
hdr.Size, err = strconv.ParseInt(v, 10, 64)
|
||||
default:
|
||||
if strings.HasPrefix(k, paxSchilyXattr) {
|
||||
if hdr.Xattrs == nil {
|
||||
hdr.Xattrs = make(map[string]string)
|
||||
}
|
||||
hdr.Xattrs[k[len(paxSchilyXattr):]] = v
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return ErrHeader
|
||||
}
|
||||
}
|
||||
hdr.PAXRecords = paxHdrs
|
||||
return nil
|
||||
}
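A small in-package sketch (hypothetical record values, fmt assumed available) of the override behaviour: non-empty PAX records replace the corresponding Header fields and the record map is attached to hdr.PAXRecords:

// Illustrative sketch: PAX records override the USTAR-derived fields,
// including a sub-second mtime, and the map is kept on hdr.PAXRecords.
func exampleMergePAX() {
	hdr := &Header{Name: "short-name"}
	paxHdrs := map[string]string{
		paxPath:  "a/very/long/path/that/did/not/fit/in/ustar.txt",
		paxSize:  "123456789",
		paxMtime: "1350244992.023960108",
	}
	if err := mergePAX(hdr, paxHdrs); err != nil {
		panic(err)
	}
	fmt.Println(hdr.Name, hdr.Size, hdr.ModTime.Nanosecond())
	// a/very/long/path/that/did/not/fit/in/ustar.txt 123456789 23960108
}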
|
||||
|
||||
// parsePAX parses PAX headers.
|
||||
// If an extended header (type 'x') is invalid, ErrHeader is returned
|
||||
func parsePAX(r io.Reader) (map[string]string, error) {
|
||||
buf, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// leaving this function for io.Reader makes it more testable
|
||||
if tr, ok := r.(*Reader); ok && tr.RawAccounting {
|
||||
if _, err = tr.rawBytes.Write(buf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
sbuf := string(buf)
|
||||
|
||||
// For GNU PAX sparse format 0.0 support.
|
||||
// This function transforms the sparse format 0.0 headers into format 0.1
|
||||
// headers since 0.0 headers were not PAX compliant.
|
||||
var sparseMap []string
|
||||
|
||||
paxHdrs := make(map[string]string)
|
||||
for len(sbuf) > 0 {
|
||||
key, value, residual, err := parsePAXRecord(sbuf)
|
||||
if err != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
sbuf = residual
|
||||
|
||||
switch key {
|
||||
case paxGNUSparseOffset, paxGNUSparseNumBytes:
|
||||
// Validate sparse header order and value.
|
||||
if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
|
||||
(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
|
||||
strings.Contains(value, ",") {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
sparseMap = append(sparseMap, value)
|
||||
default:
|
||||
paxHdrs[key] = value
|
||||
}
|
||||
}
|
||||
if len(sparseMap) > 0 {
|
||||
paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
|
||||
}
|
||||
return paxHdrs, nil
|
||||
}
|
||||
|
||||
// readHeader reads the next block header and assumes that the underlying reader
|
||||
// is already aligned to a block boundary. It returns the raw block of the
|
||||
// header in case further processing is required.
|
||||
//
|
||||
// The err will be set to io.EOF only when one of the following occurs:
|
||||
// * Exactly 0 bytes are read and EOF is hit.
|
||||
// * Exactly 1 block of zeros is read and EOF is hit.
|
||||
// * At least 2 blocks of zeros are read.
|
||||
func (tr *Reader) readHeader() (*Header, *block, error) {
|
||||
// Two blocks of zero bytes marks the end of the archive.
|
||||
n, err := io.ReadFull(tr.r, tr.blk[:])
|
||||
if tr.RawAccounting && (err == nil || err == io.EOF) {
|
||||
tr.rawBytes.Write(tr.blk[:n])
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err // EOF is okay here; exactly 0 bytes read
|
||||
}
|
||||
|
||||
if bytes.Equal(tr.blk[:], zeroBlock[:]) {
|
||||
n, err = io.ReadFull(tr.r, tr.blk[:])
|
||||
if tr.RawAccounting && (err == nil || err == io.EOF) {
|
||||
tr.rawBytes.Write(tr.blk[:n])
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
|
||||
}
|
||||
if bytes.Equal(tr.blk[:], zeroBlock[:]) {
|
||||
return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
|
||||
}
|
||||
return nil, nil, ErrHeader // Zero block and then non-zero block
|
||||
}
|
||||
|
||||
// Verify the header matches a known format.
|
||||
format := tr.blk.GetFormat()
|
||||
if format == FormatUnknown {
|
||||
return nil, nil, ErrHeader
|
||||
}
|
||||
|
||||
var p parser
|
||||
hdr := new(Header)
|
||||
|
||||
// Unpack the V7 header.
|
||||
v7 := tr.blk.V7()
|
||||
hdr.Typeflag = v7.TypeFlag()[0]
|
||||
hdr.Name = p.parseString(v7.Name())
|
||||
hdr.Linkname = p.parseString(v7.LinkName())
|
||||
hdr.Size = p.parseNumeric(v7.Size())
|
||||
hdr.Mode = p.parseNumeric(v7.Mode())
|
||||
hdr.Uid = int(p.parseNumeric(v7.UID()))
|
||||
hdr.Gid = int(p.parseNumeric(v7.GID()))
|
||||
hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
|
||||
|
||||
// Unpack format specific fields.
|
||||
if format > formatV7 {
|
||||
ustar := tr.blk.USTAR()
|
||||
hdr.Uname = p.parseString(ustar.UserName())
|
||||
hdr.Gname = p.parseString(ustar.GroupName())
|
||||
hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
|
||||
hdr.Devminor = p.parseNumeric(ustar.DevMinor())
|
||||
|
||||
var prefix string
|
||||
switch {
|
||||
case format.has(FormatUSTAR | FormatPAX):
|
||||
hdr.Format = format
|
||||
ustar := tr.blk.USTAR()
|
||||
prefix = p.parseString(ustar.Prefix())
|
||||
|
||||
// For Format detection, check if block is properly formatted since
|
||||
// the parser is more liberal than what USTAR actually permits.
|
||||
notASCII := func(r rune) bool { return r >= 0x80 }
|
||||
if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
|
||||
hdr.Format = FormatUnknown // Non-ASCII characters in block.
|
||||
}
|
||||
nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
|
||||
if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
|
||||
nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
|
||||
hdr.Format = FormatUnknown // Numeric fields must end in NUL
|
||||
}
|
||||
case format.has(formatSTAR):
|
||||
star := tr.blk.STAR()
|
||||
prefix = p.parseString(star.Prefix())
|
||||
hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
|
||||
hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
|
||||
case format.has(FormatGNU):
|
||||
hdr.Format = format
|
||||
var p2 parser
|
||||
gnu := tr.blk.GNU()
|
||||
if b := gnu.AccessTime(); b[0] != 0 {
|
||||
hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
|
||||
}
|
||||
if b := gnu.ChangeTime(); b[0] != 0 {
|
||||
hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
|
||||
}
|
||||
|
||||
// Prior to Go1.8, the Writer had a bug where it would output
|
||||
// an invalid tar file in certain rare situations because the logic
|
||||
// incorrectly believed that the old GNU format had a prefix field.
|
||||
// This is wrong and leads to an output file that mangles the
|
||||
// atime and ctime fields, which are often left unused.
|
||||
//
|
||||
// In order to continue reading tar files created by former, buggy
|
||||
// versions of Go, we skeptically parse the atime and ctime fields.
|
||||
// If we are unable to parse them and the prefix field looks like
|
||||
// an ASCII string, then we fallback on the pre-Go1.8 behavior
|
||||
// of treating these fields as the USTAR prefix field.
|
||||
//
|
||||
// Note that this will not use the fallback logic for all possible
|
||||
// files generated by a pre-Go1.8 toolchain. If the generated file
|
||||
// happened to have a prefix field that parses as valid
|
||||
// atime and ctime fields (e.g., when they are valid octal strings),
|
||||
// then it is impossible to distinguish between a valid GNU file
|
||||
// and an invalid pre-Go1.8 file.
|
||||
//
|
||||
// See https://golang.org/issues/12594
|
||||
// See https://golang.org/issues/21005
|
||||
if p2.err != nil {
|
||||
hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
|
||||
ustar := tr.blk.USTAR()
|
||||
if s := p.parseString(ustar.Prefix()); isASCII(s) {
|
||||
prefix = s
|
||||
}
|
||||
hdr.Format = FormatUnknown // Buggy file is not GNU
|
||||
}
|
||||
}
|
||||
if len(prefix) > 0 {
|
||||
hdr.Name = prefix + "/" + hdr.Name
|
||||
}
|
||||
}
|
||||
return hdr, &tr.blk, p.err
|
||||
}
|
||||
|
||||
// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
|
||||
// The sparse map is stored in the tar header if it's small enough.
|
||||
// If it's larger than four entries, then one or more extension headers are used
|
||||
// to store the rest of the sparse map.
|
||||
//
|
||||
// The Header.Size does not reflect the size of any extended headers used.
|
||||
// Thus, this function will read from the raw io.Reader to fetch extra headers.
|
||||
// This method mutates blk in the process.
|
||||
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
|
||||
// Make sure that the input format is GNU.
|
||||
// Unfortunately, the STAR format also has a sparse header format that uses
|
||||
// the same type flag but has a completely different layout.
|
||||
if blk.GetFormat() != FormatGNU {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
hdr.Format.mayOnlyBe(FormatGNU)
|
||||
|
||||
var p parser
|
||||
hdr.Size = p.parseNumeric(blk.GNU().RealSize())
|
||||
if p.err != nil {
|
||||
return nil, p.err
|
||||
}
|
||||
s := blk.GNU().Sparse()
|
||||
spd := make(sparseDatas, 0, s.MaxEntries())
|
||||
for {
|
||||
for i := 0; i < s.MaxEntries(); i++ {
|
||||
// This termination condition is identical to GNU and BSD tar.
|
||||
if s.Entry(i).Offset()[0] == 0x00 {
|
||||
break // Don't return, need to process extended headers (even if empty)
|
||||
}
|
||||
offset := p.parseNumeric(s.Entry(i).Offset())
|
||||
length := p.parseNumeric(s.Entry(i).Length())
|
||||
if p.err != nil {
|
||||
return nil, p.err
|
||||
}
|
||||
spd = append(spd, sparseEntry{Offset: offset, Length: length})
|
||||
}
|
||||
|
||||
if s.IsExtended()[0] > 0 {
|
||||
// There are more entries. Read an extension header and parse its entries.
|
||||
if _, err := mustReadFull(tr.r, blk[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tr.RawAccounting {
|
||||
tr.rawBytes.Write(blk[:])
|
||||
}
|
||||
s = blk.Sparse()
|
||||
continue
|
||||
}
|
||||
return spd, nil // Done
|
||||
}
|
||||
}
|
||||
|
||||
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
|
||||
// version 1.0. The format of the sparse map consists of a series of
|
||||
// newline-terminated numeric fields. The first field is the number of entries
|
||||
// and is always present. Following this are the entries, consisting of two
|
||||
// fields (offset, length). This function must stop reading at the end
|
||||
// boundary of the block containing the last newline.
|
||||
//
|
||||
// Note that the GNU manual says that numeric values should be encoded in octal
|
||||
// format. However, the GNU tar utility itself outputs these values in decimal.
|
||||
// As such, this library treats values as being encoded in decimal.
|
||||
func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
|
||||
var (
|
||||
cntNewline int64
|
||||
buf bytes.Buffer
|
||||
blk block
|
||||
)
|
||||
|
||||
// feedTokens copies data in blocks from r into buf until there are
|
||||
// at least n newlines in buf. It will not read more blocks than needed.
|
||||
feedTokens := func(n int64) error {
|
||||
for cntNewline < n {
|
||||
if _, err := mustReadFull(r, blk[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
buf.Write(blk[:])
|
||||
for _, c := range blk {
|
||||
if c == '\n' {
|
||||
cntNewline++
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextToken gets the next token delimited by a newline. This assumes that
|
||||
// at least one newline exists in the buffer.
|
||||
nextToken := func() string {
|
||||
cntNewline--
|
||||
tok, _ := buf.ReadString('\n')
|
||||
return strings.TrimRight(tok, "\n")
|
||||
}
|
||||
|
||||
// Parse for the number of entries.
|
||||
// Use integer overflow resistant math to check this.
|
||||
if err := feedTokens(1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
|
||||
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
|
||||
// Parse for all member entries.
|
||||
// numEntries is trusted after this since a potential attacker must have
|
||||
// committed resources proportional to what this library used.
|
||||
if err := feedTokens(2 * numEntries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spd := make(sparseDatas, 0, numEntries)
|
||||
for i := int64(0); i < numEntries; i++ {
|
||||
offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
|
||||
length, err2 := strconv.ParseInt(nextToken(), 10, 64)
|
||||
if err1 != nil || err2 != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
spd = append(spd, sparseEntry{Offset: offset, Length: length})
|
||||
}
|
||||
return spd, nil
|
||||
}
|
||||
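// Illustrative sketch (not part of the vendored source; assumes an extra fmt
// import alongside the package's strings import): a GNU PAX 1.0 sparse map
// describing two data fragments, as it would appear at the start of the
// entry's data section. The decimal fields are newline-terminated and the map
// is padded to a full 512-byte block, which is why the reader below must
// supply whole blocks.
func exampleReadGNUSparseMap1x0() {
	payload := "2\n0\n1024\n4096\n512\n" // entry count, then offset/length pairs
	payload += strings.Repeat("\x00", 512-len(payload))
	spd, err := readGNUSparseMap1x0(strings.NewReader(payload))
	fmt.Println(spd, err) // [{0 1024} {4096 512}] <nil>
}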
|
||||
// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
|
||||
// version 0.1. The sparse map is stored in the PAX headers.
|
||||
func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
|
||||
// Get number of entries.
|
||||
// Use integer overflow resistant math to check this.
|
||||
numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
|
||||
numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
|
||||
if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
|
||||
// There should be two numbers in sparseMap for each entry.
|
||||
sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
|
||||
if len(sparseMap) == 1 && sparseMap[0] == "" {
|
||||
sparseMap = sparseMap[:0]
|
||||
}
|
||||
if int64(len(sparseMap)) != 2*numEntries {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
|
||||
// Loop through the entries in the sparse map.
|
||||
// numEntries is trusted now.
|
||||
spd := make(sparseDatas, 0, numEntries)
|
||||
for len(sparseMap) >= 2 {
|
||||
offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
|
||||
length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
|
||||
if err1 != nil || err2 != nil {
|
||||
return nil, ErrHeader
|
||||
}
|
||||
spd = append(spd, sparseEntry{Offset: offset, Length: length})
|
||||
sparseMap = sparseMap[2:]
|
||||
}
|
||||
return spd, nil
|
||||
}
|
||||
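// Illustrative sketch (not part of the vendored source; assumes an extra fmt
// import): in the PAX 0.1 format the sparse map arrives as ordinary PAX
// records, so the call below corresponds to an archive carrying
//	GNU.sparse.numblocks=2
//	GNU.sparse.map=0,1024,4096,512
func exampleReadGNUSparseMap0x1() {
	paxHdrs := map[string]string{
		paxGNUSparseNumBlocks: "2",
		paxGNUSparseMap:       "0,1024,4096,512",
	}
	spd, err := readGNUSparseMap0x1(paxHdrs)
	fmt.Println(spd, err) // [{0 1024} {4096 512}] <nil>
}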
|
||||
// Read reads from the current file in the tar archive.
|
||||
// It returns (0, io.EOF) when it reaches the end of that file,
|
||||
// until Next is called to advance to the next file.
|
||||
//
|
||||
// If the current file is sparse, then the regions marked as a hole
|
||||
// are read back as NUL-bytes.
|
||||
//
|
||||
// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
|
||||
// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
|
||||
// the Header.Size claims.
|
||||
func (tr *Reader) Read(b []byte) (int, error) {
|
||||
if tr.err != nil {
|
||||
return 0, tr.err
|
||||
}
|
||||
n, err := tr.curr.Read(b)
|
||||
if err != nil && err != io.EOF {
|
||||
tr.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
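// Illustrative sketch (not part of the vendored source; assumes an extra fmt
// import): typical use of the exported Reader API described above. Holes in
// sparse entries come back as runs of NUL bytes, so io.Copy observes the full
// logical size of each file.
func exampleReadArchive(r io.Reader) error {
	tr := NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // End of archive
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
		if _, err := io.Copy(ioutil.Discard, tr); err != nil {
			return err
		}
	}
}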
|
||||
// writeTo writes the content of the current file to w.
|
||||
// The number of bytes written matches the number of remaining bytes in the current file.
|
||||
//
|
||||
// If the current file is sparse and w is an io.WriteSeeker,
|
||||
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
|
||||
// assuming that skipped regions are filled with NULs.
|
||||
// This always writes the last byte to ensure w is the right size.
|
||||
//
|
||||
// TODO(dsnet): Re-export this when adding sparse file support.
|
||||
// See https://golang.org/issue/22735
|
||||
func (tr *Reader) writeTo(w io.Writer) (int64, error) {
|
||||
if tr.err != nil {
|
||||
return 0, tr.err
|
||||
}
|
||||
n, err := tr.curr.WriteTo(w)
|
||||
if err != nil {
|
||||
tr.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// regFileReader is a fileReader for reading data from a regular file entry.
|
||||
type regFileReader struct {
|
||||
r io.Reader // Underlying Reader
|
||||
nb int64 // Number of remaining bytes to read
|
||||
}
|
||||
|
||||
func (fr *regFileReader) Read(b []byte) (n int, err error) {
|
||||
if int64(len(b)) > fr.nb {
|
||||
b = b[:fr.nb]
|
||||
}
|
||||
if len(b) > 0 {
|
||||
n, err = fr.r.Read(b)
|
||||
fr.nb -= int64(n)
|
||||
}
|
||||
switch {
|
||||
case err == io.EOF && fr.nb > 0:
|
||||
return n, io.ErrUnexpectedEOF
|
||||
case err == nil && fr.nb == 0:
|
||||
return n, io.EOF
|
||||
default:
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
|
||||
return io.Copy(w, struct{ io.Reader }{fr})
|
||||
}
|
||||
|
||||
func (fr regFileReader) LogicalRemaining() int64 {
|
||||
return fr.nb
|
||||
}
|
||||
|
||||
func (fr regFileReader) PhysicalRemaining() int64 {
|
||||
return fr.nb
|
||||
}
|
||||
|
||||
// sparseFileReader is a fileReader for reading data from a sparse file entry.
|
||||
type sparseFileReader struct {
|
||||
fr fileReader // Underlying fileReader
|
||||
sp sparseHoles // Normalized list of sparse holes
|
||||
pos int64 // Current position in sparse file
|
||||
}
|
||||
|
||||
func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
|
||||
finished := int64(len(b)) >= sr.LogicalRemaining()
|
||||
if finished {
|
||||
b = b[:sr.LogicalRemaining()]
|
||||
}
|
||||
|
||||
b0 := b
|
||||
endPos := sr.pos + int64(len(b))
|
||||
for endPos > sr.pos && err == nil {
|
||||
var nf int // Bytes read in fragment
|
||||
holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
|
||||
if sr.pos < holeStart { // In a data fragment
|
||||
bf := b[:min(int64(len(b)), holeStart-sr.pos)]
|
||||
nf, err = tryReadFull(sr.fr, bf)
|
||||
} else { // In a hole fragment
|
||||
bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
|
||||
nf, err = tryReadFull(zeroReader{}, bf)
|
||||
}
|
||||
b = b[nf:]
|
||||
sr.pos += int64(nf)
|
||||
if sr.pos >= holeEnd && len(sr.sp) > 1 {
|
||||
sr.sp = sr.sp[1:] // Ensure last fragment always remains
|
||||
}
|
||||
}
|
||||
|
||||
n = len(b0) - len(b)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
return n, errMissData // Less data in dense file than sparse file
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // More data in dense file than sparse file
|
||||
case finished:
|
||||
return n, io.EOF
|
||||
default:
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
|
||||
ws, ok := w.(io.WriteSeeker)
|
||||
if ok {
|
||||
if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
|
||||
ok = false // Not all io.Seeker can really seek
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return io.Copy(w, struct{ io.Reader }{sr})
|
||||
}
|
||||
|
||||
var writeLastByte bool
|
||||
pos0 := sr.pos
|
||||
for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
|
||||
var nf int64 // Size of fragment
|
||||
holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
|
||||
if sr.pos < holeStart { // In a data fragment
|
||||
nf = holeStart - sr.pos
|
||||
nf, err = io.CopyN(ws, sr.fr, nf)
|
||||
} else { // In a hole fragment
|
||||
nf = holeEnd - sr.pos
|
||||
if sr.PhysicalRemaining() == 0 {
|
||||
writeLastByte = true
|
||||
nf--
|
||||
}
|
||||
_, err = ws.Seek(nf, io.SeekCurrent)
|
||||
}
|
||||
sr.pos += nf
|
||||
if sr.pos >= holeEnd && len(sr.sp) > 1 {
|
||||
sr.sp = sr.sp[1:] // Ensure last fragment always remains
|
||||
}
|
||||
}
|
||||
|
||||
// If the last fragment is a hole, then seek to 1-byte before EOF, and
|
||||
// write a single byte to ensure the file is the right size.
|
||||
if writeLastByte && err == nil {
|
||||
_, err = ws.Write([]byte{0})
|
||||
sr.pos++
|
||||
}
|
||||
|
||||
n = sr.pos - pos0
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
return n, errMissData // Less data in dense file than sparse file
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // More data in dense file than sparse file
|
||||
default:
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (sr sparseFileReader) LogicalRemaining() int64 {
|
||||
return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
|
||||
}
|
||||
func (sr sparseFileReader) PhysicalRemaining() int64 {
|
||||
return sr.fr.PhysicalRemaining()
|
||||
}
|
||||
|
||||
type zeroReader struct{}
|
||||
|
||||
func (zeroReader) Read(b []byte) (int, error) {
|
||||
for i := range b {
|
||||
b[i] = 0
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// mustReadFull is like io.ReadFull except it returns
|
||||
// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
|
||||
func mustReadFull(r io.Reader, b []byte) (int, error) {
|
||||
n, err := tryReadFull(r, b)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// tryReadFull is like io.ReadFull except it returns
|
||||
// io.EOF when it is hit before len(b) bytes are read.
|
||||
func tryReadFull(r io.Reader, b []byte) (n int, err error) {
|
||||
for len(b) > n && err == nil {
|
||||
var nn int
|
||||
nn, err = r.Read(b[n:])
|
||||
n += nn
|
||||
}
|
||||
if len(b) == n && err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// discard skips n bytes in r, reporting an error if unable to do so.
|
||||
func discard(tr *Reader, n int64) error {
|
||||
var seekSkipped, copySkipped int64
|
||||
var err error
|
||||
r := tr.r
|
||||
if tr.RawAccounting {
|
||||
|
||||
copySkipped, err = io.CopyN(tr.rawBytes, tr.r, n)
|
||||
goto out
|
||||
}
|
||||
|
||||
// If possible, Seek to the last byte before the end of the data section.
|
||||
// Do this because Seek is often lazy about reporting errors; this will mask
|
||||
// the fact that the stream may be truncated. We can rely on the
|
||||
// io.CopyN done shortly afterwards to trigger any IO errors.
|
||||
if sr, ok := r.(io.Seeker); ok && n > 1 {
|
||||
// Not all io.Seeker can actually Seek. For example, os.Stdin implements
|
||||
// io.Seeker, but calling Seek always returns an error and performs
|
||||
// no action. Thus, we try an innocent seek to the current position
|
||||
// to see if Seek is really supported.
|
||||
pos1, err := sr.Seek(0, io.SeekCurrent)
|
||||
if pos1 >= 0 && err == nil {
|
||||
// Seek seems supported, so perform the real Seek.
|
||||
pos2, err := sr.Seek(n-1, io.SeekCurrent)
|
||||
if pos2 < 0 || err != nil {
|
||||
return err
|
||||
}
|
||||
seekSkipped = pos2 - pos1
|
||||
}
|
||||
}
|
||||
|
||||
copySkipped, err = io.CopyN(ioutil.Discard, r, n-seekSkipped)
|
||||
out:
|
||||
if err == io.EOF && seekSkipped+copySkipped < n {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
20
vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux dragonfly openbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atim.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctim.Unix())
|
||||
}
|
20
vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd netbsd
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atimespec.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctimespec.Unix())
|
||||
}
|
96
vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
// userMap and groupMap cache UID and GID lookups for performance reasons.
|
||||
// The downside is that renaming uname or gname by the OS never takes effect.
|
||||
var userMap, groupMap sync.Map // map[int]string
|
||||
|
||||
func statUnix(fi os.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
|
||||
// Best effort at populating Uname and Gname.
|
||||
// The os/user functions may fail for any number of reasons
|
||||
// (not implemented on that platform, cgo not enabled, etc).
|
||||
if u, ok := userMap.Load(h.Uid); ok {
|
||||
h.Uname = u.(string)
|
||||
} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
|
||||
h.Uname = u.Username
|
||||
userMap.Store(h.Uid, h.Uname)
|
||||
}
|
||||
if g, ok := groupMap.Load(h.Gid); ok {
|
||||
h.Gname = g.(string)
|
||||
} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
|
||||
h.Gname = g.Name
|
||||
groupMap.Store(h.Gid, h.Gname)
|
||||
}
|
||||
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
|
||||
// Best effort at populating Devmajor and Devminor.
|
||||
if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
|
||||
dev := uint64(sys.Rdev) // May be int32 or uint32
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
// Copied from golang.org/x/sys/unix/dev_linux.go.
|
||||
major := uint32((dev & 0x00000000000fff00) >> 8)
|
||||
major |= uint32((dev & 0xfffff00000000000) >> 32)
|
||||
minor := uint32((dev & 0x00000000000000ff) >> 0)
|
||||
minor |= uint32((dev & 0x00000ffffff00000) >> 12)
|
||||
h.Devmajor, h.Devminor = int64(major), int64(minor)
|
||||
case "darwin":
|
||||
// Copied from golang.org/x/sys/unix/dev_darwin.go.
|
||||
major := uint32((dev >> 24) & 0xff)
|
||||
minor := uint32(dev & 0xffffff)
|
||||
h.Devmajor, h.Devminor = int64(major), int64(minor)
|
||||
case "dragonfly":
|
||||
// Copied from golang.org/x/sys/unix/dev_dragonfly.go.
|
||||
major := uint32((dev >> 8) & 0xff)
|
||||
minor := uint32(dev & 0xffff00ff)
|
||||
h.Devmajor, h.Devminor = int64(major), int64(minor)
|
||||
case "freebsd":
|
||||
// Copied from golang.org/x/sys/unix/dev_freebsd.go.
|
||||
major := uint32((dev >> 8) & 0xff)
|
||||
minor := uint32(dev & 0xffff00ff)
|
||||
h.Devmajor, h.Devminor = int64(major), int64(minor)
|
||||
case "netbsd":
|
||||
// Copied from golang.org/x/sys/unix/dev_netbsd.go.
|
||||
major := uint32((dev & 0x000fff00) >> 8)
|
||||
minor := uint32((dev & 0x000000ff) >> 0)
|
||||
minor |= uint32((dev & 0xfff00000) >> 12)
|
||||
h.Devmajor, h.Devminor = int64(major), int64(minor)
|
||||
case "openbsd":
|
||||
// Copied from golang.org/x/sys/unix/dev_openbsd.go.
|
||||
major := uint32((dev & 0x0000ff00) >> 8)
|
||||
minor := uint32((dev & 0x000000ff) >> 0)
|
||||
minor |= uint32((dev & 0xffff0000) >> 8)
|
||||
h.Devmajor, h.Devminor = int64(major), int64(minor)
|
||||
default:
|
||||
// TODO: Implement solaris (see https://golang.org/issue/8106)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
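// Illustrative sketch (not part of the vendored source; assumes an extra fmt
// import): the "linux" branch above splits st_rdev with the same bit layout
// used by golang.org/x/sys/unix. For a device node with major 8, minor 1
// (e.g. /dev/sda1), st_rdev is 0x801 and the masks recover both halves.
func exampleLinuxDevSplit() {
	dev := uint64(0x801)
	major := uint32((dev & 0x00000000000fff00) >> 8)
	major |= uint32((dev & 0xfffff00000000000) >> 32)
	minor := uint32((dev & 0x00000000000000ff) >> 0)
	minor |= uint32((dev & 0x00000ffffff00000) >> 12)
	fmt.Println(major, minor) // 8 1
}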
326
vendor/github.com/vbatts/tar-split/archive/tar/strconv.go
generated
vendored
Normal file
@ -0,0 +1,326 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// hasNUL reports whether the NUL character exists within s.
|
||||
func hasNUL(s string) bool {
|
||||
return strings.IndexByte(s, 0) >= 0
|
||||
}
|
||||
|
||||
// isASCII reports whether the input is an ASCII C-style string.
|
||||
func isASCII(s string) bool {
|
||||
for _, c := range s {
|
||||
if c >= 0x80 || c == 0x00 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// toASCII converts the input to an ASCII C-style string.
|
||||
// This is a best-effort conversion, so invalid characters are dropped.
|
||||
func toASCII(s string) string {
|
||||
if isASCII(s) {
|
||||
return s
|
||||
}
|
||||
b := make([]byte, 0, len(s))
|
||||
for _, c := range s {
|
||||
if c < 0x80 && c != 0x00 {
|
||||
b = append(b, byte(c))
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
type formatter struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// parseString parses bytes as a NUL-terminated C-style string.
|
||||
// If a NUL byte is not found then the whole slice is returned as a string.
|
||||
func (*parser) parseString(b []byte) string {
|
||||
if i := bytes.IndexByte(b, 0); i >= 0 {
|
||||
return string(b[:i])
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// formatString copies s into b, NUL-terminating if possible.
|
||||
func (f *formatter) formatString(b []byte, s string) {
|
||||
if len(s) > len(b) {
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
copy(b, s)
|
||||
if len(s) < len(b) {
|
||||
b[len(s)] = 0
|
||||
}
|
||||
|
||||
// Some buggy readers treat regular files with a trailing slash
|
||||
// in the V7 path field as a directory even though the full path
|
||||
// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
|
||||
if len(s) > len(b) && b[len(b)-1] == '/' {
|
||||
n := len(strings.TrimRight(s[:len(b)], "/"))
|
||||
b[n] = 0 // Replace trailing slash with NUL terminator
|
||||
}
|
||||
}
|
||||
|
||||
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
|
||||
// encoding. Unlike octal encoding, base-256 encoding does not require that the
|
||||
// string ends with a NUL character. Thus, all n bytes are available for output.
|
||||
//
|
||||
// If operating in binary mode, this assumes strict GNU binary mode; which means
|
||||
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
|
||||
// equivalent to the sign bit in two's complement form.
|
||||
func fitsInBase256(n int, x int64) bool {
|
||||
binBits := uint(n-1) * 8
|
||||
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
|
||||
}
|
||||
|
||||
// parseNumeric parses the input as being encoded in either base-256 or octal.
|
||||
// This function may return negative numbers.
|
||||
// If parsing fails or an integer overflow occurs, err will be set.
|
||||
func (p *parser) parseNumeric(b []byte) int64 {
|
||||
// Check for base-256 (binary) format first.
|
||||
// If the first bit is set, then all following bits constitute a two's
|
||||
// complement encoded number in big-endian byte order.
|
||||
if len(b) > 0 && b[0]&0x80 != 0 {
|
||||
// Handling negative numbers relies on the following identity:
|
||||
// -a-1 == ^a
|
||||
//
|
||||
// If the number is negative, we use an inversion mask to invert the
|
||||
// data bytes and treat the value as an unsigned number.
|
||||
var inv byte // 0x00 if positive or zero, 0xff if negative
|
||||
if b[0]&0x40 != 0 {
|
||||
inv = 0xff
|
||||
}
|
||||
|
||||
var x uint64
|
||||
for i, c := range b {
|
||||
c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
|
||||
if i == 0 {
|
||||
c &= 0x7f // Ignore sign bit in first byte
|
||||
}
|
||||
if (x >> 56) > 0 {
|
||||
p.err = ErrHeader // Integer overflow
|
||||
return 0
|
||||
}
|
||||
x = x<<8 | uint64(c)
|
||||
}
|
||||
if (x >> 63) > 0 {
|
||||
p.err = ErrHeader // Integer overflow
|
||||
return 0
|
||||
}
|
||||
if inv == 0xff {
|
||||
return ^int64(x)
|
||||
}
|
||||
return int64(x)
|
||||
}
|
||||
|
||||
// Normal case is base-8 (octal) format.
|
||||
return p.parseOctal(b)
|
||||
}
|
||||
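// Illustrative sketch (not part of the vendored source): the same value in
// both encodings accepted by parseNumeric. An octal field is a NUL/space
// terminated ASCII string; a base-256 field sets the high bit of the first
// byte and stores the value big-endian in the remaining bits.
func exampleParseNumeric() {
	var p parser
	octal := []byte("00000001750\x00")                            // 0o1750 == 1000
	binary := []byte{0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x03, 0xe8} // 1000
	fmt.Println(p.parseNumeric(octal), p.parseNumeric(binary), p.err) // 1000 1000 <nil>
}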
|
||||
// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
|
||||
// Otherwise it will attempt to use base-256 (binary) encoding.
|
||||
func (f *formatter) formatNumeric(b []byte, x int64) {
|
||||
if fitsInOctal(len(b), x) {
|
||||
f.formatOctal(b, x)
|
||||
return
|
||||
}
|
||||
|
||||
if fitsInBase256(len(b), x) {
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // Highest bit indicates binary format
|
||||
return
|
||||
}
|
||||
|
||||
f.formatOctal(b, 0) // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
func (p *parser) parseOctal(b []byte) int64 {
|
||||
// Because unused fields are filled with NULs, we need
|
||||
// to skip leading NULs. Fields may also be padded with
|
||||
// spaces or NULs.
|
||||
// So we remove leading and trailing NULs and spaces to
|
||||
// be sure.
|
||||
b = bytes.Trim(b, " \x00")
|
||||
|
||||
if len(b) == 0 {
|
||||
return 0
|
||||
}
|
||||
x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
|
||||
if perr != nil {
|
||||
p.err = ErrHeader
|
||||
}
|
||||
return int64(x)
|
||||
}
|
||||
|
||||
func (f *formatter) formatOctal(b []byte, x int64) {
|
||||
if !fitsInOctal(len(b), x) {
|
||||
x = 0 // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// Add leading zeros, but leave room for a NUL.
|
||||
if n := len(b) - len(s) - 1; n > 0 {
|
||||
s = strings.Repeat("0", n) + s
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
|
||||
// fitsInOctal reports whether the integer x fits in a field n-bytes long
|
||||
// using octal encoding with the appropriate NUL terminator.
|
||||
func fitsInOctal(n int, x int64) bool {
|
||||
octBits := uint(n-1) * 3
|
||||
return x >= 0 && (n >= 22 || x < 1<<octBits)
|
||||
}
|
||||
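// Illustrative sketch (not part of the vendored source): the classic USTAR
// size limit follows directly from fitsInOctal. A 12-byte size field keeps
// 11 octal digits plus a terminator, so the largest encodable value is
// 8^11 - 1 bytes, just under 8GiB.
func exampleOctalLimit() {
	fmt.Println(fitsInOctal(12, 8589934591)) // true  (8^11 - 1)
	fmt.Println(fitsInOctal(12, 8589934592)) // false (8^11)
}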
|
||||
// parsePAXTime takes a string of the form %d.%d as described in the PAX
|
||||
// specification. Note that this implementation allows for negative timestamps,
|
||||
// which is allowed by the PAX specification, but not always portable.
|
||||
func parsePAXTime(s string) (time.Time, error) {
|
||||
const maxNanoSecondDigits = 9
|
||||
|
||||
// Split string into seconds and sub-seconds parts.
|
||||
ss, sn := s, ""
|
||||
if pos := strings.IndexByte(s, '.'); pos >= 0 {
|
||||
ss, sn = s[:pos], s[pos+1:]
|
||||
}
|
||||
|
||||
// Parse the seconds.
|
||||
secs, err := strconv.ParseInt(ss, 10, 64)
|
||||
if err != nil {
|
||||
return time.Time{}, ErrHeader
|
||||
}
|
||||
if len(sn) == 0 {
|
||||
return time.Unix(secs, 0), nil // No sub-second values
|
||||
}
|
||||
|
||||
// Parse the nanoseconds.
|
||||
if strings.Trim(sn, "0123456789") != "" {
|
||||
return time.Time{}, ErrHeader
|
||||
}
|
||||
if len(sn) < maxNanoSecondDigits {
|
||||
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
|
||||
} else {
|
||||
sn = sn[:maxNanoSecondDigits] // Right truncate
|
||||
}
|
||||
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
|
||||
if len(ss) > 0 && ss[0] == '-' {
|
||||
return time.Unix(secs, -1*nsecs), nil // Negative correction
|
||||
}
|
||||
return time.Unix(secs, nsecs), nil
|
||||
}
|
||||
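// Illustrative sketch (not part of the vendored source): a typical PAX mtime
// value with sub-second precision. Fewer than nine fractional digits would be
// right-padded with zeros; more than nine would be truncated.
func exampleParsePAXTime() {
	ts, err := parsePAXTime("1350244992.023960108")
	fmt.Println(ts.Unix(), ts.Nanosecond(), err) // 1350244992 23960108 <nil>
}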
|
||||
// formatPAXTime converts ts into a time of the form %d.%d as described in the
|
||||
// PAX specification. This function supports negative timestamps.
|
||||
func formatPAXTime(ts time.Time) (s string) {
|
||||
secs, nsecs := ts.Unix(), ts.Nanosecond()
|
||||
if nsecs == 0 {
|
||||
return strconv.FormatInt(secs, 10)
|
||||
}
|
||||
|
||||
// If seconds is negative, then perform correction.
|
||||
sign := ""
|
||||
if secs < 0 {
|
||||
sign = "-" // Remember sign
|
||||
secs = -(secs + 1) // Add a second to secs
|
||||
nsecs = -(nsecs - 1e9) // Take that second away from nsecs
|
||||
}
|
||||
return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
|
||||
}
|
||||
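// Illustrative sketch (not part of the vendored source): the negative-time
// correction above in action. time.Unix(-1, 500000000) is half a second before
// the epoch, so it must format as "-0.5" rather than "-1.5".
func exampleFormatPAXTime() {
	fmt.Println(formatPAXTime(time.Unix(-1, 500000000))) // -0.5
}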
|
||||
// parsePAXRecord parses the input PAX record string into a key-value pair.
|
||||
// If parsing is successful, it will slice off the currently read record and
|
||||
// return the remainder as r.
|
||||
func parsePAXRecord(s string) (k, v, r string, err error) {
|
||||
// The size field ends at the first space.
|
||||
sp := strings.IndexByte(s, ' ')
|
||||
if sp == -1 {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
|
||||
// Parse the first token as a decimal integer.
|
||||
n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
|
||||
if perr != nil || n < 5 || int64(len(s)) < n {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
|
||||
// Extract everything between the space and the final newline.
|
||||
rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
|
||||
if nl != "\n" {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
|
||||
// The first equals separates the key from the value.
|
||||
eq := strings.IndexByte(rec, '=')
|
||||
if eq == -1 {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
k, v = rec[:eq], rec[eq+1:]
|
||||
|
||||
if !validPAXRecord(k, v) {
|
||||
return "", "", s, ErrHeader
|
||||
}
|
||||
return k, v, rem, nil
|
||||
}
|
||||
|
||||
// formatPAXRecord formats a single PAX record, prefixing it with the
|
||||
// appropriate length.
|
||||
func formatPAXRecord(k, v string) (string, error) {
|
||||
if !validPAXRecord(k, v) {
|
||||
return "", ErrHeader
|
||||
}
|
||||
|
||||
const padding = 3 // Extra padding for ' ', '=', and '\n'
|
||||
size := len(k) + len(v) + padding
|
||||
size += len(strconv.Itoa(size))
|
||||
record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
|
||||
|
||||
// Final adjustment if adding size field increased the record size.
|
||||
if len(record) != size {
|
||||
size = len(record)
|
||||
record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
|
||||
}
|
||||
return record, nil
|
||||
}
|
||||
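// Illustrative sketch (not part of the vendored source): the length prefix
// counts itself. For key "mtime" and value "1350244992.023960108" the space,
// key, '=', value, and newline come to 28 bytes; adding the two-digit length
// brings the total to 30, so the record reads "30 mtime=1350244992.023960108\n".
func exampleFormatPAXRecord() {
	rec, err := formatPAXRecord("mtime", "1350244992.023960108")
	fmt.Printf("%q %v\n", rec, err) // "30 mtime=1350244992.023960108\n" <nil>
}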
|
||||
// validPAXRecord reports whether the key-value pair is valid where each
|
||||
// record is formatted as:
|
||||
// "%d %s=%s\n" % (size, key, value)
|
||||
//
|
||||
// Keys and values should be UTF-8, but the number of bad writers out there
|
||||
// forces us to be more liberal.
|
||||
// Thus, we only reject all keys with NUL, and only reject NULs in values
|
||||
// for the PAX version of the USTAR string fields.
|
||||
// The key must not contain an '=' character.
|
||||
func validPAXRecord(k, v string) bool {
|
||||
if k == "" || strings.IndexByte(k, '=') >= 0 {
|
||||
return false
|
||||
}
|
||||
switch k {
|
||||
case paxPath, paxLinkpath, paxUname, paxGname:
|
||||
return !hasNUL(v)
|
||||
default:
|
||||
return !hasNUL(k)
|
||||
}
|
||||
}
|
653
vendor/github.com/vbatts/tar-split/archive/tar/writer.go
generated
vendored
Normal file
@ -0,0 +1,653 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Writer provides sequential writing of a tar archive.
|
||||
// Writer.WriteHeader begins a new file with the provided Header,
|
||||
// and then Writer can be treated as an io.Writer to supply that file's data.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
pad int64 // Amount of padding to write after current file entry
|
||||
curr fileWriter // Writer for current file entry
|
||||
hdr Header // Shallow copy of Header that is safe for mutations
|
||||
blk block // Buffer to use as temporary local storage
|
||||
|
||||
// err is a persistent error.
|
||||
// It is only the responsibility of every exported method of Writer to
|
||||
// ensure that this error is sticky.
|
||||
err error
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
return &Writer{w: w, curr: &regFileWriter{w, 0}}
|
||||
}
|
||||
|
||||
type fileWriter interface {
|
||||
io.Writer
|
||||
fileState
|
||||
|
||||
ReadFrom(io.Reader) (int64, error)
|
||||
}
|
||||
|
||||
// Flush finishes writing the current file's block padding.
|
||||
// The current file must be fully written before Flush can be called.
|
||||
//
|
||||
// This is unnecessary as the next call to WriteHeader or Close
|
||||
// will implicitly flush out the file's padding.
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
if nb := tw.curr.LogicalRemaining(); nb > 0 {
|
||||
return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
|
||||
}
|
||||
if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
tw.pad = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// The Header.Size determines how many bytes can be written for the next file.
|
||||
// If the current file is not fully written, then this returns an error.
|
||||
// This implicitly flushes any padding necessary before writing the header.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
tw.hdr = *hdr // Shallow copy of Header
|
||||
|
||||
// Avoid usage of the legacy TypeRegA flag, and automatically promote
|
||||
// it to use TypeReg or TypeDir.
|
||||
if tw.hdr.Typeflag == TypeRegA {
|
||||
if strings.HasSuffix(tw.hdr.Name, "/") {
|
||||
tw.hdr.Typeflag = TypeDir
|
||||
} else {
|
||||
tw.hdr.Typeflag = TypeReg
|
||||
}
|
||||
}
|
||||
|
||||
// Round ModTime and ignore AccessTime and ChangeTime unless
|
||||
// the format is explicitly chosen.
|
||||
// This ensures nominal usage of WriteHeader (without specifying the format)
|
||||
// does not always result in the PAX format being chosen, which
|
||||
// causes a 1KiB increase to every header.
|
||||
if tw.hdr.Format == FormatUnknown {
|
||||
tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
|
||||
tw.hdr.AccessTime = time.Time{}
|
||||
tw.hdr.ChangeTime = time.Time{}
|
||||
}
|
||||
|
||||
allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
|
||||
switch {
|
||||
case allowedFormats.has(FormatUSTAR):
|
||||
tw.err = tw.writeUSTARHeader(&tw.hdr)
|
||||
return tw.err
|
||||
case allowedFormats.has(FormatPAX):
|
||||
tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
|
||||
return tw.err
|
||||
case allowedFormats.has(FormatGNU):
|
||||
tw.err = tw.writeGNUHeader(&tw.hdr)
|
||||
return tw.err
|
||||
default:
|
||||
return err // Non-fatal error
|
||||
}
|
||||
}
|
||||
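// Illustrative sketch (not part of the vendored source): nominal use of the
// exported Writer API described above. With Format left unset the ModTime is
// rounded to the nearest second, so a short ASCII name and small size keep
// this a plain USTAR header.
func exampleWriteArchive(w io.Writer) error {
	tw := NewWriter(w)
	body := []byte("hello world\n")
	hdr := &Header{
		Name:    "hello.txt",
		Mode:    0644,
		Size:    int64(len(body)),
		ModTime: time.Now(),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write(body); err != nil {
		return err
	}
	return tw.Close()
}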
|
||||
func (tw *Writer) writeUSTARHeader(hdr *Header) error {
|
||||
// Check if we can use USTAR prefix/suffix splitting.
|
||||
var namePrefix string
|
||||
if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
|
||||
namePrefix, hdr.Name = prefix, suffix
|
||||
}
|
||||
|
||||
// Pack the main header.
|
||||
var f formatter
|
||||
blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
|
||||
f.formatString(blk.USTAR().Prefix(), namePrefix)
|
||||
blk.SetFormat(FormatUSTAR)
|
||||
if f.err != nil {
|
||||
return f.err // Should never happen since header is validated
|
||||
}
|
||||
return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
|
||||
}
|
||||
|
||||
func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
|
||||
realName, realSize := hdr.Name, hdr.Size
|
||||
|
||||
// TODO(dsnet): Re-enable this when adding sparse support.
|
||||
// See https://golang.org/issue/22735
|
||||
/*
|
||||
// Handle sparse files.
|
||||
var spd sparseDatas
|
||||
var spb []byte
|
||||
if len(hdr.SparseHoles) > 0 {
|
||||
sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
|
||||
sph = alignSparseEntries(sph, hdr.Size)
|
||||
spd = invertSparseEntries(sph, hdr.Size)
|
||||
|
||||
// Format the sparse map.
|
||||
hdr.Size = 0 // Replace with encoded size
|
||||
spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
|
||||
for _, s := range spd {
|
||||
hdr.Size += s.Length
|
||||
spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
|
||||
spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
|
||||
}
|
||||
pad := blockPadding(int64(len(spb)))
|
||||
spb = append(spb, zeroBlock[:pad]...)
|
||||
hdr.Size += int64(len(spb)) // Accounts for encoded sparse map
|
||||
|
||||
// Add and modify appropriate PAX records.
|
||||
dir, file := path.Split(realName)
|
||||
hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
|
||||
paxHdrs[paxGNUSparseMajor] = "1"
|
||||
paxHdrs[paxGNUSparseMinor] = "0"
|
||||
paxHdrs[paxGNUSparseName] = realName
|
||||
paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
|
||||
paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
|
||||
delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
|
||||
}
|
||||
*/
|
||||
_ = realSize
|
||||
|
||||
// Write PAX records to the output.
|
||||
isGlobal := hdr.Typeflag == TypeXGlobalHeader
|
||||
if len(paxHdrs) > 0 || isGlobal {
|
||||
// Sort keys for deterministic ordering.
|
||||
var keys []string
|
||||
for k := range paxHdrs {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
// Write each record to a buffer.
|
||||
var buf strings.Builder
|
||||
for _, k := range keys {
|
||||
rec, err := formatPAXRecord(k, paxHdrs[k])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf.WriteString(rec)
|
||||
}
|
||||
|
||||
// Write the extended header file.
|
||||
var name string
|
||||
var flag byte
|
||||
if isGlobal {
|
||||
name = realName
|
||||
if name == "" {
|
||||
name = "GlobalHead.0.0"
|
||||
}
|
||||
flag = TypeXGlobalHeader
|
||||
} else {
|
||||
dir, file := path.Split(realName)
|
||||
name = path.Join(dir, "PaxHeaders.0", file)
|
||||
flag = TypeXHeader
|
||||
}
|
||||
data := buf.String()
|
||||
if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
|
||||
return err // Global headers return here
|
||||
}
|
||||
}
|
||||
|
||||
// Pack the main header.
|
||||
var f formatter // Ignore errors since they are expected
|
||||
fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
|
||||
blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
|
||||
blk.SetFormat(FormatPAX)
|
||||
if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(dsnet): Re-enable this when adding sparse support.
|
||||
// See https://golang.org/issue/22735
|
||||
/*
|
||||
// Write the sparse map and setup the sparse writer if necessary.
|
||||
if len(spd) > 0 {
|
||||
// Use tw.curr since the sparse map is accounted for in hdr.Size.
|
||||
if _, err := tw.curr.Write(spb); err != nil {
|
||||
return err
|
||||
}
|
||||
tw.curr = &sparseFileWriter{tw.curr, spd, 0}
|
||||
}
|
||||
*/
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tw *Writer) writeGNUHeader(hdr *Header) error {
|
||||
// Use long-link files if Name or Linkname exceeds the field size.
|
||||
const longName = "././@LongLink"
|
||||
if len(hdr.Name) > nameSize {
|
||||
data := hdr.Name + "\x00"
|
||||
if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(hdr.Linkname) > nameSize {
|
||||
data := hdr.Linkname + "\x00"
|
||||
if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Pack the main header.
|
||||
var f formatter // Ignore errors since they are expected
|
||||
var spd sparseDatas
|
||||
var spb []byte
|
||||
blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
|
||||
if !hdr.AccessTime.IsZero() {
|
||||
f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
|
||||
}
|
||||
if !hdr.ChangeTime.IsZero() {
|
||||
f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
|
||||
}
|
||||
// TODO(dsnet): Re-enable this when adding sparse support.
|
||||
// See https://golang.org/issue/22735
|
||||
/*
|
||||
if hdr.Typeflag == TypeGNUSparse {
|
||||
sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
|
||||
sph = alignSparseEntries(sph, hdr.Size)
|
||||
spd = invertSparseEntries(sph, hdr.Size)
|
||||
|
||||
// Format the sparse map.
|
||||
formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
|
||||
for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
|
||||
f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
|
||||
f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
|
||||
sp = sp[1:]
|
||||
}
|
||||
if len(sp) > 0 {
|
||||
sa.IsExtended()[0] = 1
|
||||
}
|
||||
return sp
|
||||
}
|
||||
sp2 := formatSPD(spd, blk.GNU().Sparse())
|
||||
for len(sp2) > 0 {
|
||||
var spHdr block
|
||||
sp2 = formatSPD(sp2, spHdr.Sparse())
|
||||
spb = append(spb, spHdr[:]...)
|
||||
}
|
||||
|
||||
// Update size fields in the header block.
|
||||
realSize := hdr.Size
|
||||
hdr.Size = 0 // Encoded size; does not account for encoded sparse map
|
||||
for _, s := range spd {
|
||||
hdr.Size += s.Length
|
||||
}
|
||||
copy(blk.V7().Size(), zeroBlock[:]) // Reset field
|
||||
f.formatNumeric(blk.V7().Size(), hdr.Size)
|
||||
f.formatNumeric(blk.GNU().RealSize(), realSize)
|
||||
}
|
||||
*/
|
||||
blk.SetFormat(FormatGNU)
|
||||
if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write the extended sparse map and setup the sparse writer if necessary.
|
||||
if len(spd) > 0 {
|
||||
// Use tw.w since the sparse map is not accounted for in hdr.Size.
|
||||
if _, err := tw.w.Write(spb); err != nil {
|
||||
return err
|
||||
}
|
||||
tw.curr = &sparseFileWriter{tw.curr, spd, 0}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type (
|
||||
stringFormatter func([]byte, string)
|
||||
numberFormatter func([]byte, int64)
|
||||
)
|
||||
|
||||
// templateV7Plus fills out the V7 fields of a block using values from hdr.
|
||||
// It also fills out fields (uname, gname, devmajor, devminor) that are
|
||||
// shared in the USTAR, PAX, and GNU formats using the provided formatters.
|
||||
//
|
||||
// The block returned is only valid until the next call to
|
||||
// templateV7Plus or writeRawFile.
|
||||
func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
|
||||
tw.blk.Reset()
|
||||
|
||||
modTime := hdr.ModTime
|
||||
if modTime.IsZero() {
|
||||
modTime = time.Unix(0, 0)
|
||||
}
|
||||
|
||||
v7 := tw.blk.V7()
|
||||
v7.TypeFlag()[0] = hdr.Typeflag
|
||||
fmtStr(v7.Name(), hdr.Name)
|
||||
fmtStr(v7.LinkName(), hdr.Linkname)
|
||||
fmtNum(v7.Mode(), hdr.Mode)
|
||||
fmtNum(v7.UID(), int64(hdr.Uid))
|
||||
fmtNum(v7.GID(), int64(hdr.Gid))
|
||||
fmtNum(v7.Size(), hdr.Size)
|
||||
fmtNum(v7.ModTime(), modTime.Unix())
|
||||
|
||||
ustar := tw.blk.USTAR()
|
||||
fmtStr(ustar.UserName(), hdr.Uname)
|
||||
fmtStr(ustar.GroupName(), hdr.Gname)
|
||||
fmtNum(ustar.DevMajor(), hdr.Devmajor)
|
||||
fmtNum(ustar.DevMinor(), hdr.Devminor)
|
||||
|
||||
return &tw.blk
|
||||
}
|
||||
|
||||
// writeRawFile writes a minimal file with the given name and flag type.
|
||||
// It uses format to encode the header format and will write data as the body.
|
||||
// It uses default values for all of the other fields (as BSD and GNU tar do).
|
||||
func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
|
||||
tw.blk.Reset()
|
||||
|
||||
// Best effort for the filename.
|
||||
name = toASCII(name)
|
||||
if len(name) > nameSize {
|
||||
name = name[:nameSize]
|
||||
}
|
||||
name = strings.TrimRight(name, "/")
|
||||
|
||||
var f formatter
|
||||
v7 := tw.blk.V7()
|
||||
v7.TypeFlag()[0] = flag
|
||||
f.formatString(v7.Name(), name)
|
||||
f.formatOctal(v7.Mode(), 0)
|
||||
f.formatOctal(v7.UID(), 0)
|
||||
f.formatOctal(v7.GID(), 0)
|
||||
f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
|
||||
f.formatOctal(v7.ModTime(), 0)
|
||||
tw.blk.SetFormat(format)
|
||||
if f.err != nil {
|
||||
return f.err // Only occurs if size condition is violated
|
||||
}
|
||||
|
||||
// Write the header and data.
|
||||
if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := io.WriteString(tw, data)
|
||||
return err
|
||||
}
|
||||
|
||||
// writeRawHeader writes the value of blk, regardless of its value.
|
||||
// It sets up the Writer such that it can accept a file of the given size.
|
||||
// If the flag is a special header-only flag, then the size is treated as zero.
|
||||
func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tw.w.Write(blk[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
if isHeaderOnlyType(flag) {
|
||||
size = 0
|
||||
}
|
||||
tw.curr = &regFileWriter{tw.w, size}
|
||||
tw.pad = blockPadding(size)
|
||||
return nil
|
||||
}
|
||||
|
||||
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
|
||||
// If the path is not splittable, then it will return ("", "", false).
|
||||
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
|
||||
length := len(name)
|
||||
if length <= nameSize || !isASCII(name) {
|
||||
return "", "", false
|
||||
} else if length > prefixSize+1 {
|
||||
length = prefixSize + 1
|
||||
} else if name[length-1] == '/' {
|
||||
length--
|
||||
}
|
||||
|
||||
i := strings.LastIndex(name[:length], "/")
|
||||
nlen := len(name) - i - 1 // nlen is length of suffix
|
||||
plen := i // plen is length of prefix
|
||||
if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
|
||||
return "", "", false
|
||||
}
|
||||
return name[:i], name[i+1:], true
|
||||
}
|
||||
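// Illustrative sketch (not part of the vendored source): a 108-character path
// does not fit the 100-byte name field, but splitting at the last '/' within
// the allowed window moves the leading directories into the 155-byte USTAR
// prefix field.
func exampleSplitUSTARPath() {
	long := strings.Repeat("dir/", 25) + "file.txt" // 108 characters
	prefix, suffix, ok := splitUSTARPath(long)
	fmt.Println(ok, len(prefix), suffix) // true 99 file.txt
}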
|
||||
// Write writes to the current file in the tar archive.
|
||||
// Write returns the error ErrWriteTooLong if more than
|
||||
// Header.Size bytes are written after WriteHeader.
|
||||
//
|
||||
// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
|
||||
// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
|
||||
// of what the Header.Size claims.
|
||||
func (tw *Writer) Write(b []byte) (int, error) {
|
||||
if tw.err != nil {
|
||||
return 0, tw.err
|
||||
}
|
||||
n, err := tw.curr.Write(b)
|
||||
if err != nil && err != ErrWriteTooLong {
|
||||
tw.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// readFrom populates the content of the current file by reading from r.
|
||||
// The bytes read must match the number of remaining bytes in the current file.
|
||||
//
|
||||
// If the current file is sparse and r is an io.ReadSeeker,
|
||||
// then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
|
||||
// assuming that skipped regions are all NULs.
|
||||
// This always reads the last byte to ensure r is the right size.
|
||||
//
|
||||
// TODO(dsnet): Re-export this when adding sparse file support.
|
||||
// See https://golang.org/issue/22735
|
||||
func (tw *Writer) readFrom(r io.Reader) (int64, error) {
|
||||
if tw.err != nil {
|
||||
return 0, tw.err
|
||||
}
|
||||
n, err := tw.curr.ReadFrom(r)
|
||||
if err != nil && err != ErrWriteTooLong {
|
||||
tw.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close closes the tar archive by flushing the padding, and writing the footer.
|
||||
// If the current file (from a prior call to WriteHeader) is not fully written,
|
||||
// then this returns an error.
|
||||
func (tw *Writer) Close() error {
|
||||
if tw.err == ErrWriteAfterClose {
|
||||
return nil
|
||||
}
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Trailer: two zero blocks.
|
||||
err := tw.Flush()
|
||||
for i := 0; i < 2 && err == nil; i++ {
|
||||
_, err = tw.w.Write(zeroBlock[:])
|
||||
}
|
||||
|
||||
// Ensure all future actions are invalid.
|
||||
tw.err = ErrWriteAfterClose
|
||||
return err // Report IO errors
|
||||
}
|
||||
|
||||
// regFileWriter is a fileWriter for writing data to a regular file entry.
|
||||
type regFileWriter struct {
|
||||
w io.Writer // Underlying Writer
|
||||
nb int64 // Number of remaining bytes to write
|
||||
}
|
||||
|
||||
func (fw *regFileWriter) Write(b []byte) (n int, err error) {
|
||||
overwrite := int64(len(b)) > fw.nb
|
||||
if overwrite {
|
||||
b = b[:fw.nb]
|
||||
}
|
||||
if len(b) > 0 {
|
||||
n, err = fw.w.Write(b)
|
||||
fw.nb -= int64(n)
|
||||
}
|
||||
switch {
|
||||
case err != nil:
|
||||
return n, err
|
||||
case overwrite:
|
||||
return n, ErrWriteTooLong
|
||||
default:
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
|
||||
return io.Copy(struct{ io.Writer }{fw}, r)
|
||||
}
|
||||
|
||||
func (fw regFileWriter) LogicalRemaining() int64 {
|
||||
return fw.nb
|
||||
}
|
||||
func (fw regFileWriter) PhysicalRemaining() int64 {
|
||||
return fw.nb
|
||||
}
|
||||
|
||||
// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
|
||||
type sparseFileWriter struct {
|
||||
fw fileWriter // Underlying fileWriter
|
||||
sp sparseDatas // Normalized list of data fragments
|
||||
pos int64 // Current position in sparse file
|
||||
}
|
||||
|
||||
func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
|
||||
overwrite := int64(len(b)) > sw.LogicalRemaining()
|
||||
if overwrite {
|
||||
b = b[:sw.LogicalRemaining()]
|
||||
}
|
||||
|
||||
b0 := b
|
||||
endPos := sw.pos + int64(len(b))
|
||||
for endPos > sw.pos && err == nil {
|
||||
var nf int // Bytes written in fragment
|
||||
dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
|
||||
if sw.pos < dataStart { // In a hole fragment
|
||||
bf := b[:min(int64(len(b)), dataStart-sw.pos)]
|
||||
nf, err = zeroWriter{}.Write(bf)
|
||||
} else { // In a data fragment
|
||||
bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
|
||||
nf, err = sw.fw.Write(bf)
|
||||
}
|
||||
b = b[nf:]
|
||||
sw.pos += int64(nf)
|
||||
if sw.pos >= dataEnd && len(sw.sp) > 1 {
|
||||
sw.sp = sw.sp[1:] // Ensure last fragment always remains
|
||||
}
|
||||
}
|
||||
|
||||
n = len(b0) - len(b)
|
||||
switch {
|
||||
case err == ErrWriteTooLong:
|
||||
return n, errMissData // Not possible; implies bug in validation logic
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // Not possible; implies bug in validation logic
|
||||
case overwrite:
|
||||
return n, ErrWriteTooLong
|
||||
default:
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
rs, ok := r.(io.ReadSeeker)
|
||||
if ok {
|
||||
if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
|
||||
ok = false // Not all io.Seeker can really seek
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return io.Copy(struct{ io.Writer }{sw}, r)
|
||||
}
|
||||
|
||||
var readLastByte bool
|
||||
pos0 := sw.pos
|
||||
for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
|
||||
var nf int64 // Size of fragment
|
||||
dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
|
||||
if sw.pos < dataStart { // In a hole fragment
|
||||
nf = dataStart - sw.pos
|
||||
if sw.PhysicalRemaining() == 0 {
|
||||
readLastByte = true
|
||||
nf--
|
||||
}
|
||||
_, err = rs.Seek(nf, io.SeekCurrent)
|
||||
} else { // In a data fragment
|
||||
nf = dataEnd - sw.pos
|
||||
nf, err = io.CopyN(sw.fw, rs, nf)
|
||||
}
|
||||
sw.pos += nf
|
||||
if sw.pos >= dataEnd && len(sw.sp) > 1 {
|
||||
sw.sp = sw.sp[1:] // Ensure last fragment always remains
|
||||
}
|
||||
}
|
||||
|
||||
// If the last fragment is a hole, then seek to 1-byte before EOF, and
|
||||
// read a single byte to ensure the file is the right size.
|
||||
if readLastByte && err == nil {
|
||||
_, err = mustReadFull(rs, []byte{0})
|
||||
sw.pos++
|
||||
}
|
||||
|
||||
n = sw.pos - pos0
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
return n, io.ErrUnexpectedEOF
|
||||
case err == ErrWriteTooLong:
|
||||
return n, errMissData // Not possible; implies bug in validation logic
|
||||
case err != nil:
|
||||
return n, err
|
||||
case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
|
||||
return n, errUnrefData // Not possible; implies bug in validation logic
|
||||
default:
|
||||
return n, ensureEOF(rs)
|
||||
}
|
||||
}
|
||||
|
||||
func (sw sparseFileWriter) LogicalRemaining() int64 {
|
||||
return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
|
||||
}
|
||||
func (sw sparseFileWriter) PhysicalRemaining() int64 {
|
||||
return sw.fw.PhysicalRemaining()
|
||||
}
|
||||
|
||||
// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
|
||||
type zeroWriter struct{}
|
||||
|
||||
func (zeroWriter) Write(b []byte) (int, error) {
|
||||
for i, c := range b {
|
||||
if c != 0 {
|
||||
return i, errWriteHole
|
||||
}
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
|
||||
func ensureEOF(r io.Reader) error {
|
||||
n, err := tryReadFull(r, []byte{0})
|
||||
switch {
|
||||
case n > 0:
|
||||
return ErrWriteTooLong
|
||||
case err == io.EOF:
|
||||
return nil
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
5
vendor/modules.txt
vendored
@ -10,7 +10,7 @@ github.com/alessio/shellescape
|
||||
github.com/containerd/containerd/errdefs
|
||||
github.com/containerd/containerd/log
|
||||
github.com/containerd/containerd/platforms
|
||||
# github.com/containerd/stargz-snapshotter/estargz v0.8.0
|
||||
# github.com/containerd/stargz-snapshotter/estargz v0.9.0
|
||||
## explicit
|
||||
github.com/containerd/stargz-snapshotter/estargz
|
||||
github.com/containerd/stargz-snapshotter/estargz/errorutil
|
||||
@ -135,7 +135,6 @@ github.com/hashicorp/hcl/json/token
|
||||
# github.com/inconshreveable/mousetrap v1.0.0
|
||||
github.com/inconshreveable/mousetrap
|
||||
# github.com/klauspost/compress v1.13.6
|
||||
## explicit
|
||||
github.com/klauspost/compress
|
||||
github.com/klauspost/compress/fse
|
||||
github.com/klauspost/compress/huff0
|
||||
@ -190,6 +189,8 @@ github.com/spf13/viper/internal/encoding/toml
|
||||
github.com/spf13/viper/internal/encoding/yaml
|
||||
# github.com/subosito/gotenv v1.2.0
|
||||
github.com/subosito/gotenv
|
||||
# github.com/vbatts/tar-split v0.11.2
|
||||
github.com/vbatts/tar-split/archive/tar
|
||||
# golang.org/x/mod v0.4.2
|
||||
golang.org/x/mod/semver
|
||||
# golang.org/x/net v0.0.0-20211007125505-59d4e928ea9d
|
||||
|