
Allow ko to produce estargz layers. (#271)

* Allow ko to produce estargz layers.

This pulls in the latest google/go-containerregistry, which lets folks set `GGCR_EXPERIMENT_ESTARGZ=1` to have `ko` produce layers compatible with the estargz format, enabling the containerd estargz-snapshotter to lazy-load parts of images.

* Add README
Matt Moore
2020-12-17 15:00:30 -08:00
committed by GitHub
parent 68ebcc8454
commit 222b0fc788
37 changed files with 2340 additions and 203 deletions


@@ -526,6 +526,16 @@ or to the latest git commit's timestamp with:
export SOURCE_DATE_EPOCH=$(git log -1 --format='%ct')
## Experiments
Over time, we will add new functionality under experimental environment variables listed here.
| Env Var | Value(s) | What it does |
|---------|----------|--------------|
| `GGCR_EXPERIMENT_ESTARGZ` | `"1"` | When enabled, this experiment directs `ko` to emit [estargz](https://github.com/opencontainers/image-spec/issues/815)-compatible layers, which can be lazily loaded by an appropriately configured containerd. |
## Acknowledgements
This work is based heavily on learnings from having built the
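As a sketch of how this experiment gate plausibly looks inside go-containerregistry (the function name below is an illustrative assumption; only the environment variable and its value come from the table above):

package tarball

import "os"

// estargzEnabled is a hypothetical sketch of the experiment gate: ggcr
// consults GGCR_EXPERIMENT_ESTARGZ when deciding how to write layers.
func estargzEnabled() bool {
	return os.Getenv("GGCR_EXPERIMENT_ESTARGZ") == "1"
}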

go.mod

@@ -3,6 +3,7 @@ module github.com/google/ko
go 1.14
require (
github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b
github.com/docker/cli v0.0.0-20200303162255-7d407207c304 // indirect
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7
github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960
@@ -10,7 +11,7 @@ require (
github.com/fsnotify/fsnotify v1.4.9
github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b
github.com/google/go-cmp v0.4.1
-github.com/google/go-containerregistry v0.2.2-0.20201213184227-8b5370a4d663
+github.com/google/go-containerregistry v0.2.2-0.20201217211853-687e0e365894
github.com/googleapis/gnostic v0.4.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
github.com/json-iterator/go v1.1.10 // indirect
@@ -19,7 +20,7 @@ require (
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.4.0
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/text v0.3.3 // indirect
golang.org/x/tools v0.0.0-20200924205911-8a9a89368bd3
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776

go.sum

@@ -83,6 +83,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b h1:tnP4txDzNKsBOISNYG/f48Mt477CBeh9sS5rlu8MvSY=
github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b/go.mod h1:E9uVkkBKf0EaC39j2JVW9EzdNhYvpz6eQIjILHebruk=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
@@ -218,8 +220,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-containerregistry v0.2.2-0.20201213184227-8b5370a4d663 h1:lhVB3HiyY5UT7zgzPCpjlysuiPfa+944p3lzTOzUDS8=
+github.com/google/go-containerregistry v0.2.2-0.20201217211853-687e0e365894 h1:NBEggUeBhViaOpDws3ThoeG5mbjqoxYjN+z6NO4YKhc=
-github.com/google/go-containerregistry v0.2.2-0.20201213184227-8b5370a4d663/go.mod h1:Ts3Wioz1r5ayWx8sS6vLcWltWcM1aqFjd/eVrkFhrWM=
+github.com/google/go-containerregistry v0.2.2-0.20201217211853-687e0e365894/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -352,8 +354,8 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
@@ -559,6 +561,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=


@@ -32,6 +32,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/containerd/stargz-snapshotter/estargz"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/mutate"
@@ -507,7 +508,10 @@ func (g *gobuild) buildOne(ctx context.Context, s string, base v1.Image, platfor
binaryLayerBytes := binaryLayerBuf.Bytes()
binaryLayer, err := tarball.LayerFromOpener(func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBuffer(binaryLayerBytes)), nil
-})
+}, tarball.WithCompressedCaching, tarball.WithEstargzOptions(estargz.WithPrioritizedFiles([]string{
// When using estargz, prioritize downloading the binary entrypoint.
appPath,
})))
if err != nil {
return nil, err
}
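Pulled out of the diff context, a minimal sketch of what the new call does (appPath stands in for the binary's path inside the image, as in the surrounding function; the helper name is illustrative):

package example

import (
	"bytes"
	"io"
	"io/ioutil"

	"github.com/containerd/stargz-snapshotter/estargz"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

// buildLayer wraps an in-memory tarball in a v1.Layer, asking
// go-containerregistry to emit estargz and to place the entrypoint binary
// first so that it is fetched eagerly by a lazy-loading runtime.
func buildLayer(tarBytes []byte, appPath string) (v1.Layer, error) {
	return tarball.LayerFromOpener(func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewBuffer(tarBytes)), nil
	},
		tarball.WithCompressedCaching,
		tarball.WithEstargzOptions(estargz.WithPrioritizedFiles([]string{appPath})),
	)
}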


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,547 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"sync"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type options struct {
chunkSize int
compressionLevel int
prioritizedFiles []string
}
type Option func(o *options)
// WithChunkSize option specifies the chunk size of eStargz blob to build.
func WithChunkSize(chunkSize int) Option {
return func(o *options) {
o.chunkSize = chunkSize
}
}
// WithCompressionLevel option specifies the gzip compression level.
// The default is gzip.BestCompression.
// See also: https://godoc.org/compress/gzip#pkg-constants
func WithCompressionLevel(level int) Option {
return func(o *options) {
o.compressionLevel = level
}
}
// WithPrioritizedFiles option specifies the list of prioritized files.
// These files must be complete paths relative to "/" (e.g. "foo/bar",
// "./foo/bar").
func WithPrioritizedFiles(files []string) Option {
return func(o *options) {
o.prioritizedFiles = files
}
}
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
diffID digest.Digester
tocDigest digest.Digest
}
// DiffID returns the digest of uncompressed blob.
// It is only valid to call DiffID after Close.
func (b *Blob) DiffID() digest.Digest {
return b.diffID.Digest()
}
// TOCDigest returns the digest of uncompressed TOC JSON.
func (b *Blob) TOCDigest() digest.Digest {
return b.tocDigest
}
// Build builds an eStargz blob, which is an extended version of stargz, from the
// tar blob passed through the argument. If prioritized files are listed in the
// options, these files are grouped as "prioritized" and can be used for runtime
// optimization (e.g. prefetch). This function builds the blob in parallel,
// dividing it into several sub-blobs (at least runtime.GOMAXPROCS(0) of them).
func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
var opts options
opts.compressionLevel = gzip.BestCompression // BestCompression by default
for _, o := range opt {
o(&opts)
}
layerFiles := newTempFiles()
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
}
}
}()
entries, err := sortEntries(tarBlob, opts.prioritizedFiles)
if err != nil {
return nil, err
}
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
writers := make([]*Writer, len(tarParts))
payloads := make([]*os.File, len(tarParts))
var mu sync.Mutex
var eg errgroup.Group
for i, parts := range tarParts {
i, parts := i, parts
// builds verifiable stargz sub-blobs
eg.Go(func() error {
esgzFile, err := layerFiles.TempFile("", "esgzdata")
if err != nil {
return err
}
sw := NewWriterLevel(esgzFile, opts.compressionLevel)
sw.ChunkSize = opts.chunkSize
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
return err
}
mu.Lock()
writers[i] = sw
payloads[i] = esgzFile
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
rErr = err
return nil, err
}
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
if err != nil {
rErr = err
return nil, err
}
var rs []io.Reader
for _, p := range payloads {
fs, err := fileSectionReader(p)
if err != nil {
return nil, err
}
rs = append(rs, fs)
}
diffID := digest.Canonical.Digester()
pr, pw := io.Pipe()
go func() {
r, err := gzip.NewReader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
if err != nil {
pw.CloseWithError(err)
return
}
if _, err := io.Copy(diffID.Hash(), r); err != nil {
pw.CloseWithError(err)
return
}
pw.Close()
}()
return &Blob{
ReadCloser: readCloser{
Reader: pr,
closeFunc: layerFiles.CleanupAll,
},
tocDigest: tocDgst,
diffID: diffID,
}, nil
}
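A minimal sketch of driving Build directly (not part of the vendored file; the paths and entry name are illustrative):

// buildExample builds an eStargz blob from a plain layer tar and writes it out.
func buildExample() error {
	f, err := os.Open("layer.tar") // illustrative input
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	blob, err := Build(io.NewSectionReader(f, 0, info.Size()),
		WithPrioritizedFiles([]string{"app/entrypoint"}))
	if err != nil {
		return err
	}
	out, err := os.Create("layer.esgz")
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err := io.Copy(out, blob); err != nil {
		return err
	}
	// DiffID is only valid once the blob has been fully read and closed.
	if err := blob.Close(); err != nil {
		return err
	}
	fmt.Println("TOC digest:", blob.TOCDigest(), "DiffID:", blob.DiffID())
	return nil
}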
// closeWithCombine takes unclosed Writers and closes them. It also returns the
// TOC that combines all the Writers' TOCs.
// The Writers don't write the TOC and footer to their underlying writers, so
// their outputs can be combined into a single eStargz, with the tocAndFooter
// returned by this function appended at the tail of that combined blob.
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Reader, tocDgst digest.Digest, err error) {
if len(ws) == 0 {
return nil, "", fmt.Errorf("at least one writer must be passed")
}
for _, w := range ws {
if w.closed {
return nil, "", fmt.Errorf("writer must be unclosed")
}
defer func(w *Writer) { w.closed = true }(w)
if err := w.closeGz(); err != nil {
return nil, "", err
}
if err := w.bw.Flush(); err != nil {
return nil, "", err
}
}
var (
mtoc = new(jtoc)
currentOffset int64
)
mtoc.Version = ws[0].toc.Version
for _, w := range ws {
for _, e := range w.toc.Entries {
// Recalculate Offset of non-empty files/chunks
if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
e.Offset += currentOffset
}
mtoc.Entries = append(mtoc.Entries, e)
}
if w.toc.Version > mtoc.Version {
mtoc.Version = w.toc.Version
}
currentOffset += w.cw.n
}
tocJSON, err := json.MarshalIndent(mtoc, "", "\t")
if err != nil {
return nil, "", err
}
pr, pw := io.Pipe()
go func() {
zw, _ := gzip.NewWriterLevel(pw, compressionLevel)
tw := tar.NewWriter(zw)
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: TOCTarName,
Size: int64(len(tocJSON)),
}); err != nil {
pw.CloseWithError(err)
return
}
if _, err := tw.Write(tocJSON); err != nil {
pw.CloseWithError(err)
return
}
if err := tw.Close(); err != nil {
pw.CloseWithError(err)
return
}
if err := zw.Close(); err != nil {
pw.CloseWithError(err)
return
}
pw.Close()
}()
return io.MultiReader(
pr,
bytes.NewReader(footerBytes(currentOffset)),
), digest.FromBytes(tocJSON), nil
}
// divideEntries divides the passed entries into at least the number of parts
// specified by the argument.
func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
var estimatedSize int64
for _, e := range entries {
estimatedSize += e.header.Size
}
unitSize := estimatedSize / int64(minPartsNum)
var (
nextEnd = unitSize
offset int64
)
set = append(set, []*entry{})
for _, e := range entries {
set[len(set)-1] = append(set[len(set)-1], e)
offset += e.header.Size
if offset > nextEnd {
set = append(set, []*entry{})
nextEnd += unitSize
}
}
return
}
// sortEntries reads the specified tar blob and returns a list of tar entries.
// If prioritized files are specified, the list starts with those files,
// preserving the order given by the argument.
func sortEntries(in io.ReaderAt, prioritized []string) ([]*entry, error) {
// Import tar file.
intar, err := importTar(in)
if err != nil {
return nil, errors.Wrap(err, "failed to sort")
}
// Sort the tar file respecting to the prioritized files list.
sorted := &tarFile{}
for _, l := range prioritized {
moveRec(l, intar, sorted)
}
if len(prioritized) == 0 {
sorted.add(&entry{
header: &tar.Header{
Name: NoPrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
} else {
sorted.add(&entry{
header: &tar.Header{
Name: PrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
}
// Dump all entries and concatenate them.
return append(sorted.dump(), intar.dump()...), nil
}
// readerFromEntries returns a reader of tar archive that contains entries passed
// through the arguments.
func readerFromEntries(entries ...*entry) io.Reader {
pr, pw := io.Pipe()
go func() {
tw := tar.NewWriter(pw)
defer tw.Close()
for _, entry := range entries {
if err := tw.WriteHeader(entry.header); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
return
}
if _, err := io.Copy(tw, entry.payload); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
return
}
}
pw.Close()
}()
return pr
}
func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReader(in)
if err != nil {
return nil, errors.Wrap(err, "failed to make position watcher")
}
tr := tar.NewReader(pw)
// Walk through all nodes.
for {
// Fetch and parse next header.
h, err := tr.Next()
if err != nil {
if err == io.EOF {
break
} else {
return nil, errors.Wrap(err, "failed to parse tar file")
}
}
switch trimNamePrefix(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:
// Ignore existing landmark
continue
}
// Add entry if not exist.
if _, ok := tf.get(h.Name); ok {
return nil, fmt.Errorf("Duplicated entry(%q) is not supported", h.Name)
}
tf.add(&entry{
header: h,
payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
})
}
return tf, nil
}
func moveRec(name string, in *tarFile, out *tarFile) {
if name == "" {
return
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
moveRec(parent, in, out)
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
moveRec(e.header.Linkname, in, out)
}
if e, ok := in.get(name); ok {
out.add(e)
in.remove(name)
}
}
type entry struct {
header *tar.Header
payload io.ReadSeeker
}
type tarFile struct {
index map[string]*entry
stream []*entry
}
func (f *tarFile) add(e *entry) {
if f.index == nil {
f.index = make(map[string]*entry)
}
f.index[trimNamePrefix(e.header.Name)] = e
f.stream = append(f.stream, e)
}
func (f *tarFile) remove(name string) {
name = trimNamePrefix(name)
if f.index != nil {
delete(f.index, name)
}
var filtered []*entry
for _, e := range f.stream {
if trimNamePrefix(e.header.Name) == name {
continue
}
filtered = append(filtered, e)
}
f.stream = filtered
}
func (f *tarFile) get(name string) (e *entry, ok bool) {
if f.index == nil {
return nil, false
}
e, ok = f.index[trimNamePrefix(name)]
return
}
func (f *tarFile) dump() []*entry {
return f.stream
}
type readCloser struct {
io.Reader
closeFunc func() error
}
func (rc readCloser) Close() error {
return rc.closeFunc()
}
func fileSectionReader(file *os.File) (*io.SectionReader, error) {
info, err := file.Stat()
if err != nil {
return nil, err
}
return io.NewSectionReader(file, 0, info.Size()), nil
}
func newTempFiles() *tempFiles {
return &tempFiles{}
}
type tempFiles struct {
files []*os.File
filesMu sync.Mutex
}
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
f, err := ioutil.TempFile(dir, pattern)
if err != nil {
return nil, err
}
tf.filesMu.Lock()
tf.files = append(tf.files, f)
tf.filesMu.Unlock()
return f, nil
}
func (tf *tempFiles) CleanupAll() error {
tf.filesMu.Lock()
defer tf.filesMu.Unlock()
var allErr []error
for _, f := range tf.files {
if err := f.Close(); err != nil {
allErr = append(allErr, err)
}
if err := os.Remove(f.Name()); err != nil {
allErr = append(allErr, err)
}
}
tf.files = nil
return errorutil.Aggregate(allErr)
}
func newCountReader(r io.ReaderAt) (*countReader, error) {
pos := int64(0)
return &countReader{r: r, cPos: &pos}, nil
}
type countReader struct {
r io.ReaderAt
cPos *int64
mu sync.Mutex
}
func (cr *countReader) Read(p []byte) (int, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
n, err := cr.r.ReadAt(p, *cr.cPos)
if err == nil {
*cr.cPos += int64(n)
}
return n, err
}
func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
switch whence {
default:
return 0, fmt.Errorf("Unknown whence: %v", whence)
case io.SeekStart:
case io.SeekCurrent:
offset += *cr.cPos
case io.SeekEnd:
return 0, fmt.Errorf("Unsupported whence: %v", whence)
}
if offset < 0 {
return 0, fmt.Errorf("invalid offset")
}
*cr.cPos = offset
return offset, nil
}
func (cr *countReader) currentPos() int64 {
cr.mu.Lock()
defer cr.mu.Unlock()
return *cr.cPos
}


@@ -0,0 +1,40 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errorutil
import (
"errors"
"fmt"
"strings"
)
// Aggregate combines a list of errors into a single new error.
func Aggregate(errs []error) error {
switch len(errs) {
case 0:
return nil
case 1:
return errs[0]
default:
points := make([]string, len(errs)+1)
points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
for i, err := range errs {
points[i+1] = fmt.Sprintf("* %s", err)
}
return errors.New(strings.Join(points, "\n\t"))
}
}
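An illustrative use of Aggregate (not part of the vendored file; the error messages are made up):

package example

import (
	"errors"
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
)

func demo() {
	errs := []error{errors.New("close failed"), errors.New("remove failed")}
	// Prints a single error whose message enumerates both underlying failures.
	fmt.Println(errorutil.Aggregate(errs))
}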


@@ -0,0 +1,836 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bufio"
"bytes"
"compress/gzip"
"crypto/sha256"
"encoding/binary"
"encoding/json"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// A Reader permits random access reads from a stargz file.
type Reader struct {
sr *io.SectionReader
toc *jtoc
tocDigest digest.Digest
// m stores all non-chunk entries, keyed by name.
m map[string]*TOCEntry
// chunks stores all TOCEntry values for regular files that
// are split up. For a file with a single chunk, it's only
// stored in m.
chunks map[string][]*TOCEntry
}
// Open opens a stargz file for reading.
func Open(sr *io.SectionReader) (*Reader, error) {
tocOff, footerSize, err := OpenFooter(sr)
if err != nil {
return nil, errors.Wrapf(err, "error parsing footer")
}
tocTargz := make([]byte, sr.Size()-tocOff-footerSize)
if _, err := sr.ReadAt(tocTargz, tocOff); err != nil {
return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocTargz), err)
}
zr, err := gzip.NewReader(bytes.NewReader(tocTargz))
if err != nil {
return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
}
zr.Multistream(false)
tr := tar.NewReader(zr)
h, err := tr.Next()
if err != nil {
return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
}
if h.Name != TOCTarName {
return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
}
dgstr := digest.Canonical.Digester()
toc := new(jtoc)
if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
return nil, fmt.Errorf("error decoding TOC JSON: %v", err)
}
r := &Reader{sr: sr, toc: toc, tocDigest: dgstr.Digest()}
if err := r.initFields(); err != nil {
return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
}
return r, nil
}
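As a usage sketch (not part of the vendored file; the blob path and entry name are illustrative), opening an eStargz blob and streaming one file out of it looks like:

// readOneFile opens an eStargz blob and copies a single entry to stdout.
// OpenFile is defined later in this file.
func readOneFile() error {
	f, err := os.Open("layer.esgz")
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	r, err := Open(io.NewSectionReader(f, 0, info.Size()))
	if err != nil {
		return err
	}
	sr, err := r.OpenFile("app/entrypoint")
	if err != nil {
		return err
	}
	_, err = io.Copy(os.Stdout, sr)
	return err
}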
// OpenFooter extracts and parses footer from the given blob.
func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
}
// TODO: read a bigger chunk (1MB?) at once here to hopefully
// get the TOC + footer in one go.
var footer [FooterSize]byte
if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
return 0, 0, fmt.Errorf("error reading footer: %v", err)
}
return parseFooter(footer[:])
}
// initFields populates the Reader from r.toc after decoding it from
// JSON.
//
// Unexported fields are populated and TOCEntry fields that were
// implicit in the JSON are populated.
func (r *Reader) initFields() error {
r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
r.chunks = make(map[string][]*TOCEntry)
var lastPath string
uname := map[int]string{}
gname := map[int]string{}
var lastRegEnt *TOCEntry
for _, ent := range r.toc.Entries {
ent.Name = trimNamePrefix(ent.Name)
if ent.Type == "reg" {
lastRegEnt = ent
}
if ent.Type == "chunk" {
ent.Name = lastPath
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
if ent.ChunkSize == 0 && lastRegEnt != nil {
ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
}
} else {
lastPath = ent.Name
if ent.Uname != "" {
uname[ent.UID] = ent.Uname
} else {
ent.Uname = uname[ent.UID]
}
if ent.Gname != "" {
gname[ent.GID] = ent.Gname
} else {
ent.Gname = gname[ent.GID]
}
ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
if ent.Type == "dir" {
ent.NumLink++ // Parent dir links to this directory
r.m[strings.TrimSuffix(ent.Name, "/")] = ent
} else {
r.m[ent.Name] = ent
}
}
if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
}
if ent.ChunkSize == 0 && ent.Size != 0 {
ent.ChunkSize = ent.Size
}
}
// Populate children, add implicit directories:
for _, ent := range r.toc.Entries {
if ent.Type == "chunk" {
continue
}
// add "foo/":
// add "foo" child to "" (creating "" if necessary)
//
// add "foo/bar/":
// add "bar" child to "foo" (creating "foo" if necessary)
//
// add "foo/bar.txt":
// add "bar.txt" child to "foo" (creating "foo" if necessary)
//
// add "a/b/c/d/e/f.txt":
// create "a/b/c/d/e" node
// add "f.txt" child to "e"
name := ent.Name
if ent.Type == "dir" {
name = strings.TrimSuffix(name, "/")
}
pdir := r.getOrCreateDir(parentDir(name))
ent.NumLink++ // at least one name(ent.Name) references this entry.
if ent.Type == "hardlink" {
if org, ok := r.m[trimNamePrefix(ent.LinkName)]; ok {
org.NumLink++ // original entry is referenced by this ent.Name.
ent = org
} else {
return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
}
}
pdir.addChild(path.Base(name), ent)
}
lastOffset := r.sr.Size()
for i := len(r.toc.Entries) - 1; i >= 0; i-- {
e := r.toc.Entries[i]
if e.isDataType() {
e.nextOffset = lastOffset
}
if e.Offset != 0 {
lastOffset = e.Offset
}
}
return nil
}
func parentDir(p string) string {
dir, _ := path.Split(p)
return strings.TrimSuffix(dir, "/")
}
func (r *Reader) getOrCreateDir(d string) *TOCEntry {
e, ok := r.m[d]
if !ok {
e = &TOCEntry{
Name: d,
Type: "dir",
Mode: 0755,
NumLink: 2, // The directory itself(.) and the parent link to this directory.
}
r.m[d] = e
if d != "" {
pdir := r.getOrCreateDir(parentDir(d))
pdir.addChild(path.Base(d), e)
}
}
return e
}
// VerifyTOC checks that the TOC JSON in the passed blob matches the
// passed digests and that the TOC JSON contains digests for all chunks
// contained in the blob. If the verification succeeds, this function
// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
// Verify the digest of TOC JSON
if r.tocDigest != tocDigest {
return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
}
digestMap := make(map[int64]digest.Digest) // map from chunk offset to the digest
for _, e := range r.toc.Entries {
if e.Type == "reg" || e.Type == "chunk" {
if e.Type == "reg" && e.Size == 0 {
continue // ignores empty file
}
// offset must be unique in stargz blob
if _, ok := digestMap[e.Offset]; ok {
return nil, fmt.Errorf("offset %d found twice", e.Offset)
}
// all chunk entries must contain digest
if e.ChunkDigest == "" {
return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
e.Name, e.Offset)
}
d, err := digest.Parse(e.ChunkDigest)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse digest %q", e.ChunkDigest)
}
digestMap[e.Offset] = d
}
}
return &verifier{digestMap: digestMap}, nil
}
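A hedged sketch of the verification flow, assuming the expected digest was read from the manifest annotation defined in this package (TOCJSONDigestAnnotation); the helper name is illustrative and not part of the vendored file:

// verifyChunk verifies the TOC against a trusted digest, then checks the
// content of one chunk against the verifier registered for it.
func verifyChunk(r *Reader, want digest.Digest, ent *TOCEntry, chunk []byte) error {
	v, err := r.VerifyTOC(want)
	if err != nil {
		return err
	}
	verifier, err := v.Verifier(ent)
	if err != nil {
		return err
	}
	if _, err := verifier.Write(chunk); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("chunk of %q at offset %d failed verification", ent.Name, ent.Offset)
	}
	return nil
}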
// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
// offset of the chunk.
type verifier struct {
digestMap map[int64]digest.Digest
digestMapMu sync.Mutex
}
// Verifier returns a content verifier specified by TOCEntry.
func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
v.digestMapMu.Lock()
defer v.digestMapMu.Unlock()
d, ok := v.digestMap[ce.Offset]
if !ok {
return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
ce.Offset, ce.ChunkSize)
}
return d.Verifier(), nil
}
// ChunkEntryForOffset returns the TOCEntry containing the byte of the
// named file at the given offset within the file.
func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
e, ok = r.Lookup(name)
if !ok || !e.isDataType() {
return nil, false
}
ents := r.chunks[name]
if len(ents) < 2 {
if offset >= e.ChunkSize {
return nil, false
}
return e, true
}
i := sort.Search(len(ents), func(i int) bool {
e := ents[i]
return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
})
if i == len(ents) {
return nil, false
}
return ents[i], true
}
// Lookup returns the Table of Contents entry for the given path.
//
// To get the root directory, use the empty string.
func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
if r == nil {
return
}
e, ok = r.m[path]
if ok && e.Type == "hardlink" {
e, ok = r.m[e.LinkName]
}
return
}
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
ent, ok := r.Lookup(name)
if !ok {
// TODO: come up with some error plan. This is lazy:
return nil, &os.PathError{
Path: name,
Op: "OpenFile",
Err: os.ErrNotExist,
}
}
if ent.Type != "reg" {
return nil, &os.PathError{
Path: name,
Op: "OpenFile",
Err: errors.New("not a regular file"),
}
}
fr := &fileReader{
r: r,
size: ent.Size,
ents: r.getChunks(ent),
}
return io.NewSectionReader(fr, 0, fr.size), nil
}
func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
if ents, ok := r.chunks[ent.Name]; ok {
return ents
}
return []*TOCEntry{ent}
}
type fileReader struct {
r *Reader
size int64
ents []*TOCEntry // 1 or more reg/chunk entries
}
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
if off >= fr.size {
return 0, io.EOF
}
if off < 0 {
return 0, errors.New("invalid offset")
}
var i int
if len(fr.ents) > 1 {
i = sort.Search(len(fr.ents), func(i int) bool {
return fr.ents[i].ChunkOffset >= off
})
if i == len(fr.ents) {
i = len(fr.ents) - 1
}
}
ent := fr.ents[i]
if ent.ChunkOffset > off {
if i == 0 {
return 0, errors.New("internal error; first chunk offset is non-zero")
}
ent = fr.ents[i-1]
}
// If ent is a chunk of a large file, adjust the ReadAt
// offset by the chunk's offset.
off -= ent.ChunkOffset
finalEnt := fr.ents[len(fr.ents)-1]
gzOff := ent.Offset
// gzBytesRemain is the number of compressed gzip bytes in this
// file remaining, over 1+ gzip chunks.
gzBytesRemain := finalEnt.NextOffset() - gzOff
sr := io.NewSectionReader(fr.r.sr, gzOff, gzBytesRemain)
const maxGZread = 2 << 20
var bufSize = maxGZread
if gzBytesRemain < maxGZread {
bufSize = int(gzBytesRemain)
}
br := bufio.NewReaderSize(sr, bufSize)
if _, err := br.Peek(bufSize); err != nil {
return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
}
gz, err := gzip.NewReader(br)
if err != nil {
return 0, fmt.Errorf("fileReader.ReadAt.gzipNewReader: %v", err)
}
if n, err := io.CopyN(ioutil.Discard, gz, off); n != off || err != nil {
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
}
return io.ReadFull(gz, p)
}
// A Writer writes stargz files.
//
// Use NewWriter to create a new Writer.
type Writer struct {
bw *bufio.Writer
cw *countWriter
toc *jtoc
diffHash hash.Hash // SHA-256 of uncompressed tar
closed bool
gz *gzip.Writer
lastUsername map[int]string
lastGroupname map[int]string
compressionLevel int
// ChunkSize optionally controls the maximum number of bytes
// of data of a regular file that can be written in one gzip
// stream before a new gzip stream is started.
// Zero means to use a default, currently 4 MiB.
ChunkSize int
}
// currentGzipWriter writes to the current w.gz field, which can
// change throughout writing a tar entry.
//
// Additionally, it updates w's SHA-256 of the uncompressed bytes
// of the tar file.
type currentGzipWriter struct{ w *Writer }
func (cgw currentGzipWriter) Write(p []byte) (int, error) {
cgw.w.diffHash.Write(p)
return cgw.w.gz.Write(p)
}
func (w *Writer) chunkSize() int {
if w.ChunkSize <= 0 {
return 4 << 20
}
return w.ChunkSize
}
// NewWriter returns a new stargz writer writing to w.
//
// The writer must be closed to write its trailing table of contents.
func NewWriter(w io.Writer) *Writer {
return NewWriterLevel(w, gzip.BestCompression)
}
// NewWriterLevel returns a new stargz writer writing to w.
// The compression level is configurable.
//
// The writer must be closed to write its trailing table of contents.
func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
bw := bufio.NewWriter(w)
cw := &countWriter{w: bw}
return &Writer{
bw: bw,
cw: cw,
toc: &jtoc{Version: 1},
diffHash: sha256.New(),
compressionLevel: compressionLevel,
}
}
// Close writes the stargz's table of contents and flushes all the
// buffers, returning any error.
func (w *Writer) Close() (digest.Digest, error) {
if w.closed {
return "", nil
}
defer func() { w.closed = true }()
if err := w.closeGz(); err != nil {
return "", err
}
// Write the TOC index.
tocOff := w.cw.n
w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
tw := tar.NewWriter(currentGzipWriter{w})
tocJSON, err := json.MarshalIndent(w.toc, "", "\t")
if err != nil {
return "", err
}
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: TOCTarName,
Size: int64(len(tocJSON)),
}); err != nil {
return "", err
}
if _, err := tw.Write(tocJSON); err != nil {
return "", err
}
if err := tw.Close(); err != nil {
return "", err
}
if err := w.closeGz(); err != nil {
return "", err
}
// And a little footer with pointer to the TOC gzip stream.
if _, err := w.bw.Write(footerBytes(tocOff)); err != nil {
return "", err
}
if err := w.bw.Flush(); err != nil {
return "", err
}
return digest.FromBytes(tocJSON), nil
}
func (w *Writer) closeGz() error {
if w.closed {
return errors.New("write on closed Writer")
}
if w.gz != nil {
if err := w.gz.Close(); err != nil {
return err
}
w.gz = nil
}
return nil
}
// nameIfChanged returns name, unless it was already the value of (*mp)[id],
// in which case it returns the empty string.
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
if name == "" {
return ""
}
if *mp == nil {
*mp = make(map[int]string)
}
if (*mp)[id] == name {
return ""
}
(*mp)[id] = name
return name
}
func (w *Writer) condOpenGz() {
if w.gz == nil {
w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
}
}
// AppendTar reads the tar or tar.gz file from r and appends
// each of its contents to w.
//
// The input r can optionally be gzip compressed but the output will
// always be gzip compressed.
func (w *Writer) AppendTar(r io.Reader) error {
br := bufio.NewReader(r)
var tr *tar.Reader
if isGzip(br) {
// NewReader can't fail if isGzip returned true.
zr, _ := gzip.NewReader(br)
tr = tar.NewReader(zr)
} else {
tr = tar.NewReader(br)
}
for {
h, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
}
if h.Name == TOCTarName {
// It is possible for a layer to be "stargzified" twice during the
// distribution lifecycle. So we reserve "TOCTarName" here to avoid
// duplicated entries in the resulting layer.
continue
}
xattrs := make(map[string][]byte)
const xattrPAXRecordsPrefix = "SCHILY.xattr."
if h.PAXRecords != nil {
for k, v := range h.PAXRecords {
if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
}
}
}
ent := &TOCEntry{
Name: h.Name,
Mode: h.Mode,
UID: h.Uid,
GID: h.Gid,
Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
ModTime3339: formatModtime(h.ModTime),
Xattrs: xattrs,
}
w.condOpenGz()
tw := tar.NewWriter(currentGzipWriter{w})
if err := tw.WriteHeader(h); err != nil {
return err
}
switch h.Typeflag {
case tar.TypeLink:
ent.Type = "hardlink"
ent.LinkName = h.Linkname
case tar.TypeSymlink:
ent.Type = "symlink"
ent.LinkName = h.Linkname
case tar.TypeDir:
ent.Type = "dir"
case tar.TypeReg:
ent.Type = "reg"
ent.Size = h.Size
case tar.TypeChar:
ent.Type = "char"
ent.DevMajor = int(h.Devmajor)
ent.DevMinor = int(h.Devminor)
case tar.TypeBlock:
ent.Type = "block"
ent.DevMajor = int(h.Devmajor)
ent.DevMinor = int(h.Devminor)
case tar.TypeFifo:
ent.Type = "fifo"
default:
return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
}
// We need to keep a reference to the TOC entry for regular files, so that we
// can fill the digest later.
var regFileEntry *TOCEntry
var payloadDigest digest.Digester
if h.Typeflag == tar.TypeReg {
regFileEntry = ent
payloadDigest = digest.Canonical.Digester()
}
if h.Typeflag == tar.TypeReg && ent.Size > 0 {
var written int64
totalSize := ent.Size // save it before we destroy ent
tee := io.TeeReader(tr, payloadDigest.Hash())
for written < totalSize {
if err := w.closeGz(); err != nil {
return err
}
chunkSize := int64(w.chunkSize())
remain := totalSize - written
if remain < chunkSize {
chunkSize = remain
} else {
ent.ChunkSize = chunkSize
}
ent.Offset = w.cw.n
ent.ChunkOffset = written
chunkDigest := digest.Canonical.Digester()
w.condOpenGz()
teeChunk := io.TeeReader(tee, chunkDigest.Hash())
if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
return fmt.Errorf("error copying %q: %v", h.Name, err)
}
ent.ChunkDigest = chunkDigest.Digest().String()
w.toc.Entries = append(w.toc.Entries, ent)
written += chunkSize
ent = &TOCEntry{
Name: h.Name,
Type: "chunk",
}
}
} else {
w.toc.Entries = append(w.toc.Entries, ent)
}
if payloadDigest != nil {
regFileEntry.Digest = payloadDigest.Digest().String()
}
if err := tw.Flush(); err != nil {
return err
}
}
return nil
}
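A minimal sketch of the writer side (illustrative; not part of the vendored file): stargz-ify a tar stream and recover the TOC digest that VerifyTOC expects.

// stargzify appends a tar stream to a new Writer and closes it.
func stargzify(in io.Reader, out io.Writer) (digest.Digest, error) {
	w := NewWriter(out)
	if err := w.AppendTar(in); err != nil {
		return "", err
	}
	// Close writes the TOC and footer and returns the TOC's digest.
	return w.Close()
}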
// DiffID returns the SHA-256 of the uncompressed tar bytes.
// It is only valid to call DiffID after Close.
func (w *Writer) DiffID() string {
return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
}
// footerBytes returns the 51 bytes footer.
func footerBytes(tocOff int64) []byte {
buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
// Extra header indicating the offset of TOCJSON
// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
header := make([]byte, 4)
header[0], header[1] = 'S', 'G'
subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
gz.Header.Extra = append(header, []byte(subfield)...)
gz.Close()
if buf.Len() != FooterSize {
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
}
return buf.Bytes()
}
func parseFooter(p []byte) (tocOffset int64, footerSize int64, rErr error) {
var allErr []error
tocOffset, err := parseEStargzFooter(p)
if err == nil {
return tocOffset, FooterSize, nil
}
allErr = append(allErr, err)
pad := len(p) - legacyFooterSize
if pad < 0 {
pad = 0
}
tocOffset, err = parseLegacyFooter(p[pad:])
if err == nil {
return tocOffset, legacyFooterSize, nil
}
return 0, 0, errorutil.Aggregate(append(allErr, err))
}
func parseEStargzFooter(p []byte) (tocOffset int64, err error) {
if len(p) != FooterSize {
return 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, err
}
extra := zr.Header.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
return 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
}
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
return 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
}
if string(subfield[16:]) != "STARGZ" {
return 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
}
return strconv.ParseInt(string(subfield[:16]), 16, 64)
}
func parseLegacyFooter(p []byte) (tocOffset int64, err error) {
if len(p) != legacyFooterSize {
return 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
}
extra := zr.Header.Extra
if len(extra) != 16+len("STARGZ") {
return 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
if string(extra[16:]) != "STARGZ" {
return 0, fmt.Errorf("legacy: magic string STARGZ not found")
}
return strconv.ParseInt(string(extra[:16]), 16, 64)
}
func formatModtime(t time.Time) string {
if t.IsZero() || t.Unix() == 0 {
return ""
}
return t.UTC().Round(time.Second).Format(time.RFC3339)
}
func trimNamePrefix(name string) string {
// We don't use filepath.Clean here to preserve "/" suffix for directory entry.
return strings.TrimPrefix(name, "./")
}
// countWriter counts how many bytes have been written to its wrapped
// io.Writer.
type countWriter struct {
w io.Writer
n int64
}
func (cw *countWriter) Write(p []byte) (n int, err error) {
n, err = cw.w.Write(p)
cw.n += int64(n)
return
}
// isGzip reports whether br is positioned right before an upcoming gzip stream.
// It does not consume any bytes from br.
func isGzip(br *bufio.Reader) bool {
const (
gzipID1 = 0x1f
gzipID2 = 0x8b
gzipDeflate = 8
)
peek, _ := br.Peek(3)
return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
}


@@ -0,0 +1,9 @@
module github.com/containerd/stargz-snapshotter/estargz
go 1.13
require (
github.com/opencontainers/go-digest v1.0.0
github.com/pkg/errors v0.9.1
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
)


@@ -0,0 +1,6 @@
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@@ -0,0 +1,254 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"os"
"path"
"time"
digest "github.com/opencontainers/go-digest"
)
const (
// TOCTarName is the name of the JSON file in the tar archive in the
// table of contents gzip stream.
TOCTarName = "stargz.index.json"
// FooterSize is the number of bytes in the footer
//
// The footer is an empty gzip stream with no compression and an Extra
// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
// number is the offset to the gzip stream of JSON TOC.
//
// 51 comes from:
//
// 10 bytes gzip header
// 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
// 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
// 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
// 5 bytes flate header
// 8 bytes gzip footer
// (End of the eStargz blob)
//
// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
FooterSize = 51
// legacyFooterSize is the number of bytes in the legacy stargz footer.
//
// 47 comes from:
//
// 10 byte gzip header +
// 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
// 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
// 5 byte flate header
// 8 byte gzip footer (two little endian uint32s: digest, size)
legacyFooterSize = 47
// TOCJSONDigestAnnotation is an annotation for image manifest. This stores the
// digest of the TOC JSON
TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// PrefetchLandmark is a file entry which indicates the end position of
// prefetch in the stargz file.
PrefetchLandmark = ".prefetch.landmark"
// NoPrefetchLandmark is a file entry which indicates that no prefetch should
// occur in the stargz file.
NoPrefetchLandmark = ".no.prefetch.landmark"
landmarkContents = 0xf
)
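// Editorial sketch: per the layout documented above, a conforming footer
// can be produced with an empty, uncompressed gzip stream whose Extra
// field carries the 'S','G' subfield framing "%016xSTARGZ". This assumes
// bytes, compress/gzip, encoding/binary and fmt are imported; the helper
// name is illustrative, not the upstream writer:
//
//	func exampleFooter(tocOff int64) []byte {
//		buf := new(bytes.Buffer)
//		gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression)
//		subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
//		extra := make([]byte, 4+len(subfield))
//		extra[0], extra[1] = 'S', 'G' // SI1, SI2
//		binary.LittleEndian.PutUint16(extra[2:4], uint16(len(subfield)))
//		copy(extra[4:], subfield)
//		gz.Extra = extra
//		gz.Close()
//		return buf.Bytes() // FooterSize (51) bytes
//	}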
// jtoc is the JSON-serialized table of contents index of the files in the stargz file.
type jtoc struct {
Version int `json:"version"`
Entries []*TOCEntry `json:"entries"`
}
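// For illustration, an abbreviated stargz.index.json for a layer holding
// one small regular file might look like this (all values are examples):
//
//	{
//	  "version": 1,
//	  "entries": [
//	    {"name": "app/", "type": "dir", "mode": 493},
//	    {"name": "app/hello", "type": "reg", "size": 6, "mode": 420,
//	     "offset": 512, "digest": "sha256:...", "chunkDigest": "sha256:..."}
//	  ]
//	}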
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
type TOCEntry struct {
// Name is the tar entry's name. It is the complete path
// stored in the tar file, not just the base name.
Name string `json:"name"`
// Type is one of "dir", "reg", "symlink", "hardlink", "char",
// "block", "fifo", or "chunk".
// The "chunk" type is used for regular file data chunks past the first
// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
// ChunkOffset, and ChunkSize populated.
Type string `json:"type"`
// Size, for regular files, is the logical size of the file.
Size int64 `json:"size,omitempty"`
// ModTime3339 is the modification time of the tar entry. Empty
// means zero or unknown. Otherwise it's in UTC RFC3339
// format. Use the ModTime method to access the time.Time value.
ModTime3339 string `json:"modtime,omitempty"`
modTime time.Time
// LinkName, for symlinks and hardlinks, is the link target.
LinkName string `json:"linkName,omitempty"`
// Mode is the permission and mode bits.
Mode int64 `json:"mode,omitempty"`
// UID is the user ID of the owner.
UID int `json:"uid,omitempty"`
// GID is the group ID of the owner.
GID int `json:"gid,omitempty"`
// Uname is the username of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same UID.
Uname string `json:"userName,omitempty"`
// Gname is the group name of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same GID.
Gname string `json:"groupName,omitempty"`
// Offset, for regular files, provides the offset in the
// stargz file to the file's data bytes. See ChunkOffset and
// ChunkSize.
Offset int64 `json:"offset,omitempty"`
nextOffset int64 // the Offset of the next entry with a non-zero Offset
// DevMajor is the major device number for "char" and "block" types.
DevMajor int `json:"devMajor,omitempty"`
// DevMinor is the minor device number for "char" and "block" types.
DevMinor int `json:"devMinor,omitempty"`
// NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry.
NumLink int
// Xattrs are the extended attribute for the entry.
Xattrs map[string][]byte `json:"xattrs,omitempty"`
// Digest stores the OCI checksum for regular files payload.
// It has the form "sha256:abcdef01234....".
Digest string `json:"digest,omitempty"`
// ChunkOffset is non-zero if this is a chunk of a large,
// regular file. If so, the Offset is where the gzip header of
// ChunkSize bytes at ChunkOffset in Name begins.
//
// In serialized form, a "chunkSize" JSON field of zero means
// that the chunk goes to the end of the file. After reading
// from the stargz TOC, though, the ChunkSize is initialized
// to a non-zero value when Type is either "reg" or
// "chunk".
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkSize int64 `json:"chunkSize,omitempty"`
// ChunkDigest stores an OCI digest of the chunk. This must be formed
// as "sha256:0123abcd...".
ChunkDigest string `json:"chunkDigest,omitempty"`
children map[string]*TOCEntry
}
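// Editorial note: for a large regular file the TOC holds one "reg" entry
// for the first chunk, followed by "chunk" entries for the rest. For
// example, a 10 MiB file chunked at 4 MiB yields ChunkOffsets of 0,
// 4 MiB and 8 MiB, with each chunk independently verifiable through its
// ChunkDigest.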
// ModTime returns the entry's modification time.
func (e *TOCEntry) ModTime() time.Time { return e.modTime }
// NextOffset returns the position (relative to the start of the
// stargz file) of the next gzip boundary after e.Offset.
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
if e.children == nil {
e.children = make(map[string]*TOCEntry)
}
if child.Type == "dir" {
e.NumLink++ // Entry ".." in the subdirectory links to this directory
}
e.children[baseName] = child
}
// isDataType reports whether TOCEntry is a regular file or chunk (something that
// contains regular file data).
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
// Stat returns a FileInfo value representing e.
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
// ForeachChild calls f for each child item. If f returns false, iteration ends.
// If e is not a directory, f is not called.
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
for name, ent := range e.children {
if !f(name, ent) {
return
}
}
}
// LookupChild returns the directory e's child by its base name.
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
child, ok = e.children[baseName]
return
}
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
type fileInfo struct{ e *TOCEntry }
var _ os.FileInfo = fileInfo{}
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
func (fi fileInfo) Size() int64 { return fi.e.Size }
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
func (fi fileInfo) Sys() interface{} { return fi.e }
func (fi fileInfo) Mode() (m os.FileMode) {
m = os.FileMode(fi.e.Mode) & os.ModePerm
switch fi.e.Type {
case "dir":
m |= os.ModeDir
case "symlink":
m |= os.ModeSymlink
case "char":
m |= os.ModeDevice | os.ModeCharDevice
case "block":
m |= os.ModeDevice
case "fifo":
m |= os.ModeNamedPipe
}
// TODO: ModeSetuid, ModeSetgid, if/as needed.
return m
}
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
// in an eStargz blob.
type TOCEntryVerifier interface {
// Verifier provides a content verifier that can be used for verifying the
// contents of the specified TOCEntry.
Verifier(ce *TOCEntry) (digest.Verifier, error)
}
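// Editorial sketch of the verification flow (names are illustrative):
// obtain a digest.Verifier for an entry, feed it the chunk bytes, then
// check the result:
//
//	v, err := verifiers.Verifier(ent) // verifiers is a TOCEntryVerifier
//	if err == nil {
//		v.Write(chunkBytes)
//		ok := v.Verified() // true iff the bytes match ent's recorded digest
//	}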

View File

@@ -1,4 +1,4 @@
-// Copyright 2018 Google LLC All Rights Reserved.
+// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,36 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package v1util
+package and
import (
"io"
)
-// readAndCloser implements io.ReadCloser by reading from a particular io.Reader
+// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
// and then calling the provided "Close()" method.
-type readAndCloser struct {
+type ReadCloser struct {
io.Reader
CloseFunc func() error
}
-var _ io.ReadCloser = (*readAndCloser)(nil)
+var _ io.ReadCloser = (*ReadCloser)(nil)
// Close implements io.ReadCloser
-func (rac *readAndCloser) Close() error {
+func (rac *ReadCloser) Close() error {
return rac.CloseFunc()
}
-// writeAndCloser implements io.WriteCloser by writing to a particular io.Writer
+// WriteCloser implements io.WriteCloser by writing to a particular io.Writer
// and then calling the provided "Close()" method.
-type writeAndCloser struct {
+type WriteCloser struct {
io.Writer
CloseFunc func() error
}
-var _ io.WriteCloser = (*writeAndCloser)(nil)
+var _ io.WriteCloser = (*WriteCloser)(nil)
// Close implements io.WriteCloser
-func (wac *writeAndCloser) Close() error {
+func (wac *WriteCloser) Close() error {
return wac.CloseFunc()
}

View File

@@ -0,0 +1,54 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package estargz
import (
"bytes"
"io"
"io/ioutil"
"github.com/containerd/stargz-snapshotter/estargz"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
// Assert that what we're returning is an io.ReadCloser
var _ io.ReadCloser = (*estargz.Blob)(nil)
// ReadCloser reads uncompressed tarball input from the io.ReadCloser and
// returns:
// * An io.ReadCloser from which compressed data may be read, and
// * A v1.Hash with the hash of the estargz table of contents, or
// * An error if the estargz processing encountered a problem.
//
// Refer to estargz for the options:
// https://pkg.go.dev/github.com/containerd/stargz-snapshotter@v0.2.0/estargz#Option
func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) {
defer r.Close()
// TODO(#876): Avoid buffering into memory.
bs, err := ioutil.ReadAll(r)
if err != nil {
return nil, v1.Hash{}, err
}
br := bytes.NewReader(bs)
rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...)
if err != nil {
return nil, v1.Hash{}, err
}
h, err := v1.NewHash(rc.TOCDigest().String())
return rc, h, err
}
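// Editorial usage sketch (hypothetical helper, not part of this file
// upstream): convert an uncompressed layer tarball into an eStargz blob
// and surface the TOC digest that callers typically record under
// estargz.TOCJSONDigestAnnotation.
func exampleToEstargz(tar io.ReadCloser) (io.ReadCloser, string, error) {
blob, toc, err := ReadCloser(tar)
if err != nil {
return nil, "", err
}
return blob, toc.String(), nil
}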

View File

@@ -0,0 +1,96 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gzip
import (
"bytes"
"compress/gzip"
"io"
"github.com/google/go-containerregistry/pkg/v1/internal/and"
)
var gzipMagicHeader = []byte{'\x1f', '\x8b'}
// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses gzip.BestSpeed for the compression level.
func ReadCloser(r io.ReadCloser) io.ReadCloser {
return ReadCloserLevel(r, gzip.BestSpeed)
}
// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// Refer to compress/gzip for the level:
// https://golang.org/pkg/compress/gzip/#pkg-constants
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
pr, pw := io.Pipe()
// Returns err so we can pw.CloseWithError(err)
go func() error {
// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
// Context: https://golang.org/issue/24283
gw, err := gzip.NewWriterLevel(pw, level)
if err != nil {
return pw.CloseWithError(err)
}
if _, err := io.Copy(gw, r); err != nil {
defer r.Close()
defer gw.Close()
return pw.CloseWithError(err)
}
defer pw.Close()
defer r.Close()
defer gw.Close()
return nil
}()
return pr
}
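// Editorial note: because compression runs in the goroutine above, the
// returned pipe reader streams lazily; a caller can hand it to an HTTP
// request body without buffering the whole payload. Errors from
// gzip.NewWriterLevel or io.Copy surface on the reader via
// pw.CloseWithError.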
// UnzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompressed data may be read.
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
gr, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
return &and.ReadCloser{
Reader: gr,
CloseFunc: func() error {
// If the unzip fails, then this seems to return the same
// error as the read. We don't want this to interfere with
// us closing the main ReadCloser, since this could leave
// an open file descriptor (fails on Windows).
gr.Close()
return r.Close()
},
}, nil
}
// Is detects whether the input stream is compressed.
func Is(r io.Reader) (bool, error) {
magicHeader := make([]byte, 2)
n, err := r.Read(magicHeader)
if n == 0 && err == io.EOF {
return false, nil
}
if err != nil {
return false, err
}
return bytes.Equal(magicHeader, gzipMagicHeader), nil
}
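// Editorial note: Is consumes up to two bytes from r, so it should be
// given a stream the caller can reopen; tarball.LayerFromOpener does
// exactly that by calling it on a fresh reader from its Opener. A
// minimal sketch (hypothetical helper):
func exampleIsGzipped(b []byte) (bool, error) {
return Is(bytes.NewReader(b)) // safe: b itself is untouched
}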

View File

@@ -0,0 +1,62 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"encoding/hex"
"fmt"
"hash"
"io"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/and"
)
type verifyReader struct {
inner io.Reader
hasher hash.Hash
expected v1.Hash
}
// Read implements io.Reader
func (vc *verifyReader) Read(b []byte) (int, error) {
n, err := vc.inner.Read(b)
if err == io.EOF {
got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size())))
if want := vc.expected.Hex; got != want {
return n, fmt.Errorf("error verifying %s checksum; got %q, want %q",
vc.expected.Algorithm, got, want)
}
}
return n, err
}
// ReadCloser wraps the given io.ReadCloser to verify that its contents match
// the provided v1.Hash before io.EOF is returned.
func ReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) {
w, err := v1.Hasher(h.Algorithm)
if err != nil {
return nil, err
}
r2 := io.TeeReader(r, w)
return &and.ReadCloser{
Reader: &verifyReader{
inner: r2,
hasher: w,
expected: h,
},
CloseFunc: r.Close,
}, nil
}
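// Editorial usage sketch (hypothetical helper): draining a wrapped
// stream to EOF either succeeds, proving the payload matches h, or
// surfaces the checksum-mismatch error from verifyReader.Read.
func exampleVerify(r io.ReadCloser, h v1.Hash) error {
vr, err := ReadCloser(r, h)
if err != nil {
return err
}
defer vr.Close()
buf := make([]byte, 32*1024)
for {
if _, err := vr.Read(buf); err != nil {
if err == io.EOF {
return nil // digest matched
}
return err // includes digest mismatch
}
}
}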

View File

@@ -26,9 +26,9 @@ import (
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/tarball" "github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
) )
const whiteoutPrefix = ".wh." const whiteoutPrefix = ".wh."
@@ -335,7 +335,7 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
b := w.Bytes() b := w.Bytes()
// gzip the contents, then create the layer // gzip the contents, then create the layer
opener := func() (io.ReadCloser, error) { opener := func() (io.ReadCloser, error) {
return v1util.GzipReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil return gzip.ReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil
} }
layer, err = tarball.LayerFromOpener(opener) layer, err = tarball.LayerFromOpener(opener)
if err != nil { if err != nil {

View File

@@ -18,8 +18,8 @@ import (
"io" "io"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
) )
// CompressedLayer represents the bare minimum interface a natively // CompressedLayer represents the bare minimum interface a natively
@@ -49,7 +49,7 @@ func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return v1util.GunzipReadCloser(r) return gzip.UnzipReadCloser(r)
} }
// DiffID implements v1.Layer // DiffID implements v1.Layer

View File

@@ -20,8 +20,8 @@ import (
"sync" "sync"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
) )
// UncompressedLayer represents the bare minimum interface a natively // UncompressedLayer represents the bare minimum interface a natively
@@ -54,7 +54,7 @@ func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return v1util.GzipReadCloser(u), nil return gzip.ReadCloser(u), nil
} }
// Digest implements v1.Layer // Digest implements v1.Layer

View File

@@ -28,10 +28,10 @@ import (
"github.com/google/go-containerregistry/pkg/logs" "github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/verify"
"github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
) )
// ErrSchema1 indicates that we received a schema1 manifest from the registry. // ErrSchema1 indicates that we received a schema1 manifest from the registry.
@@ -368,7 +368,7 @@ func (f *fetcher) fetchBlob(ctx context.Context, h v1.Hash) (io.ReadCloser, erro
return nil, err return nil, err
} }
return v1util.VerifyReadCloser(resp.Body, h) return verify.ReadCloser(resp.Body, h)
} }
func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) { func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) {

View File

@@ -24,10 +24,10 @@ import (
"github.com/google/go-containerregistry/pkg/internal/redact" "github.com/google/go-containerregistry/pkg/internal/redact"
"github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/verify"
"github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
) )
var acceptableImageMediaTypes = []types.MediaType{ var acceptableImageMediaTypes = []types.MediaType{
@@ -177,7 +177,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
continue continue
} }
return v1util.VerifyReadCloser(resp.Body, rl.digest) return verify.ReadCloser(resp.Body, rl.digest)
} }
return nil, lastErr return nil, lastErr

View File

@@ -27,9 +27,9 @@ import (
"github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
) )
type image struct { type image struct {
@@ -148,7 +148,7 @@ func (i *image) areLayersCompressed() (bool, error) {
return false, err return false, err
} }
defer blob.Close() defer blob.Close()
return v1util.IsGzipped(blob) return gzip.Is(blob)
} }
func (i *image) loadTarDescriptorAndConfig() error { func (i *image) loadTarDescriptorAndConfig() error {

View File

@@ -22,7 +22,11 @@ import (
"os" "os"
"sync" "sync"
"github.com/containerd/stargz-snapshotter/estargz"
v1 "github.com/google/go-containerregistry/pkg/v1" v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/and"
gestargz "github.com/google/go-containerregistry/pkg/v1/internal/estargz"
ggzip "github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util" "github.com/google/go-containerregistry/pkg/v1/v1util"
) )
@@ -34,28 +38,50 @@ type layer struct {
compressedopener Opener compressedopener Opener
uncompressedopener Opener uncompressedopener Opener
compression int compression int
annotations map[string]string
estgzopts []estargz.Option
} }
// Descriptor implements partial.withDescriptor.
func (l *layer) Descriptor() (*v1.Descriptor, error) {
digest, err := l.Digest()
if err != nil {
return nil, err
}
return &v1.Descriptor{
Size: l.size,
Digest: digest,
Annotations: l.annotations,
MediaType: types.DockerLayer,
}, nil
}
// Digest implements v1.Layer
func (l *layer) Digest() (v1.Hash, error) { func (l *layer) Digest() (v1.Hash, error) {
return l.digest, nil return l.digest, nil
} }
// DiffID implements v1.Layer
func (l *layer) DiffID() (v1.Hash, error) { func (l *layer) DiffID() (v1.Hash, error) {
return l.diffID, nil return l.diffID, nil
} }
// Compressed implements v1.Layer
func (l *layer) Compressed() (io.ReadCloser, error) { func (l *layer) Compressed() (io.ReadCloser, error) {
return l.compressedopener() return l.compressedopener()
} }
// Uncompressed implements v1.Layer
func (l *layer) Uncompressed() (io.ReadCloser, error) { func (l *layer) Uncompressed() (io.ReadCloser, error) {
return l.uncompressedopener() return l.uncompressedopener()
} }
// Size implements v1.Layer
func (l *layer) Size() (int64, error) { func (l *layer) Size() (int64, error) {
return l.size, nil return l.size, nil
} }
// MediaType implements v1.Layer
func (l *layer) MediaType() (types.MediaType, error) { func (l *layer) MediaType() (types.MediaType, error) {
return types.DockerLayer, nil return types.DockerLayer, nil
} }
@@ -98,6 +124,15 @@ func WithCompressedCaching(l *layer) {
} }
} }
// WithEstargzOptions is a functional option that allow the caller to pass
// through estargz.Options to the underlying compression layer. This is
// only meaningful when estargz is enabled.
func WithEstargzOptions(opts ...estargz.Option) LayerOption {
return func(l *layer) {
l.estgzopts = opts
}
}
// LayerFromFile returns a v1.Layer given a tarball // LayerFromFile returns a v1.Layer given a tarball
func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) { func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) {
opener := func() (io.ReadCloser, error) { opener := func() (io.ReadCloser, error) {
@@ -123,13 +158,14 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
} }
defer rc.Close() defer rc.Close()
compressed, err := v1util.IsGzipped(rc) compressed, err := ggzip.Is(rc)
if err != nil { if err != nil {
return nil, err return nil, err
} }
layer := &layer{ layer := &layer{
compression: gzip.BestSpeed, compression: gzip.BestSpeed,
annotations: make(map[string]string, 1),
} }
if compressed { if compressed {
@@ -139,6 +175,38 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return ggzip.UnzipReadCloser(urc)
}
} else if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" {
layer.compressedopener = func() (io.ReadCloser, error) {
crc, err := opener()
if err != nil {
return nil, err
}
eopts := append(layer.estgzopts, estargz.WithCompressionLevel(layer.compression))
rc, h, err := gestargz.ReadCloser(crc, eopts...)
if err != nil {
return nil, err
}
layer.annotations[estargz.TOCJSONDigestAnnotation] = h.String()
return &and.ReadCloser{
Reader: rc,
CloseFunc: func() error {
err := rc.Close()
if err != nil {
return err
}
// As an optimization, leverage the DiffID exposed by the estargz ReadCloser
layer.diffID, err = v1.NewHash(rc.DiffID().String())
return err
},
}, nil
}
layer.uncompressedopener = func() (io.ReadCloser, error) {
urc, err := layer.compressedopener()
if err != nil {
return nil, err
}
return v1util.GunzipReadCloser(urc) return v1util.GunzipReadCloser(urc)
} }
} else { } else {
@@ -148,7 +216,7 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return v1util.GzipReadCloserLevel(crc, layer.compression), nil return ggzip.ReadCloserLevel(crc, layer.compression), nil
} }
} }
@@ -160,8 +228,11 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
return nil, err return nil, err
} }
if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil { empty := v1.Hash{}
return nil, err if layer.diffID == empty {
if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil {
return nil, err
}
} }
return layer, nil return layer, nil
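Editorial aside: with the experiment on, the estargz path above is exercised transparently by the tarball helpers. A hedged usage sketch (estargz.WithPrioritizedFiles is an upstream option; the file list and layer path are illustrative):

os.Setenv("GGCR_EXPERIMENT_ESTARGZ", "1")
l, err := tarball.LayerFromFile("layer.tar",
tarball.WithEstargzOptions(estargz.WithPrioritizedFiles([]string{"/entrypoint"})))

The resulting layer's Descriptor then carries the TOC digest under estargz.TOCJSONDigestAnnotation.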

View File

@@ -14,48 +14,10 @@
package v1util
-import (
-"encoding/hex"
-"fmt"
-"hash"
-"io"
-v1 "github.com/google/go-containerregistry/pkg/v1"
-)
-type verifyReader struct {
-inner io.Reader
-hasher hash.Hash
-expected v1.Hash
-}
-// Read implements io.Reader
-func (vc *verifyReader) Read(b []byte) (int, error) {
-n, err := vc.inner.Read(b)
-if err == io.EOF {
-got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size())))
-if want := vc.expected.Hex; got != want {
-return n, fmt.Errorf("error verifying %s checksum; got %q, want %q",
-vc.expected.Algorithm, got, want)
-}
-}
-return n, err
-}
+import "github.com/google/go-containerregistry/pkg/v1/internal/verify"
// VerifyReadCloser wraps the given io.ReadCloser to verify that its contents match
// the provided v1.Hash before io.EOF is returned.
-func VerifyReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) {
-w, err := v1.Hasher(h.Algorithm)
-if err != nil {
-return nil, err
-}
-r2 := io.TeeReader(r, w)
-return &readAndCloser{
-Reader: &verifyReader{
-inner: r2,
-hasher: w,
-expected: h,
-},
-CloseFunc: r.Close,
-}, nil
-}
+// TODO(#873): Remove this package.
+// Deprecated: please use v1.VerifyReadCloser
+var VerifyReadCloser = verify.ReadCloser

View File

@@ -14,81 +14,30 @@
package v1util
-import (
-"bytes"
-"compress/gzip"
-"io"
-)
-var gzipMagicHeader = []byte{'\x1f', '\x8b'}
+import "github.com/google/go-containerregistry/pkg/v1/internal/gzip"
// GzipReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses gzip.BestSpeed for the compression level.
-func GzipReadCloser(r io.ReadCloser) io.ReadCloser {
-return GzipReadCloserLevel(r, gzip.BestSpeed)
-}
+// TODO(#873): Remove this package.
+// Deprecated: please move off of this API
+var GzipReadCloser = gzip.ReadCloser
// GzipReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// Refer to compress/gzip for the level:
// https://golang.org/pkg/compress/gzip/#pkg-constants
-func GzipReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
-pr, pw := io.Pipe()
-// Returns err so we can pw.CloseWithError(err)
-go func() error {
-// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
-// Context: https://golang.org/issue/24283
-gw, err := gzip.NewWriterLevel(pw, level)
-if err != nil {
-return pw.CloseWithError(err)
-}
-if _, err := io.Copy(gw, r); err != nil {
-defer r.Close()
-defer gw.Close()
-return pw.CloseWithError(err)
-}
-defer pw.Close()
-defer r.Close()
-defer gw.Close()
-return nil
-}()
-return pr
-}
+// TODO(#873): Remove this package.
+// Deprecated: please move off of this API
+var GzipReadCloserLevel = gzip.ReadCloserLevel
// GunzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompressed data may be read.
-func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
-gr, err := gzip.NewReader(r)
-if err != nil {
-return nil, err
-}
-return &readAndCloser{
-Reader: gr,
-CloseFunc: func() error {
-// If the unzip fails, then this seems to return the same
-// error as the read. We don't want this to interfere with
-// us closing the main ReadCloser, since this could leave
-// an open file descriptor (fails on Windows).
-gr.Close()
-return r.Close()
-},
-}, nil
-}
+// TODO(#873): Remove this package.
+// Deprecated: please move off of this API
+var GunzipReadCloser = gzip.UnzipReadCloser
-// IsGzipped detects whether the input stream is compressed.
-func IsGzipped(r io.Reader) (bool, error) {
-magicHeader := make([]byte, 2)
-n, err := r.Read(magicHeader)
-if n == 0 && err == io.EOF {
-return false, nil
-}
-if err != nil {
-return false, err
-}
-return bytes.Equal(magicHeader, gzipMagicHeader), nil
-}
+// TODO(#873): Remove this package.
+// Deprecated: please move off of this API
+var IsGzipped = gzip.Is

View File

@@ -1 +1,4 @@
Aaron Lehmann <aaronl@vitelus.com> <aaron.lehmann@docker.com>
Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com> Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
Haibing Zhou <zhouhaibing089@gmail.com>

View File

@@ -1,12 +1,28 @@
-approve_by_comment: true
-approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
-reject_regex: ^Rejected
-reset_on_push: true
-author_approval: ignored
-signed_off_by:
-  required: true
-reviewers:
-  teams:
-    - go-digest-maintainers
-  name: default
-  required: 2
+version: 2
+
+requirements:
+  signed_off_by:
+    required: true
+
+always_pending:
+  title_regex: '^WIP'
+  explanation: 'Work in progress...'
+
+group_defaults:
+  required: 2
+  approve_by_comment:
+    enabled: true
+    approve_regex: '^LGTM'
+    reject_regex: '^Rejected'
+  reset_on_push:
+    enabled: true
+  author_approval:
+    ignored: true
+  conditions:
+    branches:
+      - master
+
+groups:
+  go-digest:
+    teams:
+      - go-digest-maintainers

View File

@@ -1,4 +1,5 @@
language: go
go:
-  - 1.7
+  - 1.12.x
+  - 1.13.x
  - master

View File

@@ -176,6 +176,7 @@
END OF TERMS AND CONDITIONS
+Copyright 2019, 2020 OCI Contributors
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -1,9 +1,5 @@
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
Brandon Philips <brandon.philips@coreos.com> (@philips)
Brendan Burns <bburns@microsoft.com> (@brendandburns)
Derek McGowan <derek@mcgstyle.net> (@dmcgowan) Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
Jason Bouzane <jbouzane@google.com> (@jbouzane) Stephen Day <stevvooe@gmail.com> (@stevvooe)
John Starks <jostarks@microsoft.com> (@jstarks) Vincent Batts <vbatts@hashbangbash.com> (@vbatts)
Jonathan Boulle <jon.boulle@coreos.com> (@jonboulle) Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> (@AkihiroSuda)
Stephen Day <stephen.day@docker.com> (@stevvooe) Sebastiaan van Stijn <github@gone.nl> (@thaJeztah)
Vincent Batts <vbatts@redhat.com> (@vbatts)

View File

@@ -8,20 +8,16 @@ Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) fo
# What is a digest?

-A digest is just a hash.
+A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function).

-The most common use case for a digest is to create a content
-identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage)
-systems:
+The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems:

```go
id := digest.FromBytes([]byte("my content"))
```

-In the example above, the id can be used to uniquely identify
-the byte slice "my content". This allows two disparate applications
-to agree on a verifiable identifier without having to trust one
-another.
+In the example above, the id can be used to uniquely identify the byte slice "my content".
+This allows two disparate applications to agree on a verifiable identifier without having to trust one another.

An identifying digest can be verified, as follows:
@@ -31,8 +27,7 @@ if id != digest.FromBytes([]byte("my content")) {
}
```

-A `Verifier` type can be used to handle cases where an `io.Reader`
-makes more sense:
+A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:

```go
rd := getContent()
@@ -44,33 +39,28 @@ if !verifier.Verified() {
}
```

-Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this
-can power a rich, safe, content distribution system.
+Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.

# Usage

-While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is
-considered the best resource, a few important items need to be called
-out when using this package.
+While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.

-1. Make sure to import the hash implementations into your application
-   or the package will panic. You should have something like the
-   following in the main (or other entrypoint) of your application:
+1. Make sure to import the hash implementations into your application or the package will panic.
+   You should have something like the following in the main (or other entrypoint) of your application:

```go
import (
	_ "crypto/sha256"
	_ "crypto/sha512"
)
```

   This may seem inconvenient but it allows you replace the hash
   implementations with others, such as https://github.com/stevvooe/resumable.

-2. Even though `digest.Digest` may be assemable as a string, _always_
-   verify your input with `digest.Parse` or use `Digest.Validate`
-   when accepting untrusted input. While there are measures to
-   avoid common problems, this will ensure you have valid digests
-   in the rest of your application.
+2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
+   While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.

# Stability
@@ -80,25 +70,27 @@ As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).

# Contributing

-This package is considered fairly complete. It has been in production
-in thousands (millions?) of deployments and is fairly battle-hardened.
-New additions will be met with skepticism. If you think there is a
-missing feature, please file a bug clearly describing the problem and
-the alternatives you tried before submitting a PR.
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.

-# Reporting security issues
+## Code of Conduct

-Please DO NOT file a public issue, instead send your report privately to
-security@opencontainers.org.
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].

-The maintainers take security seriously. If you discover a security issue,
-please bring it to their attention right away!
+## Security

-If you are reporting a security issue, do not create an issue or file a pull
-request on GitHub. Instead, disclose the issue responsibly by sending an email
-to security@opencontainers.org (which is inhabited only by the maintainers of
-the various OCI projects).
+If you find an issue, please follow the [security][security] protocol to report it.

# Copyright and license

-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the [Apache 2.0 license](LICENSE.code). This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md

View File

@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");

View File

@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -29,8 +30,13 @@
//
// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
-// In this case, the string "sha256" is the algorithm and the hex bytes are
-// the "digest".
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified. Currently, all supported algorithms have their
+// digests encoded in hex strings.
+//
+// In the example above, the string "sha256" is the algorithm and the hex bytes
+// are the "digest".
//
// Because the Digest type is simply a string, once a valid Digest is
// obtained, comparisons are cheap, quick and simple to express with the

vendor/github.com/opencontainers/go-digest/go.mod generated vendored Normal file
View File

@@ -0,0 +1,3 @@
module github.com/opencontainers/go-digest
go 1.13

View File

@@ -1,3 +1,4 @@
+// Copyright 2019, 2020 OCI Contributors
// Copyright 2017 Docker, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");

vendor/modules.txt vendored
View File

@@ -11,6 +11,10 @@ github.com/PuerkitoBio/urlesc
github.com/alessio/shellescape
# github.com/containerd/containerd v1.3.0
github.com/containerd/containerd/errdefs
+# github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201217071531-2b97b583765b
+## explicit
+github.com/containerd/stargz-snapshotter/estargz
+github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
# github.com/docker/cli v0.0.0-20200303162255-7d407207c304
@@ -100,7 +104,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.2.2-0.20201213184227-8b5370a4d663
+# github.com/google/go-containerregistry v0.2.2-0.20201217211853-687e0e365894
## explicit
github.com/google/go-containerregistry/cmd/crane/cmd
github.com/google/go-containerregistry/pkg/authn
@@ -119,6 +123,10 @@ github.com/google/go-containerregistry/pkg/v1
github.com/google/go-containerregistry/pkg/v1/cache
github.com/google/go-containerregistry/pkg/v1/daemon
github.com/google/go-containerregistry/pkg/v1/empty
+github.com/google/go-containerregistry/pkg/v1/internal/and
+github.com/google/go-containerregistry/pkg/v1/internal/estargz
+github.com/google/go-containerregistry/pkg/v1/internal/gzip
+github.com/google/go-containerregistry/pkg/v1/internal/verify
github.com/google/go-containerregistry/pkg/v1/layout
github.com/google/go-containerregistry/pkg/v1/match
github.com/google/go-containerregistry/pkg/v1/mutate
@@ -181,7 +189,7 @@ github.com/mitchellh/mapstructure
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
-# github.com/opencontainers/go-digest v1.0.0-rc1
+# github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/image-spec/specs-go
@@ -227,7 +235,7 @@ golang.org/x/net/proxy
# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore