Mirror of https://github.com/ko-build/ko.git (synced 2025-02-01 19:14:40 +02:00)
Bump github.com/sigstore/cosign/v2 from 2.0.0-rc.2 to 2.0.0 (#965)
Bumps [github.com/sigstore/cosign/v2](https://github.com/sigstore/cosign) from 2.0.0-rc.2 to 2.0.0.
- [Release notes](https://github.com/sigstore/cosign/releases)
- [Changelog](https://github.com/sigstore/cosign/blob/main/CHANGELOG.md)
- [Commits](https://github.com/sigstore/cosign/compare/v2.0.0-rc.2...v2.0.0)

---
updated-dependencies:
- dependency-name: github.com/sigstore/cosign/v2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
parent 03f4aed682
commit d5125daacd

go.mod (16 lines changed)
@@ -10,9 +10,9 @@ require (
 	github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936
 	github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b
 	github.com/google/go-cmp v0.5.9
-	github.com/google/go-containerregistry v0.13.0
+	github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28
 	github.com/opencontainers/image-spec v1.1.0-rc2
-	github.com/sigstore/cosign/v2 v2.0.0-rc.2
+	github.com/sigstore/cosign/v2 v2.0.0
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/viper v1.15.0
 	go.uber.org/automaxprocs v1.5.1
@@ -24,7 +24,7 @@ require (
 )

 require (
-	cloud.google.com/go/compute v1.14.0 // indirect
+	cloud.google.com/go/compute v1.18.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
@@ -108,19 +108,19 @@ require (
 	go.mongodb.org/mongo-driver v1.10.2 // indirect
 	golang.org/x/crypto v0.6.0 // indirect
 	golang.org/x/mod v0.8.0 // indirect
-	golang.org/x/net v0.6.0 // indirect
+	golang.org/x/net v0.7.0 // indirect
 	golang.org/x/oauth2 v0.5.0 // indirect
 	golang.org/x/sys v0.5.0 // indirect
 	golang.org/x/term v0.5.0 // indirect
 	golang.org/x/text v0.7.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6 // indirect
-	google.golang.org/grpc v1.52.3 // indirect
+	google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
+	google.golang.org/grpc v1.53.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
-	k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
+	k8s.io/klog/v2 v2.90.0 // indirect
+	k8s.io/utils v0.0.0-20230115233650-391b47cb4029 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
go.sum (38 lines changed)
@ -23,8 +23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
|
||||
cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
|
||||
cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
|
||||
cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
@ -126,7 +126,7 @@ github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04/go.mod h1:Z+bXnIbhKJYSvxNwsNnwde7pDKxuqlEZCbUBoTwAqf0=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 h1:9Qh4lJ/KMr5iS1zfZ8I97+3MDpiKjl+0lZVUNBhdvRs=
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08/go.mod h1:MAuu1uDJNOS3T3ui0qmKdPUwm59+bO19BbTph2wZafE=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
@ -299,8 +299,8 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
|
||||
github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
|
||||
github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28 h1:gFDKHwyCxpzgUozSOM8eLCx0V7muSr30QYU2QH+p48E=
|
||||
github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@ -424,8 +424,8 @@ github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
@ -434,8 +434,8 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/cosign/v2 v2.0.0-rc.2 h1:LDSSQYzThg7uKHJqFBp03kaObTDuWfifJqBiAK1elUU=
|
||||
github.com/sigstore/cosign/v2 v2.0.0-rc.2/go.mod h1:oKIsv9cCwtfakSd64Rzief3Izk/cPSkougoWU/F3OBI=
|
||||
github.com/sigstore/cosign/v2 v2.0.0 h1:x+K6VQKtrBR9/MYOx6ebJB6/Aux56nmf2Zn3chZlP5w=
|
||||
github.com/sigstore/cosign/v2 v2.0.0/go.mod h1:MeJyYfKll3AAsb+CdnhI3YkecDPX2erPvf1JaUaFCrM=
|
||||
github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c=
|
||||
github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g=
|
||||
github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs=
|
||||
@ -599,8 +599,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -822,8 +822,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6 h1:uUn6GsgKK2eCI0bWeRMgRCcqDaQXYDuB+5tXA5Xeg/8=
|
||||
google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc h1:ijGwO+0vL2hJt5gaygqP2j6PfflOBrRot0IczKbmtio=
|
||||
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@ -840,8 +840,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ=
|
||||
google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -893,10 +893,10 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
|
||||
k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
|
||||
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs=
|
||||
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
|
||||
k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE=
|
||||
k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
vendor/cloud.google.com/go/compute/internal/version.go (generated, vendored; 2 lines changed)
@@ -15,4 +15,4 @@
 package internal

 // Version is the current tagged release of the library.
-const Version = "1.14.0"
+const Version = "1.18.0"
vendor/github.com/google/go-containerregistry/internal/cmd/edit.go (generated, vendored; 69 lines changed)
@ -17,6 +17,7 @@ package cmd
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -30,6 +31,7 @@ import (
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
"github.com/google/go-containerregistry/pkg/v1/tarball"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
"github.com/spf13/cobra"
|
||||
@ -82,18 +84,18 @@ func NewCmdEditConfig(options *[]crane.Option) *cobra.Command {
|
||||
|
||||
// NewCmdManifest creates a new cobra.Command for the manifest subcommand.
|
||||
func NewCmdEditManifest(options *[]crane.Option) *cobra.Command {
|
||||
var dst string
|
||||
var (
|
||||
dst string
|
||||
mt string
|
||||
)
|
||||
cmd := &cobra.Command{
|
||||
Use: "manifest",
|
||||
Short: "Edit an image's manifest.",
|
||||
Example: ` # Edit ubuntu's config file
|
||||
crane edit config ubuntu
|
||||
|
||||
# Overwrite ubuntu's config file with '{}'
|
||||
echo '{}' | crane edit config ubuntu`,
|
||||
Example: ` # Edit ubuntu's manifest
|
||||
crane edit manifest ubuntu`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ref, err := editManifest(cmd.InOrStdin(), cmd.OutOrStdout(), args[0], dst, *options...)
|
||||
ref, err := editManifest(cmd.InOrStdin(), cmd.OutOrStdout(), args[0], dst, mt, *options...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("editing manifest: %w", err)
|
||||
}
|
||||
@ -102,6 +104,7 @@ func NewCmdEditManifest(options *[]crane.Option) *cobra.Command {
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringVarP(&dst, "tag", "t", "", "New tag reference to apply to mutated image. If not provided, uses original tag or pushes a new digest.")
|
||||
cmd.Flags().StringVarP(&mt, "media-type", "m", "", "Override the mediaType used as the Content-Type for PUT")
|
||||
|
||||
return cmd
|
||||
}
|
||||
@ -158,6 +161,15 @@ func editConfig(in io.Reader, out io.Writer, src, dst string, options ...crane.O
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m, err := img.Manifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mt, err := img.MediaType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var edited []byte
|
||||
if interactive(in, out) {
|
||||
rcf, err := img.RawConfigFile()
|
||||
@ -176,20 +188,24 @@ func editConfig(in io.Reader, out io.Writer, src, dst string, options ...crane.O
|
||||
edited = b
|
||||
}
|
||||
|
||||
cf, err := v1.ParseConfigFile(bytes.NewReader(edited))
|
||||
l := static.NewLayer(edited, m.Config.MediaType)
|
||||
layerDigest, err := l.Digest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
img, err = mutate.ConfigFile(img, cf)
|
||||
m.Config.Digest = layerDigest
|
||||
m.Config.Size = int64(len(edited))
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rm := &rawManifest{
|
||||
body: b,
|
||||
mediaType: mt,
|
||||
}
|
||||
|
||||
digest, err := img.Digest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
digest, _, _ := v1.SHA256(bytes.NewReader(b))
|
||||
|
||||
if dst == "" {
|
||||
dst = src
|
||||
@ -207,14 +223,18 @@ func editConfig(in io.Reader, out io.Writer, src, dst string, options ...crane.O
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := crane.Push(img, dst, options...); err != nil {
|
||||
if err := remote.WriteLayer(dstRef.Context(), l, o.Remote...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := remote.Put(dstRef, rm, o.Remote...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dstRef, nil
|
||||
}
|
||||
|
||||
func editManifest(in io.Reader, out io.Writer, src string, dst string, options ...crane.Option) (name.Reference, error) {
|
||||
func editManifest(in io.Reader, out io.Writer, src string, dst string, mt string, options ...crane.Option) (name.Reference, error) {
|
||||
o := crane.GetOptions(options...)
|
||||
|
||||
ref, err := name.ParseReference(src, o.Name...)
|
||||
@ -257,9 +277,22 @@ func editManifest(in io.Reader, out io.Writer, src string, dst string, options .
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if mt == "" {
|
||||
// If --media-type is unset, use Content-Type by default.
|
||||
mt = string(desc.MediaType)
|
||||
|
||||
// If document contains mediaType, default to that.
|
||||
wmt := withMediaType{}
|
||||
if err := json.Unmarshal(edited, &wmt); err == nil {
|
||||
if wmt.MediaType != "" {
|
||||
mt = wmt.MediaType
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rm := &rawManifest{
|
||||
body: edited,
|
||||
mediaType: desc.MediaType,
|
||||
mediaType: types.MediaType(mt),
|
||||
}
|
||||
|
||||
if err := remote.Put(dstRef, rm, o.Remote...); err != nil {
|
||||
@ -402,6 +435,10 @@ func normalize(name string) string {
|
||||
return filepath.Clean("/" + name)
|
||||
}
|
||||
|
||||
type withMediaType struct {
|
||||
MediaType string `json:"mediaType,omitempty"`
|
||||
}
|
||||
|
||||
type rawManifest struct {
|
||||
body []byte
|
||||
mediaType types.MediaType
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go (generated, vendored; 7 lines changed)
@ -17,8 +17,6 @@ package tarball
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -63,8 +61,9 @@ func v1LayerID(layer v1.Layer, parentID string, rawConfig []byte) (string, error
|
||||
if len(rawConfig) != 0 {
|
||||
s = fmt.Sprintf("%s %s", s, string(rawConfig))
|
||||
}
|
||||
rawDigest := sha256.Sum256([]byte(s))
|
||||
return hex.EncodeToString(rawDigest[:]), nil
|
||||
|
||||
h, _, _ := v1.SHA256(strings.NewReader(s))
|
||||
return h.Hex, nil
|
||||
}
|
||||
|
||||
// newTopV1Layer creates a new v1Layer for a layer other than the top layer in a v1 image tarball.
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/name/digest.go (generated, vendored; 1 line changed)
@ -15,6 +15,7 @@
|
||||
package name
|
||||
|
||||
import (
|
||||
// nolint: depguard
|
||||
_ "crypto/sha256" // Recommended by go-digest.
|
||||
"strings"
|
||||
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/registry/manifest.go (generated, vendored; 105 lines changed)
@ -16,8 +16,6 @@ package registry
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -81,6 +79,16 @@ func isCatalog(req *http.Request) bool {
|
||||
return elems[len(elems)-1] == "_catalog"
|
||||
}
|
||||
|
||||
// Returns whether this url should be handled by the referrers handler
|
||||
func isReferrers(req *http.Request) bool {
|
||||
elems := strings.Split(req.URL.Path, "/")
|
||||
elems = elems[1:]
|
||||
if len(elems) < 4 {
|
||||
return false
|
||||
}
|
||||
return elems[len(elems)-2] == "referrers"
|
||||
}
|
||||
|
||||
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-an-image-manifest
|
||||
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-an-image
|
||||
func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regError {
|
||||
@ -110,9 +118,8 @@ func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regErro
|
||||
Message: "Unknown manifest",
|
||||
}
|
||||
}
|
||||
rd := sha256.Sum256(m.blob)
|
||||
d := "sha256:" + hex.EncodeToString(rd[:])
|
||||
resp.Header().Set("Docker-Content-Digest", d)
|
||||
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
|
||||
resp.Header().Set("Docker-Content-Digest", h.String())
|
||||
resp.Header().Set("Content-Type", m.contentType)
|
||||
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
|
||||
resp.WriteHeader(http.StatusOK)
|
||||
@ -137,9 +144,8 @@ func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regErro
|
||||
Message: "Unknown manifest",
|
||||
}
|
||||
}
|
||||
rd := sha256.Sum256(m.blob)
|
||||
d := "sha256:" + hex.EncodeToString(rd[:])
|
||||
resp.Header().Set("Docker-Content-Digest", d)
|
||||
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
|
||||
resp.Header().Set("Docker-Content-Digest", h.String())
|
||||
resp.Header().Set("Content-Type", m.contentType)
|
||||
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
|
||||
resp.WriteHeader(http.StatusOK)
|
||||
@ -153,8 +159,8 @@ func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regErro
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
io.Copy(b, req.Body)
|
||||
rd := sha256.Sum256(b.Bytes())
|
||||
digest := "sha256:" + hex.EncodeToString(rd[:])
|
||||
h, _, _ := v1.SHA256(bytes.NewReader(b.Bytes()))
|
||||
digest := h.String()
|
||||
mf := manifest{
|
||||
blob: b.Bytes(),
|
||||
contentType: req.Header.Get("Content-Type"),
|
||||
@ -343,3 +349,82 @@ func (m *manifests) handleCatalog(resp http.ResponseWriter, req *http.Request) *
|
||||
Message: "We don't understand your method + url",
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: implement handling of artifactType querystring
|
||||
func (m *manifests) handleReferrers(resp http.ResponseWriter, req *http.Request) *regError {
|
||||
// Ensure this is a GET request
|
||||
if req.Method != "GET" {
|
||||
return ®Error{
|
||||
Status: http.StatusBadRequest,
|
||||
Code: "METHOD_UNKNOWN",
|
||||
Message: "We don't understand your method + url",
|
||||
}
|
||||
}
|
||||
|
||||
elem := strings.Split(req.URL.Path, "/")
|
||||
elem = elem[1:]
|
||||
target := elem[len(elem)-1]
|
||||
repo := strings.Join(elem[1:len(elem)-2], "/")
|
||||
|
||||
// Validate that incoming target is a valid digest
|
||||
if _, err := v1.NewHash(target); err != nil {
|
||||
return ®Error{
|
||||
Status: http.StatusBadRequest,
|
||||
Code: "UNSUPPORTED",
|
||||
Message: "Target must be a valid digest",
|
||||
}
|
||||
}
|
||||
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
digestToManifestMap, repoExists := m.manifests[repo]
|
||||
if !repoExists {
|
||||
return ®Error{
|
||||
Status: http.StatusNotFound,
|
||||
Code: "NAME_UNKNOWN",
|
||||
Message: "Unknown name",
|
||||
}
|
||||
}
|
||||
|
||||
im := v1.IndexManifest{
|
||||
SchemaVersion: 2,
|
||||
MediaType: types.OCIImageIndex,
|
||||
Manifests: []v1.Descriptor{},
|
||||
}
|
||||
for digest, manifest := range digestToManifestMap {
|
||||
h, err := v1.NewHash(digest)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var refPointer struct {
|
||||
Subject *v1.Descriptor `json:"subject"`
|
||||
}
|
||||
json.Unmarshal(manifest.blob, &refPointer)
|
||||
if refPointer.Subject == nil {
|
||||
continue
|
||||
}
|
||||
referenceDigest := refPointer.Subject.Digest
|
||||
if referenceDigest.String() != target {
|
||||
continue
|
||||
}
|
||||
// At this point, we know the current digest references the target
|
||||
var imageAsArtifact struct {
|
||||
Config struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
} `json:"config"`
|
||||
}
|
||||
json.Unmarshal(manifest.blob, &imageAsArtifact)
|
||||
im.Manifests = append(im.Manifests, v1.Descriptor{
|
||||
MediaType: types.MediaType(manifest.contentType),
|
||||
Size: int64(len(manifest.blob)),
|
||||
Digest: h,
|
||||
ArtifactType: imageAsArtifact.Config.MediaType,
|
||||
})
|
||||
}
|
||||
msg, _ := json.Marshal(&im)
|
||||
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
|
||||
resp.WriteHeader(http.StatusOK)
|
||||
io.Copy(resp, bytes.NewReader([]byte(msg)))
|
||||
return nil
|
||||
}
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/registry/registry.go (generated, vendored; 17 lines changed)
@@ -30,9 +30,10 @@ import (
 )

 type registry struct {
-	log       *log.Logger
-	blobs     blobs
-	manifests manifests
+	log              *log.Logger
+	blobs            blobs
+	manifests        manifests
+	referrersEnabled bool
 }

 // https://docs.docker.com/registry/spec/api/#api-version-check
@@ -50,6 +51,9 @@ func (r *registry) v2(resp http.ResponseWriter, req *http.Request) *regError {
 	if isCatalog(req) {
 		return r.manifests.handleCatalog(resp, req)
 	}
+	if r.referrersEnabled && isReferrers(req) {
+		return r.manifests.handleReferrers(resp, req)
+	}
 	resp.Header().Set("Docker-Distribution-API-Version", "registry/2.0")
 	if req.URL.Path != "/v2/" && req.URL.Path != "/v2" {
 		return &regError{
@@ -104,3 +108,10 @@ func Logger(l *log.Logger) Option {
 		r.blobs.log = l
 	}
 }
+
+// WithReferrersSupport enables the referrers API endpoint (OCI 1.1+)
+func WithReferrersSupport(enabled bool) Option {
+	return func(r *registry) {
+		r.referrersEnabled = enabled
+	}
+}
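To make the new option concrete, here is a minimal usage sketch (not part of this commit): it assumes go-containerregistry at the vendored revision above and spins up the in-memory test registry with the referrers endpoint enabled.

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	// registry.New returns an http.Handler implementing the distribution API;
	// WithReferrersSupport(true) additionally routes GET /v2/<repo>/referrers/<digest>
	// to the new handleReferrers handler shown above.
	s := httptest.NewServer(registry.New(registry.WithReferrersSupport(true)))
	defer s.Close()

	fmt.Println("test registry listening at", s.URL)
}
```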
vendor/github.com/google/go-containerregistry/pkg/v1/hash.go (generated, vendored; 6 lines changed)
@@ -15,7 +15,7 @@
 package v1

 import (
-	"crypto/sha256"
+	"crypto"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
@@ -78,7 +78,7 @@ func (h *Hash) UnmarshalText(text []byte) error {
 func Hasher(name string) (hash.Hash, error) {
 	switch name {
 	case "sha256":
-		return sha256.New(), nil
+		return crypto.SHA256.New(), nil
 	default:
 		return nil, fmt.Errorf("unsupported hash: %q", name)
 	}
@@ -111,7 +111,7 @@ func (h *Hash) parse(unquoted string) error {

 // SHA256 computes the Hash of the provided io.Reader's content.
 func SHA256(r io.Reader) (Hash, int64, error) {
-	hasher := sha256.New()
+	hasher := crypto.SHA256.New()
 	n, err := io.Copy(hasher, r)
 	if err != nil {
 		return Hash{}, 0, err
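A short illustrative sketch (not from this commit) of why swapping sha256.New() for crypto.SHA256.New() is behavior-preserving: both return the same hash.Hash as long as crypto/sha256 is linked into the binary.

```go
package main

import (
	"crypto"
	"crypto/sha256"
	"fmt"
)

func main() {
	data := []byte("ko")

	legacy := sha256.New() // what hash.go used before this change
	legacy.Write(data)

	// crypto.SHA256.New() resolves to the same implementation; importing
	// crypto/sha256 anywhere in the binary (as above) registers it.
	viaCrypto := crypto.SHA256.New()
	viaCrypto.Write(data)

	fmt.Printf("%x\n%x\n", legacy.Sum(nil), viaCrypto.Sum(nil)) // identical digests
}
```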
vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go (generated, vendored; 17 lines changed)
@ -28,6 +28,7 @@ type Manifest struct {
|
||||
Config Descriptor `json:"config"`
|
||||
Layers []Descriptor `json:"layers"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Subject *Descriptor `json:"subject,omitempty"`
|
||||
}
|
||||
|
||||
// IndexManifest represents an OCI image index in a structured way.
|
||||
@ -36,17 +37,19 @@ type IndexManifest struct {
|
||||
MediaType types.MediaType `json:"mediaType,omitempty"`
|
||||
Manifests []Descriptor `json:"manifests"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Subject *Descriptor `json:"subject,omitempty"`
|
||||
}
|
||||
|
||||
// Descriptor holds a reference from the manifest to one of its constituent elements.
|
||||
type Descriptor struct {
|
||||
MediaType types.MediaType `json:"mediaType"`
|
||||
Size int64 `json:"size"`
|
||||
Digest Hash `json:"digest"`
|
||||
Data []byte `json:"data,omitempty"`
|
||||
URLs []string `json:"urls,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Platform *Platform `json:"platform,omitempty"`
|
||||
MediaType types.MediaType `json:"mediaType"`
|
||||
Size int64 `json:"size"`
|
||||
Digest Hash `json:"digest"`
|
||||
Data []byte `json:"data,omitempty"`
|
||||
URLs []string `json:"urls,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
Platform *Platform `json:"platform,omitempty"`
|
||||
ArtifactType string `json:"artifactType,omitempty"`
|
||||
}
|
||||
|
||||
// ParseManifest parses the io.Reader's contents into a Manifest.
|
||||
|
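The manifest.go hunk above adds Subject to Manifest/IndexManifest and ArtifactType to Descriptor. A hedged sketch of what those new fields look like on the wire; the digest and size are placeholders, and the artifact media type is only an example:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	// Placeholder subject digest, for illustration only.
	subjectDigest := v1.Hash{Algorithm: "sha256", Hex: strings.Repeat("a", 64)}

	m := v1.Manifest{
		SchemaVersion: 2,
		MediaType:     types.OCIManifestSchema1,
		Config: v1.Descriptor{
			// A non-config mediaType here is what the rest of this diff surfaces as the artifactType.
			MediaType: types.MediaType("application/vnd.dev.cosign.artifact.sbom.v1+json"),
		},
		Subject: &v1.Descriptor{
			MediaType: types.OCIManifestSchema1,
			Digest:    subjectDigest,
			Size:      1234, // placeholder
		},
	}

	b, err := json.MarshalIndent(m, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // note the new "subject" field in the output
}
```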
vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go (generated, vendored; 2 lines changed)
@ -37,6 +37,7 @@ type image struct {
|
||||
configMediaType *types.MediaType
|
||||
diffIDMap map[v1.Hash]v1.Layer
|
||||
digestMap map[v1.Hash]v1.Layer
|
||||
subject *v1.Descriptor
|
||||
}
|
||||
|
||||
var _ v1.Image = (*image)(nil)
|
||||
@ -153,6 +154,7 @@ func (i *image) compute() error {
|
||||
manifest.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
manifest.Subject = i.subject
|
||||
|
||||
i.configFile = configFile
|
||||
i.manifest = manifest
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go (generated, vendored; 2 lines changed)
@ -70,6 +70,7 @@ type index struct {
|
||||
imageMap map[v1.Hash]v1.Image
|
||||
indexMap map[v1.Hash]v1.ImageIndex
|
||||
layerMap map[v1.Hash]v1.Layer
|
||||
subject *v1.Descriptor
|
||||
}
|
||||
|
||||
var _ v1.ImageIndex = (*index)(nil)
|
||||
@ -142,6 +143,7 @@ func (i *index) compute() error {
|
||||
manifest.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
manifest.Subject = i.subject
|
||||
|
||||
i.manifest = manifest
|
||||
i.computed = true
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go (generated, vendored; 51 lines changed)
@ -115,9 +115,33 @@ func Config(base v1.Image, cfg v1.Config) (v1.Image, error) {
|
||||
return ConfigFile(base, cf)
|
||||
}
|
||||
|
||||
// Annotatable represents a manifest that can carry annotations.
|
||||
type Annotatable interface {
|
||||
partial.WithRawManifest
|
||||
// Subject mutates the subject on an image or index manifest.
|
||||
//
|
||||
// The input is expected to be a v1.Image or v1.ImageIndex, and
|
||||
// returns the same type. You can type-assert the result like so:
|
||||
//
|
||||
// img := Subject(empty.Image, subj).(v1.Image)
|
||||
//
|
||||
// Or for an index:
|
||||
//
|
||||
// idx := Subject(empty.Index, subj).(v1.ImageIndex)
|
||||
//
|
||||
// If the input is not an Image or ImageIndex, the result will
|
||||
// attempt to lazily annotate the raw manifest.
|
||||
func Subject(f partial.WithRawManifest, subject v1.Descriptor) partial.WithRawManifest {
|
||||
if img, ok := f.(v1.Image); ok {
|
||||
return &image{
|
||||
base: img,
|
||||
subject: &subject,
|
||||
}
|
||||
}
|
||||
if idx, ok := f.(v1.ImageIndex); ok {
|
||||
return &index{
|
||||
base: idx,
|
||||
subject: &subject,
|
||||
}
|
||||
}
|
||||
return arbitraryRawManifest{a: f, subject: &subject}
|
||||
}
|
||||
|
||||
// Annotations mutates the annotations on an annotatable image or index manifest.
|
||||
@ -137,7 +161,7 @@ type Annotatable interface {
|
||||
//
|
||||
// If the input Annotatable is not an Image or ImageIndex, the result will
|
||||
// attempt to lazily annotate the raw manifest.
|
||||
func Annotations(f Annotatable, anns map[string]string) Annotatable {
|
||||
func Annotations(f partial.WithRawManifest, anns map[string]string) partial.WithRawManifest {
|
||||
if img, ok := f.(v1.Image); ok {
|
||||
return &image{
|
||||
base: img,
|
||||
@ -150,12 +174,13 @@ func Annotations(f Annotatable, anns map[string]string) Annotatable {
|
||||
annotations: anns,
|
||||
}
|
||||
}
|
||||
return arbitraryRawManifest{f, anns}
|
||||
return arbitraryRawManifest{a: f, anns: anns}
|
||||
}
|
||||
|
||||
type arbitraryRawManifest struct {
|
||||
a Annotatable
|
||||
anns map[string]string
|
||||
a partial.WithRawManifest
|
||||
anns map[string]string
|
||||
subject *v1.Descriptor
|
||||
}
|
||||
|
||||
func (a arbitraryRawManifest) RawManifest() ([]byte, error) {
|
||||
@ -178,6 +203,9 @@ func (a arbitraryRawManifest) RawManifest() ([]byte, error) {
|
||||
} else {
|
||||
m["annotations"] = a.anns
|
||||
}
|
||||
if a.subject != nil {
|
||||
m["subject"] = a.subject
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
@ -437,6 +465,13 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
|
||||
}
|
||||
|
||||
header.ModTime = t
|
||||
|
||||
//PAX and GNU Format support additional timestamps in the header
|
||||
if header.Format == tar.FormatPAX || header.Format == tar.FormatGNU {
|
||||
header.AccessTime = t
|
||||
header.ChangeTime = t
|
||||
}
|
||||
|
||||
if err := tarWriter.WriteHeader(header); err != nil {
|
||||
return nil, fmt.Errorf("writing tar header: %w", err)
|
||||
}
|
||||
@ -500,6 +535,8 @@ func MediaType(img v1.Image, mt types.MediaType) v1.Image {
|
||||
}
|
||||
|
||||
// ConfigMediaType modifies the MediaType() of the given image's Config.
|
||||
//
|
||||
// If !mt.IsConfig(), this will be the image's artifactType in any indexes it's a part of.
|
||||
func ConfigMediaType(img v1.Image, mt types.MediaType) v1.Image {
|
||||
return &image{
|
||||
base: img,
|
||||
|
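A usage sketch for the new mutate.Subject shown above (not part of this commit): two random test images stand in for the referred-to image and its attachment, and the subject descriptor is built by hand; errors are elided for brevity.

```go
package main

import (
	"log"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	// Stand-ins: the image being referred to, and the attachment that refers to it.
	target, err := random.Image(256, 1)
	if err != nil {
		log.Fatal(err)
	}
	attachment, err := random.Image(256, 1)
	if err != nil {
		log.Fatal(err)
	}

	// Describe the target manifest as the subject descriptor.
	digest, _ := target.Digest()
	size, _ := target.Size()
	mt, _ := target.MediaType()
	subject := v1.Descriptor{MediaType: mt, Digest: digest, Size: size}

	// Subject returns partial.WithRawManifest; per the new doc comment it can be
	// type-asserted back to v1.Image when the input was an image.
	img := mutate.Subject(attachment, subject).(v1.Image)
	_ = img // pushing img with remote.Write would exercise the referrers fallback in write.go below
}
```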
vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go (generated, vendored; 35 lines changed)
@ -328,10 +328,28 @@ func Descriptor(d Describable) (*v1.Descriptor, error) {
|
||||
if desc.MediaType, err = d.MediaType(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if wat, ok := d.(withArtifactType); ok {
|
||||
if desc.ArtifactType, err = wat.ArtifactType(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if wrm, ok := d.(WithRawManifest); ok && desc.MediaType.IsImage() {
|
||||
mf, _ := Manifest(wrm)
|
||||
// Failing to parse as a manifest should just be ignored.
|
||||
// The manifest might not be valid, and that's okay.
|
||||
if mf != nil && !mf.Config.MediaType.IsConfig() {
|
||||
desc.ArtifactType = string(mf.Config.MediaType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &desc, nil
|
||||
}
|
||||
|
||||
type withArtifactType interface {
|
||||
ArtifactType() (string, error)
|
||||
}
|
||||
|
||||
type withUncompressedSize interface {
|
||||
UncompressedSize() (int64, error)
|
||||
}
|
||||
@ -399,3 +417,20 @@ func unwrap(i any) any {
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// ArtifactType returns the artifact type for the given manifest.
|
||||
//
|
||||
// If the manifest reports its own artifact type, that's returned, otherwise
|
||||
// the manifest is parsed and, if successful, its config.mediaType is returned.
|
||||
func ArtifactType(w WithManifest) (string, error) {
|
||||
if wat, ok := w.(withArtifactType); ok {
|
||||
return wat.ArtifactType()
|
||||
}
|
||||
mf, _ := w.Manifest()
|
||||
// Failing to parse as a manifest should just be ignored.
|
||||
// The manifest might not be valid, and that's okay.
|
||||
if mf != nil && !mf.Config.MediaType.IsConfig() {
|
||||
return string(mf.Config.MediaType), nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
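A small sketch (not from this commit) of the new partial.ArtifactType helper added above, run against a random test image; for an ordinary image the config mediaType is a real config type, so the result is typically empty.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	img, err := random.Image(256, 1)
	if err != nil {
		log.Fatal(err)
	}
	// For manifests whose config mediaType is not a config type, this returns
	// that mediaType as the artifact type; otherwise it returns "".
	at, err := partial.ArtifactType(img)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("artifact type: %q\n", at)
}
```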
vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go (generated, vendored; 4 lines changed)
@ -17,8 +17,8 @@ package random
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -84,7 +84,7 @@ func Layer(byteSize int64, mt types.MediaType) (v1.Layer, error) {
|
||||
|
||||
// Hash the contents as we write it out to the buffer.
|
||||
var b bytes.Buffer
|
||||
hasher := sha256.New()
|
||||
hasher := crypto.SHA256.New()
|
||||
mw := io.MultiWriter(&b, hasher)
|
||||
|
||||
// Write a single file with a random name and random contents.
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go (generated, vendored; 4 lines changed)
@ -54,4 +54,8 @@ func Delete(ref name.Reference, options ...Option) error {
|
||||
defer resp.Body.Close()
|
||||
|
||||
return transport.CheckError(resp, http.StatusOK, http.StatusAccepted)
|
||||
|
||||
// TODO(jason): If the manifest had a `subject`, and if the registry
|
||||
// doesn't support Referrers, update the index pointed to by the
|
||||
// subject's fallback tag to remove the descriptor for this manifest.
|
||||
}
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go (generated, vendored; 89 lines changed)
@ -17,6 +17,8 @@ package remote
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@ -59,7 +61,7 @@ type Descriptor struct {
|
||||
v1.Descriptor
|
||||
Manifest []byte
|
||||
|
||||
// So we can share this implementation with Image..
|
||||
// So we can share this implementation with Image.
|
||||
platform v1.Platform
|
||||
}
|
||||
|
||||
@ -237,6 +239,56 @@ func (f *fetcher) url(resource, identifier string) url.URL {
|
||||
}
|
||||
}
|
||||
|
||||
// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema
|
||||
func fallbackTag(d name.Digest) name.Tag {
|
||||
return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1))
|
||||
}
|
||||
|
||||
func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (*v1.IndexManifest, error) {
|
||||
// Check the Referrers API endpoint first.
|
||||
u := f.url("referrers", d.DigestStr())
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Accept", string(types.OCIImageIndex))
|
||||
|
||||
resp, err := f.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
var im v1.IndexManifest
|
||||
if err := json.NewDecoder(resp.Body).Decode(&im); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return filterReferrersResponse(filter, &im), nil
|
||||
}
|
||||
|
||||
// The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme.
|
||||
b, _, err := f.fetchManifest(fallbackTag(d), []types.MediaType{types.OCIImageIndex})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var terr *transport.Error
|
||||
if ok := errors.As(err, &terr); ok && terr.StatusCode == http.StatusNotFound {
|
||||
// Not found just means there are no attachments yet. Start with an empty manifest.
|
||||
return &v1.IndexManifest{MediaType: types.OCIImageIndex}, nil
|
||||
}
|
||||
|
||||
var im v1.IndexManifest
|
||||
if err := json.Unmarshal(b, &im); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return filterReferrersResponse(filter, &im), nil
|
||||
}
|
||||
|
||||
func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
|
||||
u := f.url("manifests", ref.Identifier())
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
@ -283,6 +335,15 @@ func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType
|
||||
return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
|
||||
}
|
||||
}
|
||||
|
||||
var artifactType string
|
||||
mf, _ := v1.ParseManifest(bytes.NewReader(manifest))
|
||||
// Failing to parse as a manifest should just be ignored.
|
||||
// The manifest might not be valid, and that's okay.
|
||||
if mf != nil && !mf.Config.MediaType.IsConfig() {
|
||||
artifactType = string(mf.Config.MediaType)
|
||||
}
|
||||
|
||||
// Do nothing for tags; I give up.
|
||||
//
|
||||
// We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
|
||||
@ -293,9 +354,10 @@ func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType
|
||||
|
||||
// Return all this info since we have to calculate it anyway.
|
||||
desc := v1.Descriptor{
|
||||
Digest: digest,
|
||||
Size: size,
|
||||
MediaType: mediaType,
|
||||
Digest: digest,
|
||||
Size: size,
|
||||
MediaType: mediaType,
|
||||
ArtifactType: artifactType,
|
||||
}
|
||||
|
||||
return manifest, &desc, nil
|
||||
@ -428,3 +490,22 @@ func (f *fetcher) blobExists(h v1.Hash) (bool, error) {
|
||||
|
||||
return resp.StatusCode == http.StatusOK, nil
|
||||
}
|
||||
|
||||
// If filter applied, filter out by artifactType.
|
||||
// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers
|
||||
func filterReferrersResponse(filter map[string]string, origIndex *v1.IndexManifest) *v1.IndexManifest {
|
||||
newIndex := origIndex
|
||||
if filter == nil {
|
||||
return newIndex
|
||||
}
|
||||
if v, ok := filter["artifactType"]; ok {
|
||||
tmp := []v1.Descriptor{}
|
||||
for _, desc := range newIndex.Manifests {
|
||||
if desc.ArtifactType == v {
|
||||
tmp = append(tmp, desc)
|
||||
}
|
||||
}
|
||||
newIndex.Manifests = tmp
|
||||
}
|
||||
return newIndex
|
||||
}
|
||||
|
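To illustrate the fallback tag scheme used by fetchReferrers above when a registry lacks the Referrers API, here is a sketch (not part of this commit) of the same digest-to-tag transformation; the registry and digest are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// Placeholder digest, for illustration only.
	d, err := name.NewDigest("registry.example.com/app@sha256:" + strings.Repeat("a", 64))
	if err != nil {
		log.Fatal(err)
	}
	// Same transformation as fallbackTag: "sha256:<hex>" becomes the tag "sha256-<hex>"
	// in the same repository.
	t := d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1))
	fmt.Println(t) // registry.example.com/app:sha256-aaaa...
}
```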
vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go (generated, vendored; 9 lines changed)
@ -46,6 +46,15 @@ type remoteImage struct {
|
||||
descriptor *v1.Descriptor
|
||||
}
|
||||
|
||||
func (r *remoteImage) ArtifactType() (string, error) {
|
||||
// kind of a hack, but RawManifest does appropriate locking/memoization
|
||||
// and makes sure r.descriptor is populated.
|
||||
if _, err := r.RawManifest(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return r.descriptor.ArtifactType, nil
|
||||
}
|
||||
|
||||
var _ partial.CompressedImageCore = (*remoteImage)(nil)
|
||||
|
||||
// Image provides access to a remote image reference.
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go (generated, vendored; 10 lines changed)
@ -250,6 +250,16 @@ func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if child.MediaType.IsImage() {
|
||||
mf, _ := v1.ParseManifest(bytes.NewReader(manifest))
|
||||
// Failing to parse as a manifest should just be ignored.
|
||||
// The manifest might not be valid, and that's okay.
|
||||
if mf != nil && !mf.Config.MediaType.IsConfig() {
|
||||
child.ArtifactType = string(mf.Config.MediaType)
|
||||
}
|
||||
}
|
||||
|
||||
return &Descriptor{
|
||||
fetcher: fetcher{
|
||||
Ref: ref,
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go (generated, vendored; 12 lines changed)
@ -46,6 +46,7 @@ type options struct {
|
||||
pageSize int
|
||||
retryBackoff Backoff
|
||||
retryPredicate retry.Predicate
|
||||
filter map[string]string
|
||||
}
|
||||
|
||||
var defaultPlatform = v1.Platform{
|
||||
@ -303,3 +304,14 @@ func WithRetryPredicate(predicate retry.Predicate) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithFilter sets the filter querystring for HTTP operations.
|
||||
func WithFilter(key string, value string) Option {
|
||||
return func(o *options) error {
|
||||
if o.filter == nil {
|
||||
o.filter = map[string]string{}
|
||||
}
|
||||
o.filter[key] = value
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/remote/referrers.go (new file; generated, vendored; 35 lines)
@ -0,0 +1,35 @@
|
||||
// Copyright 2023 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
)
|
||||
|
||||
// Referrers returns a list of descriptors that refer to the given manifest digest.
|
||||
//
|
||||
// The subject manifest doesn't have to exist in the registry for there to be descriptors that refer to it.
|
||||
func Referrers(d name.Digest, options ...Option) (*v1.IndexManifest, error) {
|
||||
o, err := makeOptions(d.Context(), options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := makeFetcher(d, o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.fetchReferrers(o.context, o.filter, d)
|
||||
}
|
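A hedged usage sketch for the new remote.Referrers call, combined with the remote.WithFilter option added in options.go above; the registry, digest, and artifact type are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Placeholder subject digest; point this at a real registry to try it.
	d, err := name.NewDigest("registry.example.com/app@sha256:" + strings.Repeat("a", 64))
	if err != nil {
		log.Fatal(err)
	}
	// WithFilter narrows the result by artifactType; registries without the
	// Referrers API are served via the sha256-<hex> fallback tag instead.
	idx, err := remote.Referrers(d, remote.WithFilter("artifactType", "application/vnd.dev.cosign.artifact.sbom.v1+json"))
	if err != nil {
		log.Fatal(err)
	}
	for _, desc := range idx.Manifests {
		fmt.Println(desc.Digest, desc.ArtifactType)
	}
}
```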
vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go (generated, vendored; 127 lines changed)
@ -17,11 +17,13 @@ package remote
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/internal/redact"
|
||||
@ -577,8 +579,113 @@ func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// commitSubjectReferrers is responsible for updating the fallback tag manifest to track descriptors referring to a subject for registries that don't yet support the Referrers API.
|
||||
// TODO: use conditional requests to avoid race conditions
|
||||
func (w *writer) commitSubjectReferrers(ctx context.Context, sub name.Digest, add v1.Descriptor) error {
|
||||
// Check if the registry supports Referrers API.
|
||||
// TODO: This should be done once per registry, not once per subject.
|
||||
u := w.url(fmt.Sprintf("/v2/%s/referrers/%s", w.repo.RepositoryStr(), sub.DigestStr()))
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Accept", string(types.OCIImageIndex))
|
||||
resp, err := w.client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil {
|
||||
return err
|
||||
}
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
// The registry supports Referrers API. The registry is responsible for updating the referrers list.
|
||||
return nil
|
||||
}
|
||||
|
||||
// The registry doesn't support Referrers API, we need to update the manifest tagged with the fallback tag.
|
||||
// Make the request to GET the current manifest.
|
||||
t := fallbackTag(sub)
|
||||
u = w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), t.Identifier()))
|
||||
req, err = http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Accept", string(types.OCIImageIndex))
|
||||
resp, err = w.client.Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var im v1.IndexManifest
|
||||
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
|
||||
return err
|
||||
} else if resp.StatusCode == http.StatusNotFound {
|
||||
// Not found just means there are no attachments. Start with an empty index.
|
||||
im = v1.IndexManifest{
|
||||
SchemaVersion: 2,
|
||||
MediaType: types.OCIImageIndex,
|
||||
Manifests: []v1.Descriptor{add},
|
||||
}
|
||||
} else {
|
||||
if err := json.NewDecoder(resp.Body).Decode(&im); err != nil {
|
||||
return err
|
||||
}
|
||||
if im.SchemaVersion != 2 {
|
||||
return fmt.Errorf("fallback tag manifest is not a schema version 2: %d", im.SchemaVersion)
|
||||
}
|
||||
if im.MediaType != types.OCIImageIndex {
|
||||
return fmt.Errorf("fallback tag manifest is not an OCI image index: %s", im.MediaType)
|
||||
}
|
||||
for _, desc := range im.Manifests {
|
||||
if desc.Digest == add.Digest {
|
||||
// The digest is already attached, nothing to do.
|
||||
logs.Progress.Printf("fallback tag %s already had referrer", t.Identifier())
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Append the new descriptor to the index.
|
||||
im.Manifests = append(im.Manifests, add)
|
||||
}
|
||||
|
||||
// Sort the manifests for reproducibility.
|
||||
sort.Slice(im.Manifests, func(i, j int) bool {
|
||||
return im.Manifests[i].Digest.String() < im.Manifests[j].Digest.String()
|
||||
})
|
||||
logs.Progress.Printf("updating fallback tag %s with new referrer", t.Identifier())
|
||||
if err := w.commitManifest(ctx, fallbackTaggable{im}, t); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type fallbackTaggable struct {
|
||||
im v1.IndexManifest
|
||||
}
|
||||
|
||||
func (f fallbackTaggable) RawManifest() ([]byte, error) { return json.Marshal(f.im) }
|
||||
func (f fallbackTaggable) MediaType() (types.MediaType, error) { return types.OCIImageIndex, nil }
|
||||
|
||||
// commitManifest does a PUT of the image's manifest.
|
||||
func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error {
|
||||
// If the manifest refers to a subject, we need to check whether we need to update the fallback tag manifest.
|
||||
raw, err := t.RawManifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mf struct {
|
||||
MediaType types.MediaType `json:"mediaType"`
|
||||
Subject *v1.Descriptor `json:"subject,omitempty"`
|
||||
Config struct {
|
||||
MediaType types.MediaType `json:"mediaType"`
|
||||
} `json:"config"`
|
||||
}
|
||||
if err := json.Unmarshal(raw, &mf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tryUpload := func() error {
|
||||
ctx := retry.Never(ctx)
|
||||
raw, desc, err := unpackTaggable(t)
|
||||
@ -605,6 +712,26 @@ func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Refere
|
||||
return err
|
||||
}
|
||||
|
||||
// If the manifest referred to a subject, we may need to update the fallback tag manifest.
|
||||
// TODO: If this fails, we'll retry the whole upload. We should retry just this part.
|
||||
if mf.Subject != nil {
|
||||
h, size, err := v1.SHA256(bytes.NewReader(raw))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
desc := v1.Descriptor{
|
||||
ArtifactType: string(mf.Config.MediaType),
|
||||
MediaType: mf.MediaType,
|
||||
Digest: h,
|
||||
Size: size,
|
||||
}
|
||||
if err := w.commitSubjectReferrers(ctx,
|
||||
ref.Context().Digest(mf.Subject.Digest.String()),
|
||||
desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// The image was successfully pushed!
|
||||
logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size)
|
||||
w.incrProgress(int64(len(raw)))
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/static/layer.go (new file; generated, vendored; 68 lines)
@ -0,0 +1,68 @@
|
||||
// Copyright 2021 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package static
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// NewLayer returns a layer containing the given bytes, with the given mediaType.
|
||||
//
|
||||
// Contents will not be compressed.
|
||||
func NewLayer(b []byte, mt types.MediaType) v1.Layer {
|
||||
return &staticLayer{b: b, mt: mt}
|
||||
}
|
||||
|
||||
type staticLayer struct {
|
||||
b []byte
|
||||
mt types.MediaType
|
||||
|
||||
once sync.Once
|
||||
h v1.Hash
|
||||
}
|
||||
|
||||
func (l *staticLayer) Digest() (v1.Hash, error) {
|
||||
var err error
|
||||
// Only calculate digest the first time we're asked.
|
||||
l.once.Do(func() {
|
||||
l.h, _, err = v1.SHA256(bytes.NewReader(l.b))
|
||||
})
|
||||
return l.h, err
|
||||
}
|
||||
|
||||
func (l *staticLayer) DiffID() (v1.Hash, error) {
|
||||
return l.Digest()
|
||||
}
|
||||
|
||||
func (l *staticLayer) Compressed() (io.ReadCloser, error) {
|
||||
return io.NopCloser(bytes.NewReader(l.b)), nil
|
||||
}
|
||||
|
||||
func (l *staticLayer) Uncompressed() (io.ReadCloser, error) {
|
||||
return io.NopCloser(bytes.NewReader(l.b)), nil
|
||||
}
|
||||
|
||||
func (l *staticLayer) Size() (int64, error) {
|
||||
return int64(len(l.b)), nil
|
||||
}
|
||||
|
||||
func (l *staticLayer) MediaType() (types.MediaType, error) {
|
||||
return l.mt, nil
|
||||
}
|
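A short sketch (not part of this commit) of the static.NewLayer helper vendored above, which edit.go uses to wrap the edited config blob before remote.WriteLayer; the payload here is an arbitrary example.

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/v1/static"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	// Wrap fixed bytes as an uncompressed v1.Layer with an explicit media type.
	layer := static.NewLayer([]byte(`{"edited":true}`), types.DockerConfigJSON)

	digest, err := layer.Digest()
	if err != nil {
		log.Fatal(err)
	}
	size, _ := layer.Size()
	fmt.Println(digest, size)
}
```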
vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go (generated, vendored; 6 lines changed)
@ -18,7 +18,7 @@ package stream
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"crypto"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"hash"
|
||||
@ -166,8 +166,8 @@ type compressedReader struct {
|
||||
func newCompressedReader(l *Layer) (*compressedReader, error) {
|
||||
// Collect digests of compressed and uncompressed stream and size of
|
||||
// compressed stream.
|
||||
h := sha256.New()
|
||||
zh := sha256.New()
|
||||
h := crypto.SHA256.New()
|
||||
zh := crypto.SHA256.New()
|
||||
count := &countWriter{}
|
||||
|
||||
// gzip.Writer writes to the output stream via pipe, a hasher to
|
||||
|
vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go (generated, vendored; 9 lines changed)
@@ -71,3 +71,12 @@ func (m MediaType) IsIndex() bool {
 	}
 	return false
 }
+
+// IsConfig returns true if the mediaType represents a config, as opposed to something else, like an image.
+func (m MediaType) IsConfig() bool {
+	switch m {
+	case OCIConfigJSON, DockerConfigJSON:
+		return true
+	}
+	return false
+}
6  vendor/github.com/google/go-containerregistry/pkg/v1/validate/layer.go  generated  vendored
@@ -17,7 +17,7 @@ package validate
import (
	"archive/tar"
	"compress/gzip"
	"crypto/sha256"
	"crypto"
	"encoding/hex"
	"errors"
	"fmt"
@@ -104,7 +104,7 @@ func computeLayer(layer v1.Layer) (*computedLayer, error) {
	}

	// Keep track of compressed digest.
	digester := sha256.New()
	digester := crypto.SHA256.New()
	// Everything read from compressed is written to digester to compute digest.
	hashCompressed := io.TeeReader(compressed, digester)

@@ -132,7 +132,7 @@ func computeLayer(layer v1.Layer) (*computedLayer, error) {
	if err != nil {
		return nil, err
	}
	diffider := sha256.New()
	diffider := crypto.SHA256.New()
	hashUncompressed := io.TeeReader(uncompressed, diffider)

	// Ensure there aren't duplicate file paths.
25  vendor/github.com/sigstore/cosign/v2/internal/pkg/oci/remote/remote.go  generated  vendored  Normal file
@@ -0,0 +1,25 @@
//
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"fmt"
)

// ArtifactType converts a attachment name (sig/sbom/att/etc.) into a valid artifactType (OCI 1.1+).
func ArtifactType(attName string) string {
	return fmt.Sprintf("application/vnd.dev.cosign.artifact.%s.v1+json", attName)
}
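The helper above maps an attachment name directly to a media type. Since this package is internal to cosign, the following one-liner is shown only to make the mapping concrete; the attachment name is chosen for illustration:

// Equivalent formatting, for reference only.
artifactType := fmt.Sprintf("application/vnd.dev.cosign.artifact.%s.v1+json", "sbom")
// artifactType == "application/vnd.dev.cosign.artifact.sbom.v1+json"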
30  vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/referrers.go  generated  vendored  Normal file
@@ -0,0 +1,30 @@
//
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// Referrers fetches references using registry options.
func Referrers(d name.Digest, artifactType string, opts ...Option) (*v1.IndexManifest, error) {
	o := makeOptions(name.Repository{}, opts...)
	rOpt := o.ROpt
	rOpt = append(rOpt, remote.WithFilter("artifactType", artifactType))
	return remote.Referrers(d, rOpt...)
}
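A hedged sketch of calling the exported Referrers helper above against a registry that supports the OCI 1.1 referrers API; the registry host, repository, and digest are placeholders:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	// Placeholder digest reference; substitute a real image digest.
	d, err := name.NewDigest("registry.example.com/app@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		panic(err)
	}
	// Ask the registry for referrers whose artifactType matches cosign signatures.
	index, err := ociremote.Referrers(d, "application/vnd.dev.cosign.artifact.sig.v1+json")
	if err != nil {
		panic(err)
	}
	for _, m := range index.Manifests {
		fmt.Println(m.Digest, m.MediaType)
	}
}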
49  vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/remote.go  generated  vendored
@@ -26,6 +26,7 @@ import (
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
	"github.com/google/go-containerregistry/pkg/v1/types"
	ociexperimental "github.com/sigstore/cosign/v2/internal/pkg/oci/remote"
	"github.com/sigstore/cosign/v2/pkg/oci"
)

@@ -146,6 +147,11 @@ func attestations(digestable digestable, o *options) (oci.Signatures, error) {

// attachment is a shared implementation of the oci.Signed* Attachment method.
func attachment(digestable digestable, attName string, o *options) (oci.File, error) {
	// Try using OCI 1.1 behavior
	if file, err := attachmentExperimentalOCI(digestable, attName, o); err == nil {
		return file, nil
	}

	h, err := digestable.Digest()
	if err != nil {
		return nil, err
@@ -193,3 +199,46 @@ func (f *attached) Payload() ([]byte, error) {
	defer rc.Close()
	return io.ReadAll(rc)
}

// attachmentExperimentalOCI is a shared implementation of the oci.Signed* Attachment method (for OCI 1.1+ behavior).
func attachmentExperimentalOCI(digestable digestable, attName string, o *options) (oci.File, error) {
	h, err := digestable.Digest()
	if err != nil {
		return nil, err
	}
	d := o.TargetRepository.Digest(h.String())

	artifactType := ociexperimental.ArtifactType(attName)
	index, err := Referrers(d, artifactType, o.OriginalOptions...)
	if err != nil {
		return nil, err
	}
	results := index.Manifests

	numResults := len(results)
	if numResults == 0 {
		return nil, fmt.Errorf("unable to locate reference with artifactType %s", artifactType)
	} else if numResults > 1 {
		// TODO: if there is more than 1 result.. what does that even mean?
		// TODO: use ui.Warn
		fmt.Printf("WARNING: there were a total of %d references with artifactType %s\n", numResults, artifactType)
	}
	// TODO: do this smarter using "created" annotations
	lastResult := results[numResults-1]

	img, err := SignedImage(o.TargetRepository.Digest(lastResult.Digest.String()), o.OriginalOptions...)
	if err != nil {
		return nil, err
	}
	ls, err := img.Layers()
	if err != nil {
		return nil, err
	}
	if len(ls) != 1 {
		return nil, fmt.Errorf("expected exactly one layer in attachment, got %d", len(ls))
	}
	return &attached{
		SignedImage: img,
		layer:       ls[0],
	}, nil
}
94  vendor/github.com/sigstore/cosign/v2/pkg/oci/remote/write.go  generated  vendored
@@ -16,12 +16,19 @@
package remote

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/static"
	"github.com/google/go-containerregistry/pkg/v1/types"
	ociexperimental "github.com/sigstore/cosign/v2/internal/pkg/oci/remote"
	"github.com/sigstore/cosign/v2/pkg/oci"
	ctypes "github.com/sigstore/cosign/v2/pkg/types"
)

// WriteSignedImageIndexImages writes the images within the image index
@@ -127,3 +134,90 @@ func WriteAttestations(repo name.Repository, se oci.SignedEntity, opts ...Option
	// Write the Signatures image to the tag, with the provided remote.Options
	return remoteWrite(tag, atts, o.ROpt...)
}

// WriteSignaturesExperimentalOCI publishes the signatures attached to the given entity
// into the provided repository (using OCI 1.1 methods).
func WriteSignaturesExperimentalOCI(d name.Digest, se oci.SignedEntity, opts ...Option) error {
	o := makeOptions(d.Repository, opts...)
	signTarget := d.String()
	ref, err := name.ParseReference(signTarget, o.NameOpts...)
	if err != nil {
		return err
	}
	desc, err := remote.Head(ref, o.ROpt...)
	if err != nil {
		return err
	}
	sigs, err := se.Signatures()
	if err != nil {
		return err
	}

	// Write the signature blobs
	s, err := sigs.Get()
	if err != nil {
		return err
	}
	for _, v := range s {
		if err := remote.WriteLayer(d.Repository, v, o.ROpt...); err != nil {
			return err
		}
	}

	// Write the config
	configBytes, err := sigs.RawConfigFile()
	if err != nil {
		return err
	}
	var configDesc v1.Descriptor
	if err := json.Unmarshal(configBytes, &configDesc); err != nil {
		return err
	}
	configLayer := static.NewLayer(configBytes, configDesc.MediaType)
	if err := remote.WriteLayer(d.Repository, configLayer, o.ROpt...); err != nil {
		return err
	}

	// Write the manifest containing a subject
	b, err := sigs.RawManifest()
	if err != nil {
		return err
	}
	var m v1.Manifest
	if err := json.Unmarshal(b, &m); err != nil {
		return err
	}

	artifactType := ociexperimental.ArtifactType("sig")
	m.Config.MediaType = types.MediaType(artifactType)
	m.Subject = desc
	b, err = json.Marshal(&m)
	if err != nil {
		return err
	}
	digest, _, err := v1.SHA256(bytes.NewReader(b))
	if err != nil {
		return err
	}
	targetRef, err := name.ParseReference(fmt.Sprintf("%s/%s@%s", d.RegistryStr(), d.RepositoryStr(), digest.String()))
	if err != nil {
		return err
	}
	// TODO: use ui.Infof
	fmt.Fprintf(os.Stderr, "Uploading signature for [%s] to [%s] with config.mediaType [%s] layers[0].mediaType [%s].\n",
		d.String(), targetRef.String(), artifactType, ctypes.SimpleSigningMediaType)
	return remote.Put(targetRef, &taggableManifest{raw: b, mediaType: m.MediaType}, o.ROpt...)
}

type taggableManifest struct {
	raw       []byte
	mediaType types.MediaType
}

func (taggable taggableManifest) RawManifest() ([]byte, error) {
	return taggable.raw, nil
}

func (taggable taggableManifest) MediaType() (types.MediaType, error) {
	return taggable.mediaType, nil
}
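A sketch of how the new OCI 1.1 write path might be driven end to end. It assumes ociremote.SignedEntity is the usual way to load the entity whose signatures are being republished; the reference string is a placeholder:

package main

import (
	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
)

func main() {
	// Placeholder digest of an already-signed image.
	d, err := name.NewDigest("registry.example.com/app@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		panic(err)
	}
	// Load the signed entity (assumed helper), then republish its signatures
	// as an OCI 1.1 referrer whose manifest subject points back at d.
	se, err := ociremote.SignedEntity(d)
	if err != nil {
		panic(err)
	}
	if err := ociremote.WriteSignaturesExperimentalOCI(d, se); err != nil {
		panic(err)
	}
}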
11  vendor/golang.org/x/net/http2/frame.go  generated  vendored
@@ -662,6 +662,15 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil {
		return err
	}
	return f.endWrite()
}

// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer.
// The caller should call endWrite to flush the frame to the underlying writer.
func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if !validStreamID(streamID) && !f.AllowIllegalWrites {
		return errStreamID
	}
@@ -691,7 +700,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
	}
	f.wbuf = append(f.wbuf, data...)
	f.wbuf = append(f.wbuf, pad...)
	return f.endWrite()
	return nil
}

// A SettingsFrame conveys configuration parameters that affect how
85  vendor/golang.org/x/net/http2/hpack/hpack.go  generated  vendored
@@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {

	var hf HeaderField
	wantStr := d.emitEnabled || it.indexed()
	var undecodedName undecodedString
	if nameIdx > 0 {
		ihf, ok := d.at(nameIdx)
		if !ok {
@@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
		}
		hf.Name = ihf.Name
	} else {
		hf.Name, buf, err = d.readString(buf, wantStr)
		undecodedName, buf, err = d.readString(buf)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	undecodedValue, buf, err := d.readString(buf)
	if err != nil {
		return err
	}
	if wantStr {
		if nameIdx <= 0 {
			hf.Name, err = d.decodeString(undecodedName)
			if err != nil {
				return err
			}
		}
		hf.Value, err = d.decodeString(undecodedValue)
		if err != nil {
			return err
		}
	}
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)
@@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
	return 0, origP, errNeedMore
}

// readString decodes an hpack string from p.
// readString reads an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
// It returns a reference to the encoded string data to permit deferring decode costs
// until after the caller verifies all data is present.
func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
		return u, p, errNeedMore
	}
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
		return u, p, err
	}
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
		// Returning an error here means Huffman decoding errors
		// for non-indexed strings past the maximum string length
		// are ignored, but the server is returning an error anyway
		// and because the string is not indexed the error will not
		// affect the decoding state.
		return u, nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		return "", p, errNeedMore
		return u, p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
	u.isHuff = isHuff
	u.b = p[:strLen]
	return u, p[strLen:], nil
}

type undecodedString struct {
	isHuff bool
	b      []byte
}

func (d *Decoder) decodeString(u undecodedString) (string, error) {
	if !u.isHuff {
		return string(u.b), nil
	}
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // don't trust others
	var s string
	err := huffmanDecode(buf, d.maxStrLen, u.b)
	if err == nil {
		s = buf.String()
	}
	buf.Reset() // be nice to GC
	bufPool.Put(buf)
	return s, err
}
18  vendor/golang.org/x/net/http2/server.go  generated  vendored
@@ -843,8 +843,13 @@ type frameWriteResult struct {
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
	err := wr.write.writeFrame(sc)
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
	var err error
	if wd == nil {
		err = wr.write.writeFrame(sc)
	} else {
		err = sc.framer.endWrite()
	}
	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
}

@@ -1251,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
		sc.writingFrameAsync = false
		err := wr.write.writeFrame(sc)
		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
	} else if wd, ok := wr.write.(*writeData); ok {
		// Encode the frame in the serve goroutine, to ensure we don't have
		// any lingering asynchronous references to data passed to Write.
		// See https://go.dev/issue/58446.
		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr, wd)
	} else {
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr)
		go sc.writeFrameAsync(wr, nil)
	}
}

8  vendor/google.golang.org/grpc/balancer/balancer.go  generated  vendored
@@ -279,6 +279,14 @@ type PickResult struct {
	// type, Done may not be called. May be nil if the balancer does not wish
	// to be notified when the RPC completes.
	Done func(DoneInfo)

	// Metadata provides a way for LB policies to inject arbitrary per-call
	// metadata. Any metadata returned here will be merged with existing
	// metadata added by the client application.
	//
	// LB policies with child policies are responsible for propagating metadata
	// injected by their children to the ClientConn, as part of Pick().
	Metatada metadata.MD
}

// TransientFailureError returns e. It exists for backward compatibility and
35  vendor/google.golang.org/grpc/clientconn.go  generated  vendored
@@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
	if err != nil {
		return nil, err
	}
	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
	if err != nil {
		return nil, err
	}
@@ -934,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
	return cc.sc.healthCheckConfig
}

func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
	return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
		Ctx:            ctx,
		FullMethodName: method,
@@ -1237,9 +1237,11 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
	addr.ServerName = ac.cc.getServerName(addr)
	hctx, hcancel := context.WithCancel(ac.ctx)

	onClose := grpcsync.OnceFunc(func() {
	onClose := func(r transport.GoAwayReason) {
		ac.mu.Lock()
		defer ac.mu.Unlock()
		// adjust params based on GoAwayReason
		ac.adjustParams(r)
		if ac.state == connectivity.Shutdown {
			// Already shut down. tearDown() already cleared the transport and
			// canceled hctx via ac.ctx, and we expected this connection to be
@@ -1260,19 +1262,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
		// Always go idle and wait for the LB policy to initiate a new
		// connection attempt.
		ac.updateConnectivityState(connectivity.Idle, nil)
	})
	onGoAway := func(r transport.GoAwayReason) {
		ac.mu.Lock()
		ac.adjustParams(r)
		ac.mu.Unlock()
		onClose()
	}

	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
	defer cancel()
	copts.ChannelzParentID = ac.channelzID

	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose)
	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
	if err != nil {
		if logger.V(2) {
			logger.Infof("Creating new client transport to %q: %v", addr, err)
@@ -1380,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
			if status.Code(err) == codes.Unimplemented {
				channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
			} else {
				channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
				channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
			}
		}
	}()
@@ -1591,30 +1587,17 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
}

// parseTarget uses RFC 3986 semantics to parse the given target into a
// resolver.Target struct containing scheme, authority and endpoint. Query
// resolver.Target struct containing scheme, authority and url. Query
// params are stripped from the endpoint.
func parseTarget(target string) (resolver.Target, error) {
	u, err := url.Parse(target)
	if err != nil {
		return resolver.Target{}, err
	}
	// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
	// value returned from url.Parse() contains a leading "/". Although this is
	// in accordance with RFC 3986, we do not want to break existing resolver
	// implementations which expect the endpoint without the leading "/". So, we
	// end up stripping the leading "/" here. But this will result in an
	// incorrect parsing for something like "unix:///path/to/socket". Since we
	// own the "unix" resolver, we can workaround in the unix resolver by using
	// the `URL` field instead of the `Endpoint` field.
	endpoint := u.Path
	if endpoint == "" {
		endpoint = u.Opaque
	}
	endpoint = strings.TrimPrefix(endpoint, "/")

	return resolver.Target{
		Scheme:    u.Scheme,
		Authority: u.Host,
		Endpoint:  endpoint,
		URL:       *u,
	}, nil
}
4  vendor/google.golang.org/grpc/credentials/tls.go  generated  vendored
@@ -23,9 +23,9 @@ import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"net/url"
	"os"

	credinternal "google.golang.org/grpc/internal/credentials"
)
@@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
	b, err := ioutil.ReadFile(certFile)
	b, err := os.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
15  vendor/google.golang.org/grpc/dialoptions.go  generated  vendored
@@ -44,6 +44,7 @@ func init() {
		extraDialOptions = nil
	}
	internal.WithBinaryLogger = withBinaryLogger
	internal.JoinDialOptions = newJoinDialOption
}

// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -111,6 +112,20 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
	}
}

type joinDialOption struct {
	opts []DialOption
}

func (jdo *joinDialOption) apply(do *dialOptions) {
	for _, opt := range jdo.opts {
		opt.apply(do)
	}
}

func newJoinDialOption(opts ...DialOption) DialOption {
	return &joinDialOption{opts: opts}
}

// WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is
4  vendor/google.golang.org/grpc/encoding/encoding.go  generated  vendored
@@ -75,7 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
	registeredCompressor[c.Name()] = c
	grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
	}
}

// GetCompressor returns Compressor for the given compressor name.
7  vendor/google.golang.org/grpc/grpclog/loggerv2.go  generated  vendored
@@ -22,7 +22,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strconv"
@@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config)
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
	errorW := ioutil.Discard
	warningW := ioutil.Discard
	infoW := ioutil.Discard
	errorW := io.Discard
	warningW := io.Discard
	infoW := io.Discard

	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
	switch logLevel {
126  vendor/google.golang.org/grpc/internal/binarylog/method_logger.go  generated  vendored
@@ -26,7 +26,7 @@ import (

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)
@@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other library to reuse as much logic
// in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
	m := c.toProto()
	timestamp, _ := ptypes.TimestampProto(time.Now())
	m.Timestamp = timestamp
@@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
	m.SequenceIdWithinCall = ml.idWithinCallGen.next()

	switch pay := m.Payload.(type) {
	case *pb.GrpcLogEntry_ClientHeader:
	case *binlogpb.GrpcLogEntry_ClientHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
	case *pb.GrpcLogEntry_ServerHeader:
	case *binlogpb.GrpcLogEntry_ServerHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
	case *pb.GrpcLogEntry_Message:
	case *binlogpb.GrpcLogEntry_Message:
		m.PayloadTruncated = ml.truncateMessage(pay.Message)
	}
	return m
@@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
	ml.sink.Write(ml.Build(c))
}

func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
	if ml.headerMaxLen == maxUInt {
		return false
	}
@@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
	return truncated
}

func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
	if ml.messageMaxLen == maxUInt {
		return false
	}
@@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated

// LogEntryConfig represents the configuration for binary log entry.
type LogEntryConfig interface {
	toProto() *pb.GrpcLogEntry
	toProto() *binlogpb.GrpcLogEntry
}

// ClientHeader configs the binary log entry to be a ClientHeader entry.
@@ -159,10 +159,10 @@ type ClientHeader struct {
	PeerAddr net.Addr
}

func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
	// This function doesn't need to set all the fields (e.g. seq ID). The Log
	// function will set the fields when necessary.
	clientHeader := &pb.ClientHeader{
	clientHeader := &binlogpb.ClientHeader{
		Metadata:   mdToMetadataProto(c.Header),
		MethodName: c.MethodName,
		Authority:  c.Authority,
@@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
	if c.Timeout > 0 {
		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &pb.GrpcLogEntry_ClientHeader{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: clientHeader,
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
@@ -195,19 +195,19 @@ type ServerHeader struct {
	PeerAddr net.Addr
}

func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Payload: &pb.GrpcLogEntry_ServerHeader{
			ServerHeader: &pb.ServerHeader{
func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
			ServerHeader: &binlogpb.ServerHeader{
				Metadata: mdToMetadataProto(c.Header),
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
@@ -223,7 +223,7 @@ type ClientMessage struct {
	Message interface{}
}

func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
	var (
		data []byte
		err  error
@@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
	} else {
		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Payload: &pb.GrpcLogEntry_Message{
			Message: &pb.Message{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{
				Length: uint32(len(data)),
				Data:   data,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -263,7 +263,7 @@ type ServerMessage struct {
	Message interface{}
}

func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
	var (
		data []byte
		err  error
@@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
	} else {
		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
		Payload: &pb.GrpcLogEntry_Message{
			Message: &pb.Message{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{
				Length: uint32(len(data)),
				Data:   data,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -300,15 +300,15 @@ type ClientHalfClose struct {
	OnClientSide bool
}

func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
	ret := &binlogpb.GrpcLogEntry{
		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
		Payload: nil, // No payload here.
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -324,7 +324,7 @@ type ServerTrailer struct {
	PeerAddr net.Addr
}

func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
	st, ok := status.FromError(c.Err)
	if !ok {
		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
@@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
		}
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Payload: &pb.GrpcLogEntry_Trailer{
			Trailer: &pb.Trailer{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Payload: &binlogpb.GrpcLogEntry_Trailer{
			Trailer: &binlogpb.Trailer{
				Metadata:      mdToMetadataProto(c.Trailer),
				StatusCode:    uint32(st.Code()),
				StatusMessage: st.Message(),
@@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
@@ -367,15 +367,15 @@ type Cancel struct {
	OnClientSide bool
}

func (c *Cancel) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
	ret := &binlogpb.GrpcLogEntry{
		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
		Payload: nil,
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool {
	return strings.HasPrefix(key, "grpc-")
}

func mdToMetadataProto(md metadata.MD) *pb.Metadata {
	ret := &pb.Metadata{}
func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
	ret := &binlogpb.Metadata{}
	for k, vv := range md {
		if metadataKeyOmit(k) {
			continue
		}
		for _, v := range vv {
			ret.Entry = append(ret.Entry,
				&pb.MetadataEntry{
				&binlogpb.MetadataEntry{
					Key:   k,
					Value: []byte(v),
				},
@@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
	return ret
}

func addrToProto(addr net.Addr) *pb.Address {
	ret := &pb.Address{}
func addrToProto(addr net.Addr) *binlogpb.Address {
	ret := &binlogpb.Address{}
	switch a := addr.(type) {
	case *net.TCPAddr:
		if a.IP.To4() != nil {
			ret.Type = pb.Address_TYPE_IPV4
			ret.Type = binlogpb.Address_TYPE_IPV4
		} else if a.IP.To16() != nil {
			ret.Type = pb.Address_TYPE_IPV6
			ret.Type = binlogpb.Address_TYPE_IPV6
		} else {
			ret.Type = pb.Address_TYPE_UNKNOWN
			ret.Type = binlogpb.Address_TYPE_UNKNOWN
			// Do not set address and port fields.
			break
		}
		ret.Address = a.IP.String()
		ret.IpPort = uint32(a.Port)
	case *net.UnixAddr:
		ret.Type = pb.Address_TYPE_UNIX
		ret.Type = binlogpb.Address_TYPE_UNIX
		ret.Address = a.String()
	default:
		ret.Type = pb.Address_TYPE_UNKNOWN
		ret.Type = binlogpb.Address_TYPE_UNKNOWN
	}
	return ret
}
12  vendor/google.golang.org/grpc/internal/binarylog/sink.go  generated  vendored
@@ -26,7 +26,7 @@ import (
	"time"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

var (
@@ -42,15 +42,15 @@ type Sink interface {
	// Write will be called to write the log entry into the sink.
	//
	// It should be thread-safe so it can be called in parallel.
	Write(*pb.GrpcLogEntry) error
	Write(*binlogpb.GrpcLogEntry) error
	// Close will be called when the Sink is replaced by a new Sink.
	Close() error
}

type noopSink struct{}

func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error                 { return nil }
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error                       { return nil }

// newWriterSink creates a binary log sink with the given writer.
//
@@ -66,7 +66,7 @@ type writerSink struct {
	out io.Writer
}

func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
	b, err := proto.Marshal(e)
	if err != nil {
		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
@@ -96,7 +96,7 @@ type bufferedSink struct {
	done chan struct{}
}

func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	if !fs.flusherStarted {
39  vendor/google.golang.org/grpc/internal/envconfig/envconfig.go  generated  vendored
@@ -21,19 +21,42 @@ package envconfig

import (
	"os"
	"strconv"
	"strings"
)

const (
	prefix                  = "GRPC_GO_"
	txtErrIgnoreStr         = prefix + "IGNORE_TXT_ERRORS"
	advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS"
)

var (
	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
	// AdvertiseCompressors is set if registered compressor should be advertised
	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
	AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false")
	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
	// RingHashCap indicates the maximum ring size which defaults to 4096
	// entries but may be overridden by setting the environment variable
	// "GRPC_RING_HASH_CAP". This does not override the default bounds
	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
)

func boolFromEnv(envVar string, def bool) bool {
	if def {
		// The default is true; return true unless the variable is "false".
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	// The default is false; return false unless the variable is "true".
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}
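The helpers above encode a simple rule: a default of true is only overridden by an explicit "false", and a default of false only by an explicit "true". A standalone restatement of that rule, for illustration only (the vendored helpers themselves are unexported):

package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnv mirrors the vendored helper shown above.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	os.Setenv("EXAMPLE_FLAG", "FALSE")
	fmt.Println(boolFromEnv("EXAMPLE_FLAG", true)) // false: explicit, case-insensitive opt-out
	fmt.Println(boolFromEnv("UNSET_FLAG", true))   // true: default wins when unset
	fmt.Println(boolFromEnv("UNSET_FLAG", false))  // false: default-false flags need an explicit "true"
}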
31  vendor/google.golang.org/grpc/internal/envconfig/xds.go  generated  vendored
@@ -20,7 +20,6 @@ package envconfig

import (
	"os"
	"strings"
)

const (
@@ -36,16 +35,6 @@ const (
	//
	// When both bootstrap FileName and FileContent are set, FileName is used.
	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"

	ringHashSupportEnv           = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
	clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
	aggregateAndDNSSupportEnv    = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
	rbacSupportEnv               = "GRPC_XDS_EXPERIMENTAL_RBAC"
	outlierDetectionSupportEnv   = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
	federationEnv                = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
	rlsInXDSEnv                  = "GRPC_EXPERIMENTAL_XDS_RLS_LB"

	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
)

var (
@@ -64,38 +53,40 @@ var (
	// XDSRingHash indicates whether ring hash support is enabled, which can be
	// disabled by setting the environment variable
	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
	XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
	XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
	// XDSClientSideSecurity is used to control processing of security
	// configuration on the client-side.
	//
	// Note that there is no env var protection for the server-side because we
	// have a brand new API on the server-side and users explicitly need to use
	// the new API to get security integration on the server.
	XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
	// and DNS cluster is enabled, which can be enabled by setting the
	// environment variable
	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
	// "true".
	XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)

	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
	// which can be disabled by setting the environment variable
	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
	XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
	XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
	// XDSOutlierDetection indicates whether outlier detection support is
	// enabled, which can be disabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
	XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false")
	// XDSFederation indicates whether federation support is enabled.
	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
	XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
	// XDSFederation indicates whether federation support is enabled, which can
	// be enabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)

	// XDSRLS indicates whether processing of Cluster Specifier plugins and
	// support for the RLS CLuster Specifier is enabled, which can be enabled by
	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
	// "true".
	XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)

	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
)
3  vendor/google.golang.org/grpc/internal/internal.go  generated  vendored
@@ -77,6 +77,9 @@ var (
	// ClearGlobalDialOptions clears the array of extra DialOption. This
	// method is useful in testing and benchmarking.
	ClearGlobalDialOptions func()
	// JoinDialOptions combines the dial options passed as arguments into a
	// single dial option.
	JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
	// JoinServerOptions combines the server options passed as arguments into a
	// single server option.
	JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
2  vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go  generated  vendored
@@ -116,7 +116,7 @@ type dnsBuilder struct{}

// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	host, port, err := parseTarget(target.Endpoint, defaultPort)
	host, port, err := parseTarget(target.Endpoint(), defaultPort)
	if err != nil {
		return nil, err
	}
4  vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go  generated  vendored
@@ -31,7 +31,7 @@ const scheme = "passthrough"
type passthroughBuilder struct{}

func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	if target.Endpoint == "" && opts.Dialer == nil {
	if target.Endpoint() == "" && opts.Dialer == nil {
		return nil, errors.New("passthrough: received empty target in Build()")
	}
	r := &passthroughResolver{
@@ -52,7 +52,7 @@ type passthroughResolver struct {
}

func (r *passthroughResolver) start() {
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}

func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
22  vendor/google.golang.org/grpc/internal/transport/controlbuf.go  generated  vendored
@@ -527,6 +527,9 @@ const minBatchSize = 1000
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
// if the batch size is too low to give stream goroutines a chance to fill it up.
func (l *loopyWriter) run() (err error) {
	// Always flush the writer before exiting in case there are pending frames
	// to be sent.
	defer l.framer.writer.Flush()
	for {
		it, err := l.cbuf.get(true)
		if err != nil {
@@ -650,16 +653,18 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
		itl: &itemList{},
		wq:  h.wq,
	}
	str.itl.enqueue(h)
	return l.originateStream(str)
	return l.originateStream(str, h)
}

func (l *loopyWriter) originateStream(str *outStream) error {
	hdr := str.itl.dequeue().(*headerFrame)
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
	// l.draining is set when handling GoAway. In which case, we want to avoid
	// creating new streams.
	if l.draining {
		// TODO: provide a better error with the reason we are in draining.
		hdr.onOrphaned(errStreamDrain)
		return nil
	}
	if err := hdr.initStream(str.id); err != nil {
		if err == errStreamDrain { // errStreamDrain need not close transport
			return nil
		}
		return err
	}
	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
@@ -757,7 +762,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
			return err
		}
	}
	if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
	if l.draining && len(l.estdStreams) == 0 {
		return errors.New("finished processing active streams while in draining mode")
	}
	return nil
@@ -812,7 +817,6 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
}

func (l *loopyWriter) closeConnectionHandler() error {
	l.framer.writer.Flush()
	// Exit loopyWriter entirely by returning an error here. This will lead to
	// the transport closing the connection, and, ultimately, transport
	// closure.
6  vendor/google.golang.org/grpc/internal/transport/defaults.go  generated  vendored
@@ -47,3 +47,9 @@ const (
	defaultClientMaxHeaderListSize = uint32(16 << 20)
	defaultServerMaxHeaderListSize = uint32(16 << 20)
)

// MaxStreamID is the upper bound for the stream ID before the current
// transport gracefully closes and new transport is created for subsequent RPCs.
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
// integer. It's exported so that tests can override it.
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
4  vendor/google.golang.org/grpc/internal/transport/handler_server.go  generated  vendored
@@ -65,7 +65,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
	if !validContentType {
		msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
		http.Error(w, msg, http.StatusBadRequest)
		http.Error(w, msg, http.StatusUnsupportedMediaType)
		return nil, errors.New(msg)
	}
	if _, ok := w.(http.Flusher); !ok {
@@ -87,7 +87,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
	if v := r.Header.Get("grpc-timeout"); v != "" {
		to, err := decodeTimeout(v)
		if err != nil {
			msg := fmt.Sprintf("malformed time-out: %v", err)
			msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
			http.Error(w, msg, http.StatusBadRequest)
			return nil, status.Error(codes.Internal, msg)
		}
52  vendor/google.golang.org/grpc/internal/transport/http2_client.go  generated  vendored
@@ -140,8 +140,7 @@ type http2Client struct {
	channelzID *channelz.Identifier
	czData     *channelzData

	onGoAway func(GoAwayReason)
	onClose  func()
	onClose func(GoAwayReason)

	bufferPool *bufferPool

@@ -197,7 +196,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
	scheme := "http"
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
@@ -217,7 +216,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
		}
		return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
		return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
	}

	// Any further errors will close the underlying connection
@@ -343,7 +342,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
		streamQuota:           defaultMaxStreamsClient,
		streamsQuotaAvailable: make(chan struct{}, 1),
		czData:                new(channelzData),
		onGoAway:              onGoAway,
		keepaliveEnabled:      keepaliveEnabled,
		bufferPool:            newBufferPool(),
		onClose:               onClose,
@@ -744,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
		endStream: false,
		initStream: func(id uint32) error {
			t.mu.Lock()
			if state := t.state; state != reachable {
			// TODO: handle transport closure in loopy instead and remove this
			// initStream is never called when transport is draining.
			if t.state == closing {
				t.mu.Unlock()
				// Do a quick cleanup.
				err := error(errStreamDrain)
				if state == closing {
					err = ErrConnClosing
				}
				cleanup(err)
				return err
				cleanup(ErrConnClosing)
				return ErrConnClosing
			}
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.streamsStarted, 1)
@@ -770,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
	}
	firstTry := true
	var ch chan struct{}
	transportDrainRequired := false
	checkForStreamQuota := func(it interface{}) bool {
		if t.streamQuota <= 0 { // Can go negative if server decreases it.
			if firstTry {
@@ -785,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
		h := it.(*headerFrame)
		h.streamID = t.nextID
		t.nextID += 2

		// Drain client transport if nextID > MaxStreamID which signals gRPC that
		// the connection is closed and a new one must be created for subsequent RPCs.
		transportDrainRequired = t.nextID > MaxStreamID

		s.id = h.streamID
		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
		t.mu.Lock()
@@ -864,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
			sh.HandleRPC(s.ctx, outHeader)
		}
	}
	if transportDrainRequired {
		if logger.V(logLevel) {
			logger.Infof("transport: t.nextID > MaxStreamID. Draining")
		}
		t.GracefulClose()
	}
	return s, nil
}

@@ -957,7 +964,9 @@ func (t *http2Client) Close(err error) {
	}
	// Call t.onClose ASAP to prevent the client from attempting to create new
	// streams.
	t.onClose()
	if t.state != draining {
		t.onClose(GoAwayInvalid)
	}
	t.state = closing
	streams := t.activeStreams
	t.activeStreams = nil
@@ -1010,6 +1019,7 @@ func (t *http2Client) GracefulClose() {
	if logger.V(logLevel) {
		logger.Infof("transport: GracefulClose called")
	}
	t.onClose(GoAwayInvalid)
	t.state = draining
	active := len(t.activeStreams)
	t.mu.Unlock()
@@ -1172,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	statusCode, ok := http2ErrConvTab[f.ErrCode]
	if !ok {
		if logger.V(logLevel) {
			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
		}
		statusCode = codes.Unknown
	}
@@ -1290,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
	// Notify the clientconn about the GOAWAY before we set the state to
	// draining, to allow the client to stop attempting to create streams
	// before disallowing new streams on this connection.
	t.onGoAway(t.goAwayReason)
	t.state = draining
	if t.state != draining {
		t.onClose(t.goAwayReason)
		t.state = draining
	}
	// All streams with IDs greater than the GoAwayId
	// and smaller than the previous GoAway ID should be killed.
@@ -1780,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
		return -2
	}
}

func (t *http2Client) stateForTesting() transportState {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.state
||||
}
|
||||
|
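The http2_client.go hunks above fold the separate onGoAway and onClose callbacks into a single onClose(GoAwayReason) that fires at most once, whether the transport drains after a GOAWAY or is closed directly. A small sketch of that callback shape, using stand-in types (GoAwayReason values and the conn struct here are illustrative, not the real transport):

```go
package main

import (
	"fmt"
	"sync"
)

// GoAwayReason is a stand-in for the transport's enum; GoAwayInvalid means
// "no GOAWAY was involved", e.g. a plain Close.
type GoAwayReason int

const (
	GoAwayInvalid      GoAwayReason = 0
	GoAwayTooManyPings GoAwayReason = 2
)

// conn sketches the merged-callback pattern: one onClose(GoAwayReason),
// guarded so it is not invoked a second time once draining has started.
type conn struct {
	mu       sync.Mutex
	draining bool
	onClose  func(GoAwayReason)
}

func (c *conn) gracefulClose() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.draining {
		return
	}
	c.draining = true
	c.onClose(GoAwayInvalid)
}

func (c *conn) handleGoAway(r GoAwayReason) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.draining {
		c.onClose(r)
		c.draining = true
	}
}

func main() {
	c := &conn{onClose: func(r GoAwayReason) { fmt.Println("closing, reason:", r) }}
	c.handleGoAway(GoAwayTooManyPings) // notifies once
	c.gracefulClose()                  // no second notification
}
```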
46
vendor/google.golang.org/grpc/internal/transport/http2_server.go
generated
vendored
@ -380,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
}
|
||||
var (
|
||||
// If a gRPC Response-Headers has already been received, then it means
|
||||
// that the peer is speaking gRPC and we are in gRPC mode.
|
||||
isGRPC = false
|
||||
mdata = make(map[string][]string)
|
||||
httpMethod string
|
||||
// headerError is set if an error is encountered while parsing the headers
|
||||
headerError bool
|
||||
// if false, content-type was missing or invalid
|
||||
isGRPC = false
|
||||
contentType = ""
|
||||
mdata = make(map[string][]string)
|
||||
httpMethod string
|
||||
// these are set if an error is encountered while parsing the headers
|
||||
protocolError bool
|
||||
headerError *status.Status
|
||||
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
@ -397,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
case "content-type":
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
|
||||
if !validContentType {
|
||||
contentType = hf.Value
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
@ -412,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
timeoutSet = true
|
||||
var err error
|
||||
if timeout, err = decodeTimeout(hf.Value); err != nil {
|
||||
headerError = true
|
||||
headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
|
||||
}
|
||||
// "Transports must consider requests containing the Connection header
|
||||
// as malformed." - A41
|
||||
@ -420,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
|
||||
}
|
||||
headerError = true
|
||||
protocolError = true
|
||||
default:
|
||||
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(hf.Name, hf.Value)
|
||||
if err != nil {
|
||||
headerError = true
|
||||
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
|
||||
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
break
|
||||
}
|
||||
@ -446,7 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
logger.Errorf("transport: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: 400,
|
||||
httpStatus: http.StatusBadRequest,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.New(codes.Internal, errMsg),
|
||||
@ -455,7 +457,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
return nil
|
||||
}
|
||||
|
||||
if !isGRPC || headerError {
|
||||
if protocolError {
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: streamID,
|
||||
rst: true,
|
||||
@ -464,6 +466,26 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
})
|
||||
return nil
|
||||
}
|
||||
if !isGRPC {
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusUnsupportedMediaType,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
if headerError != nil {
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusBadRequest,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: headerError,
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// "If :authority is missing, Host must be renamed to :authority." - A41
|
||||
if len(mdata[":authority"]) == 0 {
|
||||
|
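In the http2_server.go hunk above, header-parsing failures now carry a *status.Status instead of a bare bool, so the early-abort path can tell the client exactly what was malformed, while true protocol errors reset the stream separately. A hedged sketch of that error shape (time.ParseDuration stands in for the real grpc-timeout decoder; assumes the google.golang.org/grpc module is available):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// parseTimeoutHeader returns a structured status on failure, mirroring the
// new headerError *status.Status field rather than a boolean flag.
func parseTimeoutHeader(v string) (time.Duration, *status.Status) {
	d, err := time.ParseDuration(v) // stand-in for the grpc-timeout decoder
	if err != nil {
		return 0, status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
	}
	return d, nil
}

func main() {
	if _, st := parseTimeoutHeader("not-a-duration"); st != nil {
		fmt.Println(st.Code(), st.Message())
	}
}
```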
4
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
@ -583,8 +583,8 @@ type ConnectOptions struct {
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose)
|
||||
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
|
28
vendor/google.golang.org/grpc/picker_wrapper.go
generated
vendored
@ -58,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||||
pw.mu.Unlock()
|
||||
}
|
||||
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||||
// doneChannelzWrapper performs the following:
|
||||
// - increments the calls started channelz counter
|
||||
// - wraps the done function in the passed in result to increment the calls
|
||||
// failed or calls succeeded channelz counter before invoking the actual
|
||||
// done function.
|
||||
func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
|
||||
acw.mu.Lock()
|
||||
ac := acw.ac
|
||||
acw.mu.Unlock()
|
||||
ac.incrCallsStarted()
|
||||
return func(b balancer.DoneInfo) {
|
||||
done := result.Done
|
||||
result.Done = func(b balancer.DoneInfo) {
|
||||
if b.Err != nil && b.Err != io.EOF {
|
||||
ac.incrCallsFailed()
|
||||
} else {
|
||||
@ -82,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
||||
// - the current picker returns other errors and failfast is false.
|
||||
// - the subConn returned by the current picker is not READY
|
||||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||||
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||||
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
|
||||
var ch chan struct{}
|
||||
|
||||
var lastPickErr error
|
||||
@ -90,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
pw.mu.Lock()
|
||||
if pw.done {
|
||||
pw.mu.Unlock()
|
||||
return nil, nil, ErrClientConnClosing
|
||||
return nil, balancer.PickResult{}, ErrClientConnClosing
|
||||
}
|
||||
|
||||
if pw.picker == nil {
|
||||
@ -111,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
}
|
||||
switch ctx.Err() {
|
||||
case context.DeadlineExceeded:
|
||||
return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
|
||||
return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
|
||||
case context.Canceled:
|
||||
return nil, nil, status.Error(codes.Canceled, errStr)
|
||||
return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
|
||||
}
|
||||
case <-ch:
|
||||
}
|
||||
@ -125,7 +131,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
pw.mu.Unlock()
|
||||
|
||||
pickResult, err := p.Pick(info)
|
||||
|
||||
if err != nil {
|
||||
if err == balancer.ErrNoSubConnAvailable {
|
||||
continue
|
||||
@ -136,7 +141,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
|
||||
}
|
||||
return nil, nil, dropError{error: err}
|
||||
return nil, balancer.PickResult{}, dropError{error: err}
|
||||
}
|
||||
// For all other errors, wait for ready RPCs should block and other
|
||||
// RPCs should fail with unavailable.
|
||||
@ -144,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
lastPickErr = err
|
||||
continue
|
||||
}
|
||||
return nil, nil, status.Error(codes.Unavailable, err.Error())
|
||||
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
|
||||
}
|
||||
|
||||
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||||
@ -154,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||||
}
|
||||
if t := acw.getAddrConn().getReadyTransport(); t != nil {
|
||||
if channelz.IsOn() {
|
||||
return t, doneChannelzWrapper(acw, pickResult.Done), nil
|
||||
doneChannelzWrapper(acw, &pickResult)
|
||||
return t, pickResult, nil
|
||||
}
|
||||
return t, pickResult.Done, nil
|
||||
return t, pickResult, nil
|
||||
}
|
||||
if pickResult.Done != nil {
|
||||
// Calling done with nil error, no bytes sent and no bytes received.
|
||||
|
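The picker_wrapper.go hunk above changes pick to return a balancer.PickResult and has doneChannelzWrapper wrap result.Done in place rather than returning a new callback. A stripped-down sketch of that wrap-in-place pattern with stand-in types (DoneInfo and PickResult here are pared-down copies, not the balancer package):

```go
package main

import "fmt"

// DoneInfo and PickResult are simplified stand-ins for the balancer types.
type DoneInfo struct{ Err error }

type PickResult struct {
	Done func(DoneInfo)
}

type callCounters struct{ started, succeeded, failed int }

// wrapDone mutates result.Done in place, chaining to the original callback,
// instead of handing back a separate wrapper function.
func wrapDone(c *callCounters, result *PickResult) {
	c.started++
	done := result.Done
	result.Done = func(d DoneInfo) {
		if d.Err != nil {
			c.failed++
		} else {
			c.succeeded++
		}
		if done != nil {
			done(d)
		}
	}
}

func main() {
	var c callCounters
	res := PickResult{Done: func(DoneInfo) { fmt.Println("inner done") }}
	wrapDone(&c, &res)
	res.Done(DoneInfo{})
	fmt.Printf("started=%d succeeded=%d failed=%d\n", c.started, c.succeeded, c.failed)
}
```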
2
vendor/google.golang.org/grpc/pickfirst.go
generated
vendored
@ -51,7 +51,7 @@ type pickfirstBalancer struct {
|
||||
|
||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
if logger.V(2) {
|
||||
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||||
logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
|
||||
}
|
||||
if b.subConn == nil {
|
||||
b.state = connectivity.TransientFailure
|
||||
|
22
vendor/google.golang.org/grpc/resolver/resolver.go
generated
vendored
@ -24,6 +24,7 @@ import (
|
||||
"context"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
@ -247,9 +248,6 @@ type Target struct {
|
||||
Scheme string
|
||||
// Deprecated: use URL.Host instead.
|
||||
Authority string
|
||||
// Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
|
||||
// the former is empty.
|
||||
Endpoint string
|
||||
// URL contains the parsed dial target with an optional default scheme added
|
||||
// to it if the original dial target contained no scheme or contained an
|
||||
// unregistered scheme. Any query params specified in the original dial
|
||||
@ -257,6 +255,24 @@ type Target struct {
|
||||
URL url.URL
|
||||
}
|
||||
|
||||
// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
|
||||
// or `URL.Opaque`. The latter is used when the former is empty.
|
||||
func (t Target) Endpoint() string {
|
||||
endpoint := t.URL.Path
|
||||
if endpoint == "" {
|
||||
endpoint = t.URL.Opaque
|
||||
}
|
||||
// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
|
||||
// value returned from url.Parse() contains a leading "/". Although this is
|
||||
// in accordance with RFC 3986, we do not want to break existing resolver
|
||||
// implementations which expect the endpoint without the leading "/". So, we
|
||||
// end up stripping the leading "/" here. But this will result in an
|
||||
// incorrect parsing for something like "unix:///path/to/socket". Since we
|
||||
// own the "unix" resolver, we can workaround in the unix resolver by using
|
||||
// the `URL` field.
|
||||
return strings.TrimPrefix(endpoint, "/")
|
||||
}
|
||||
|
||||
// Builder creates a resolver that will be used to watch name resolution updates.
|
||||
type Builder interface {
|
||||
// Build creates a new resolver for the given target.
|
||||
|
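The resolver.go hunk above replaces the deprecated Target.Endpoint field with an Endpoint() accessor that reads URL.Path, falls back to URL.Opaque, and strips one leading "/". A small sketch of the same derivation over net/url (the example targets are illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// endpointOf mirrors the new accessor: Path first, then Opaque, with a
// single leading "/" removed for backwards compatibility.
func endpointOf(u *url.URL) string {
	endpoint := u.Path
	if endpoint == "" {
		endpoint = u.Opaque
	}
	return strings.TrimPrefix(endpoint, "/")
}

func main() {
	for _, target := range []string{"dns:///example.org:50051", "unix:relative.sock"} {
		u, err := url.Parse(target)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %q\n", target, endpointOf(u))
	}
}
```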
17
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
@ -25,7 +25,6 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||||
return &gzipCompressor{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
|
||||
w, err := gzip.NewWriterLevel(io.Discard, level)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||||
z.Close()
|
||||
d.pool.Put(z)
|
||||
}()
|
||||
return ioutil.ReadAll(z)
|
||||
return io.ReadAll(z)
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Type() string {
|
||||
@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error {
|
||||
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
|
||||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can receive.
|
||||
// in bytes the client can receive. If this is not set, gRPC uses the default
|
||||
// 4MB.
|
||||
func MaxCallRecvMsgSize(bytes int) CallOption {
|
||||
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
|
||||
}
|
||||
@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
||||
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||||
|
||||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
|
||||
// in bytes the client can send.
|
||||
// in bytes the client can send. If this is not set, gRPC uses the default
|
||||
// `math.MaxInt32`.
|
||||
func MaxCallSendMsgSize(bytes int) CallOption {
|
||||
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
|
||||
}
|
||||
@ -711,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||||
}
|
||||
if size > maxReceiveMessageSize {
|
||||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||||
@ -746,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
||||
}
|
||||
// Read from LimitReader with limit max+1. So if the underlying
|
||||
// reader is over limit, the result will be bigger than max.
|
||||
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||||
return d, len(d), err
|
||||
}
|
||||
|
||||
@ -759,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
|
||||
return err
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
||||
}
|
||||
if payInfo != nil {
|
||||
payInfo.uncompressedBytes = d
|
||||
|
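The rpc_util.go hunk above finishes the ioutil-to-io migration while keeping the LimitReader(max+1) trick for detecting oversized messages. A minimal sketch of that capped-read pattern (the limit and input are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readCapped reads at most max+1 bytes so that "more than max" can be
// detected without buffering an unbounded message; io.ReadAll replaces the
// deprecated ioutil.ReadAll.
func readCapped(r io.Reader, max int64) ([]byte, error) {
	d, err := io.ReadAll(io.LimitReader(r, max+1))
	if err != nil {
		return nil, err
	}
	if int64(len(d)) > max {
		return nil, fmt.Errorf("message larger than max (%d vs. %d bytes)", len(d), max)
	}
	return d, nil
}

func main() {
	_, err := readCapped(strings.NewReader("0123456789"), 4)
	fmt.Println(err) // message larger than max (5 vs. 4 bytes)
}
```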
2
vendor/google.golang.org/grpc/server.go
generated
vendored
@ -1299,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||||
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||||
if err != nil {
|
||||
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
|
||||
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
10
vendor/google.golang.org/grpc/service_config.go
generated
vendored
@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
var rsc jsonSC
|
||||
err := json.Unmarshal([]byte(js), &rsc)
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
sc := ServiceConfig{
|
||||
@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
}
|
||||
d, err := parseDuration(m.Timeout)
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
Timeout: d,
|
||||
}
|
||||
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||||
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
if m.MaxRequestMessageBytes != nil {
|
||||
@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||||
for i, n := range *m.Name {
|
||||
path, err := n.generatePath()
|
||||
if err != nil {
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
||||
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
|
||||
if _, ok := paths[path]; ok {
|
||||
err = errDuplicatedName
|
||||
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
||||
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||||
return &serviceconfig.ParseResult{Err: err}
|
||||
}
|
||||
paths[path] = struct{}{}
|
||||
|
40
vendor/google.golang.org/grpc/stream.go
generated
vendored
@ -438,7 +438,7 @@ func (a *csAttempt) getTransport() error {
|
||||
cs := a.cs
|
||||
|
||||
var err error
|
||||
a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||||
a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||||
if err != nil {
|
||||
if de, ok := err.(dropError); ok {
|
||||
err = de.error
|
||||
@ -455,6 +455,25 @@ func (a *csAttempt) getTransport() error {
|
||||
func (a *csAttempt) newStream() error {
|
||||
cs := a.cs
|
||||
cs.callHdr.PreviousAttempts = cs.numRetries
|
||||
|
||||
// Merge metadata stored in PickResult, if any, with existing call metadata.
|
||||
// It is safe to overwrite the csAttempt's context here, since all state
|
||||
// maintained in it are local to the attempt. When the attempt has to be
|
||||
// retried, a new instance of csAttempt will be created.
|
||||
if a.pickResult.Metatada != nil {
|
||||
// We currently do not have a function it the metadata package which
|
||||
// merges given metadata with existing metadata in a context. Existing
|
||||
// function `AppendToOutgoingContext()` takes a variadic argument of key
|
||||
// value pairs.
|
||||
//
|
||||
// TODO: Make it possible to retrieve key value pairs from metadata.MD
|
||||
// in a form passable to AppendToOutgoingContext(), or create a version
|
||||
// of AppendToOutgoingContext() that accepts a metadata.MD.
|
||||
md, _ := metadata.FromOutgoingContext(a.ctx)
|
||||
md = metadata.Join(md, a.pickResult.Metatada)
|
||||
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
|
||||
}
|
||||
|
||||
s, err := a.t.NewStream(a.ctx, cs.callHdr)
|
||||
if err != nil {
|
||||
nse, ok := err.(*transport.NewStreamError)
|
||||
@ -529,12 +548,12 @@ type clientStream struct {
|
||||
// csAttempt implements a single transport stream attempt within a
|
||||
// clientStream.
|
||||
type csAttempt struct {
|
||||
ctx context.Context
|
||||
cs *clientStream
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
done func(balancer.DoneInfo)
|
||||
ctx context.Context
|
||||
cs *clientStream
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
pickResult balancer.PickResult
|
||||
|
||||
finished bool
|
||||
dc Decompressor
|
||||
@ -1103,12 +1122,12 @@ func (a *csAttempt) finish(err error) {
|
||||
tr = a.s.Trailer()
|
||||
}
|
||||
|
||||
if a.done != nil {
|
||||
if a.pickResult.Done != nil {
|
||||
br := false
|
||||
if a.s != nil {
|
||||
br = a.s.BytesReceived()
|
||||
}
|
||||
a.done(balancer.DoneInfo{
|
||||
a.pickResult.Done(balancer.DoneInfo{
|
||||
Err: err,
|
||||
Trailer: tr,
|
||||
BytesSent: a.s != nil,
|
||||
@ -1464,6 +1483,9 @@ type ServerStream interface {
|
||||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||||
// to call SendMsg on the same stream in different goroutines.
|
||||
//
|
||||
// It is not safe to modify the message after calling SendMsg. Tracing
|
||||
// libraries and stats handlers may use the message lazily.
|
||||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message into m or the stream is
|
||||
// done. It returns io.EOF when the client has performed a CloseSend. On
|
||||
|
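The stream.go hunk above merges metadata supplied by the balancer's PickResult into the attempt's outgoing context before the stream is created. A sketch of that merge using the grpc metadata package (assumes google.golang.org/grpc is available; the keys are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// mergeIntoOutgoing joins picker-supplied metadata with whatever is already
// on the outgoing context and writes the result back, as the hunk does for
// the csAttempt context.
func mergeIntoOutgoing(ctx context.Context, extra metadata.MD) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = metadata.Join(md, extra)
	return metadata.NewOutgoingContext(ctx, md)
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "x-request-id", "123")
	ctx = mergeIntoOutgoing(ctx, metadata.Pairs("x-picked-backend", "backend-a")) // illustrative keys
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md)
}
```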
2
vendor/google.golang.org/grpc/version.go
generated
vendored
@ -19,4 +19,4 @@
|
||||
package grpc
|
||||
|
||||
// Version is the current grpc version.
|
||||
const Version = "1.52.3"
|
||||
const Version = "1.53.0"
|
||||
|
20
vendor/google.golang.org/grpc/vet.sh
generated
vendored
@ -66,6 +66,17 @@ elif [[ "$#" -ne 0 ]]; then
|
||||
die "Unknown argument(s): $*"
|
||||
fi
|
||||
|
||||
# - Check that generated proto files are up to date.
|
||||
if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||||
PATH="/home/travis/bin:${PATH}" make proto && \
|
||||
git status --porcelain 2>&1 | fail_on_output || \
|
||||
(git status; git --no-pager diff; exit 1)
|
||||
fi
|
||||
|
||||
if [[ -n "${VET_ONLY_PROTO}" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# - Ensure all source files contain a copyright message.
|
||||
# (Done in two parts because Darwin "git grep" has broken support for compound
|
||||
# exclusion matches.)
|
||||
@ -93,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.
|
||||
|
||||
misspell -error .
|
||||
|
||||
# - Check that generated proto files are up to date.
|
||||
if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||||
PATH="/home/travis/bin:${PATH}" make proto && \
|
||||
git status --porcelain 2>&1 | fail_on_output || \
|
||||
(git status; git --no-pager diff; exit 1)
|
||||
fi
|
||||
|
||||
# - gofmt, goimports, golint (with exceptions for generated code), go vet,
|
||||
# go mod tidy.
|
||||
# Perform these checks on each module inside gRPC.
|
||||
@ -111,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
|
||||
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
|
||||
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
|
||||
|
||||
go mod tidy
|
||||
go mod tidy -compat=1.17
|
||||
git status --porcelain 2>&1 | fail_on_output || \
|
||||
(git status; git --no-pager diff; exit 1)
|
||||
popd
|
||||
|
40
vendor/k8s.io/klog/v2/internal/buffer/buffer.go
generated
vendored
@ -40,44 +40,22 @@ type Buffer struct {
|
||||
next *Buffer
|
||||
}
|
||||
|
||||
// Buffers manages the reuse of individual buffer instances. It is thread-safe.
|
||||
type Buffers struct {
|
||||
// mu protects the free list. It is separate from the main mutex
|
||||
// so buffers can be grabbed and printed to without holding the main lock,
|
||||
// for better parallelization.
|
||||
mu sync.Mutex
|
||||
|
||||
// freeList is a list of byte buffers, maintained under mu.
|
||||
freeList *Buffer
|
||||
var buffers = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// GetBuffer returns a new, ready-to-use buffer.
|
||||
func (bl *Buffers) GetBuffer() *Buffer {
|
||||
bl.mu.Lock()
|
||||
b := bl.freeList
|
||||
if b != nil {
|
||||
bl.freeList = b.next
|
||||
}
|
||||
bl.mu.Unlock()
|
||||
if b == nil {
|
||||
b = new(Buffer)
|
||||
} else {
|
||||
b.next = nil
|
||||
b.Reset()
|
||||
}
|
||||
func GetBuffer() *Buffer {
|
||||
b := buffers.Get().(*Buffer)
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
// PutBuffer returns a buffer to the free list.
|
||||
func (bl *Buffers) PutBuffer(b *Buffer) {
|
||||
if b.Len() >= 256 {
|
||||
// Let big buffers die a natural death.
|
||||
return
|
||||
}
|
||||
bl.mu.Lock()
|
||||
b.next = bl.freeList
|
||||
bl.freeList = b
|
||||
bl.mu.Unlock()
|
||||
func PutBuffer(b *Buffer) {
|
||||
buffers.Put(b)
|
||||
}
|
||||
|
||||
// Some custom tiny helper functions to print the log header efficiently.
|
||||
|
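The buffer.go hunk above drops klog's hand-rolled, mutex-guarded free list in favour of a sync.Pool, which handles locking itself and lets the runtime discard idle buffers under memory pressure. A minimal sketch of the same reuse pattern over bytes.Buffer (not the klog Buffer type):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool replaces a manual free list; New is only called when the pool is empty.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func getBuffer() *bytes.Buffer {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset() // recycled buffers may still hold old contents
	return b
}

func putBuffer(b *bytes.Buffer) {
	bufPool.Put(b)
}

func main() {
	b := getBuffer()
	b.WriteString("hello")
	fmt.Println(b.String())
	putBuffer(b)
}
```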
179
vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
generated
vendored
@ -24,6 +24,10 @@ import (
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
type textWriter interface {
|
||||
WriteText(*bytes.Buffer)
|
||||
}
|
||||
|
||||
// WithValues implements LogSink.WithValues. The old key/value pairs are
|
||||
// assumed to be well-formed, the new ones are checked and padded if
|
||||
// necessary. It returns a new slice.
|
||||
@ -91,6 +95,51 @@ func MergeKVs(first, second []interface{}) []interface{} {
|
||||
return merged
|
||||
}
|
||||
|
||||
// MergeKVsInto is a variant of MergeKVs which directly formats the key/value
|
||||
// pairs into a buffer.
|
||||
func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
|
||||
if len(first) == 0 && len(second) == 0 {
|
||||
// Nothing to do at all.
|
||||
return
|
||||
}
|
||||
|
||||
if len(first) == 0 && len(second)%2 == 0 {
|
||||
// Nothing to be overridden, second slice is well-formed
|
||||
// and can be used directly.
|
||||
for i := 0; i < len(second); i += 2 {
|
||||
KVFormat(b, second[i], second[i+1])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Determine which keys are in the second slice so that we can skip
|
||||
// them when iterating over the first one. The code intentionally
|
||||
// favors performance over completeness: we assume that keys are string
|
||||
// constants and thus compare equal when the string values are equal. A
|
||||
// string constant being overridden by, for example, a fmt.Stringer is
|
||||
// not handled.
|
||||
overrides := map[interface{}]bool{}
|
||||
for i := 0; i < len(second); i += 2 {
|
||||
overrides[second[i]] = true
|
||||
}
|
||||
for i := 0; i < len(first); i += 2 {
|
||||
key := first[i]
|
||||
if overrides[key] {
|
||||
continue
|
||||
}
|
||||
KVFormat(b, key, first[i+1])
|
||||
}
|
||||
// Round down.
|
||||
l := len(second)
|
||||
l = l / 2 * 2
|
||||
for i := 1; i < l; i += 2 {
|
||||
KVFormat(b, second[i-1], second[i])
|
||||
}
|
||||
if len(second)%2 == 1 {
|
||||
KVFormat(b, second[len(second)-1], missingValue)
|
||||
}
|
||||
}
|
||||
|
||||
const missingValue = "(MISSING)"
|
||||
|
||||
// KVListFormat serializes all key/value pairs into the provided buffer.
|
||||
@ -104,66 +153,74 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
|
||||
} else {
|
||||
v = missingValue
|
||||
}
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
KVFormat(b, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
switch v := v.(type) {
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, true, StringerToString(v))
|
||||
// KVFormat serializes one key/value pair into the provided buffer.
|
||||
// A space gets inserted before the pair.
|
||||
func KVFormat(b *bytes.Buffer, k, v interface{}) {
|
||||
b.WriteByte(' ')
|
||||
// Keys are assumed to be well-formed according to
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
|
||||
// for the sake of performance. Keys with spaces,
|
||||
// special characters, etc. will break parsing.
|
||||
if sK, ok := k.(string); ok {
|
||||
// Avoid one allocation when the key is a string, which
|
||||
// normally it should be.
|
||||
b.WriteString(sK)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%s", k))
|
||||
}
|
||||
|
||||
// The type checks are sorted so that more frequently used ones
|
||||
// come first because that is then faster in the common
|
||||
// cases. In Kubernetes, ObjectRef (a Stringer) is more common
|
||||
// than plain strings
|
||||
// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
|
||||
switch v := v.(type) {
|
||||
case textWriter:
|
||||
writeTextWriterValue(b, v)
|
||||
case fmt.Stringer:
|
||||
writeStringValue(b, true, StringerToString(v))
|
||||
case string:
|
||||
writeStringValue(b, true, v)
|
||||
case error:
|
||||
writeStringValue(b, true, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, true, v)
|
||||
case error:
|
||||
writeStringValue(b, true, ErrorToString(v))
|
||||
case logr.Marshaler:
|
||||
value := MarshalerToValue(v)
|
||||
// A marshaler that returns a string is useful for
|
||||
// delayed formatting of complex values. We treat this
|
||||
// case like a normal string. This is useful for
|
||||
// multi-line support.
|
||||
//
|
||||
// We could do this by recursively formatting a value,
|
||||
// but that comes with the risk of infinite recursion
|
||||
// if a marshaler returns itself. Instead we call it
|
||||
// only once and rely on it returning the intended
|
||||
// value directly.
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
writeStringValue(b, true, value)
|
||||
default:
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", value))
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
writeStringValue(b, true, value)
|
||||
default:
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", v))
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", value))
|
||||
}
|
||||
case []byte:
|
||||
// In https://github.com/kubernetes/klog/pull/237 it was decided
|
||||
// to format byte slices with "%+q". The advantages of that are:
|
||||
// - readable output if the bytes happen to be printable
|
||||
// - non-printable bytes get represented as unicode escape
|
||||
// sequences (\uxxxx)
|
||||
//
|
||||
// The downsides are that we cannot use the faster
|
||||
// strconv.Quote here and that multi-line output is not
|
||||
// supported. If developers know that a byte array is
|
||||
// printable and they want multi-line output, they can
|
||||
// convert the value to string before logging it.
|
||||
b.WriteByte('=')
|
||||
b.WriteString(fmt.Sprintf("%+q", v))
|
||||
default:
|
||||
writeStringValue(b, false, fmt.Sprintf("%+v", v))
|
||||
}
|
||||
}
|
||||
|
||||
@ -203,6 +260,16 @@ func ErrorToString(err error) (ret string) {
|
||||
return
|
||||
}
|
||||
|
||||
func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
|
||||
b.WriteRune('=')
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintf(b, `"<panic: %s>"`, err)
|
||||
}
|
||||
}()
|
||||
v.WriteText(b)
|
||||
}
|
||||
|
||||
func writeStringValue(b *bytes.Buffer, quote bool, v string) {
|
||||
data := []byte(v)
|
||||
index := bytes.IndexByte(data, '\n')
|
||||
|
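The keyvalues.go hunk above introduces a textWriter interface and a per-pair KVFormat helper so values can serialize themselves straight into the buffer instead of going through fmt.Stringer. A trimmed-down sketch of that dispatch (objectRef and the '=' handling are simplified stand-ins for the klog types):

```go
package main

import (
	"bytes"
	"fmt"
)

// textWriter mirrors the interface added above: values that can append
// themselves to a buffer directly skip the Stringer round trip.
type textWriter interface {
	WriteText(*bytes.Buffer)
}

type objectRef struct{ Namespace, Name string }

func (r objectRef) WriteText(b *bytes.Buffer) {
	b.WriteByte('"')
	if r.Namespace != "" {
		b.WriteString(r.Namespace)
		b.WriteByte('/')
	}
	b.WriteString(r.Name)
	b.WriteByte('"')
}

// kvFormat is a pared-down KVFormat: one key/value pair per call, with the
// textWriter case checked before the more general ones.
func kvFormat(b *bytes.Buffer, k string, v interface{}) {
	b.WriteByte(' ')
	b.WriteString(k)
	b.WriteByte('=')
	switch v := v.(type) {
	case textWriter:
		v.WriteText(b)
	case fmt.Stringer:
		fmt.Fprintf(b, "%q", v.String())
	default:
		fmt.Fprintf(b, "%+v", v)
	}
}

func main() {
	var b bytes.Buffer
	kvFormat(&b, "pod", objectRef{Namespace: "kube-system", Name: "coredns"})
	kvFormat(&b, "count", 3)
	fmt.Println(b.String()) //  pod="kube-system/coredns" count=3
}
```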
78
vendor/k8s.io/klog/v2/k8s_references.go
generated
vendored
@ -17,8 +17,10 @@ limitations under the License.
|
||||
package klog
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
@ -31,11 +33,30 @@ type ObjectRef struct {
|
||||
|
||||
func (ref ObjectRef) String() string {
|
||||
if ref.Namespace != "" {
|
||||
return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
|
||||
var builder strings.Builder
|
||||
builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
|
||||
builder.WriteString(ref.Namespace)
|
||||
builder.WriteRune('/')
|
||||
builder.WriteString(ref.Name)
|
||||
return builder.String()
|
||||
}
|
||||
return ref.Name
|
||||
}
|
||||
|
||||
func (ref ObjectRef) WriteText(out *bytes.Buffer) {
|
||||
out.WriteRune('"')
|
||||
ref.writeUnquoted(out)
|
||||
out.WriteRune('"')
|
||||
}
|
||||
|
||||
func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
|
||||
if ref.Namespace != "" {
|
||||
out.WriteString(ref.Namespace)
|
||||
out.WriteRune('/')
|
||||
}
|
||||
out.WriteString(ref.Name)
|
||||
}
|
||||
|
||||
// MarshalLog ensures that loggers with support for structured output will log
|
||||
// as a struct by removing the String method via a custom type.
|
||||
func (ref ObjectRef) MarshalLog() interface{} {
|
||||
@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
|
||||
var _ logr.Marshaler = kobjSlice{}
|
||||
|
||||
func (ks kobjSlice) String() string {
|
||||
objectRefs, err := ks.process()
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
objectRefs, errStr := ks.process()
|
||||
if errStr != "" {
|
||||
return errStr
|
||||
}
|
||||
return fmt.Sprintf("%v", objectRefs)
|
||||
}
|
||||
|
||||
func (ks kobjSlice) MarshalLog() interface{} {
|
||||
objectRefs, err := ks.process()
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
objectRefs, errStr := ks.process()
|
||||
if errStr != "" {
|
||||
return errStr
|
||||
}
|
||||
return objectRefs
|
||||
}
|
||||
|
||||
func (ks kobjSlice) process() ([]interface{}, error) {
|
||||
func (ks kobjSlice) process() (objs []interface{}, err string) {
|
||||
s := reflect.ValueOf(ks.arg)
|
||||
switch s.Kind() {
|
||||
case reflect.Invalid:
|
||||
// nil parameter, print as nil.
|
||||
return nil, nil
|
||||
return nil, ""
|
||||
case reflect.Slice:
|
||||
// Okay, handle below.
|
||||
default:
|
||||
return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
|
||||
return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
|
||||
}
|
||||
objectRefs := make([]interface{}, 0, s.Len())
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
|
||||
} else if v, ok := item.(KMetadata); ok {
|
||||
objectRefs = append(objectRefs, KObj(v))
|
||||
} else {
|
||||
return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
}
|
||||
}
|
||||
return objectRefs, ""
|
||||
}
|
||||
|
||||
var nilToken = []byte("<nil>")
|
||||
|
||||
func (ks kobjSlice) WriteText(out *bytes.Buffer) {
|
||||
s := reflect.ValueOf(ks.arg)
|
||||
switch s.Kind() {
|
||||
case reflect.Invalid:
|
||||
// nil parameter, print as empty slice.
|
||||
out.WriteString("[]")
|
||||
return
|
||||
case reflect.Slice:
|
||||
// Okay, handle below.
|
||||
default:
|
||||
fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
|
||||
return
|
||||
}
|
||||
out.Write([]byte{'['})
|
||||
defer out.Write([]byte{']'})
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
if i > 0 {
|
||||
out.Write([]byte{' '})
|
||||
}
|
||||
item := s.Index(i).Interface()
|
||||
if item == nil {
|
||||
out.Write(nilToken)
|
||||
} else if v, ok := item.(KMetadata); ok {
|
||||
KObj(v).writeUnquoted(out)
|
||||
} else {
|
||||
fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
|
||||
return
|
||||
}
|
||||
}
|
||||
return objectRefs, nil
|
||||
}
|
||||
|
38
vendor/k8s.io/klog/v2/klog.go
generated
vendored
@ -532,11 +532,6 @@ func (s settings) deepCopy() settings {
|
||||
type loggingT struct {
|
||||
settings
|
||||
|
||||
// bufferCache maintains the free list. It uses its own mutex
|
||||
// so buffers can be grabbed and printed to without holding the main lock,
|
||||
// for better parallelization.
|
||||
bufferCache buffer.Buffers
|
||||
|
||||
// flushD holds a flushDaemon that frequently flushes log file buffers.
|
||||
// Uses its own mutex.
|
||||
flushD *flushDaemon
|
||||
@ -664,7 +659,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
|
||||
|
||||
// formatHeader formats a log header using the provided file name and line number.
|
||||
func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
|
||||
buf := l.bufferCache.GetBuffer()
|
||||
buf := buffer.GetBuffer()
|
||||
if l.skipHeaders {
|
||||
return buf
|
||||
}
|
||||
@ -682,8 +677,8 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter
|
||||
// if logger is set, we clear the generated header as we rely on the backing
|
||||
// logger implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
args = filter.Filter(args)
|
||||
@ -701,8 +696,8 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L
|
||||
// if logr is set, we clear the generated header as we rely on the backing
|
||||
// logr implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
args = filter.Filter(args)
|
||||
@ -723,8 +718,8 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter
|
||||
// if logr is set, we clear the generated header as we rely on the backing
|
||||
// logr implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
format, args = filter.FilterF(format, args)
|
||||
@ -744,8 +739,8 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f
|
||||
// if logr is set, we clear the generated header as we rely on the backing
|
||||
// logr implementation to print headers
|
||||
if logger != nil {
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buf = l.bufferCache.GetBuffer()
|
||||
buffer.PutBuffer(buf)
|
||||
buf = buffer.GetBuffer()
|
||||
}
|
||||
if filter != nil {
|
||||
args = filter.Filter(args)
|
||||
@ -785,7 +780,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s
|
||||
// set log severity by s
|
||||
func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
|
||||
// Only create a new buffer if we don't have one cached.
|
||||
b := l.bufferCache.GetBuffer()
|
||||
b := buffer.GetBuffer()
|
||||
// The message is always quoted, even if it contains line breaks.
|
||||
// If developers want multi-line output, they should use a small, fixed
|
||||
// message and put the multi-line output into a value.
|
||||
@ -796,7 +791,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
|
||||
serialize.KVListFormat(&b.Buffer, keysAndValues...)
|
||||
l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
|
||||
// Make the buffer available for reuse.
|
||||
l.bufferCache.PutBuffer(b)
|
||||
buffer.PutBuffer(b)
|
||||
}
|
||||
|
||||
// redirectBuffer is used to set an alternate destination for the logs
|
||||
@ -948,7 +943,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
|
||||
timeoutFlush(ExitFlushTimeout)
|
||||
OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
|
||||
}
|
||||
l.bufferCache.PutBuffer(buf)
|
||||
buffer.PutBuffer(buf)
|
||||
|
||||
if stats := severityStats[s]; stats != nil {
|
||||
atomic.AddInt64(&stats.lines, 1)
|
||||
@ -1313,6 +1308,13 @@ func newVerbose(level Level, b bool) Verbose {
|
||||
// less than or equal to the value of the -vmodule pattern matching the source file
|
||||
// containing the call.
|
||||
func V(level Level) Verbose {
|
||||
return VDepth(1, level)
|
||||
}
|
||||
|
||||
// VDepth is a variant of V that accepts a number of stack frames that will be
|
||||
// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to
|
||||
// V().
|
||||
func VDepth(depth int, level Level) Verbose {
|
||||
// This function tries hard to be cheap unless there's work to do.
|
||||
// The fast path is two atomic loads and compares.
|
||||
|
||||
@ -1329,7 +1331,7 @@ func V(level Level) Verbose {
|
||||
// but if V logging is enabled we're slow anyway.
|
||||
logging.mu.Lock()
|
||||
defer logging.mu.Unlock()
|
||||
if runtime.Callers(2, logging.pcs[:]) == 0 {
|
||||
if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
|
||||
return newVerbose(level, false)
|
||||
}
|
||||
// runtime.Callers returns "return PCs", but we want
|
||||
|
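The klog.go hunk above adds VDepth so wrappers such as klogr can tell the verbosity check how many of their own stack frames to skip. A small sketch of why the extra depth matters, using runtime.Caller directly (the wrapper function here is illustrative, not klog's):

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// callerAt reports the file:line "depth" frames above this function, the
// same kind of skip count that VDepth threads into the -vmodule check.
func callerAt(depth int) string {
	_, file, line, ok := runtime.Caller(depth + 1)
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", filepath.Base(file), line)
}

// logThroughWrapper simulates one intermediate frame (e.g. a logr adapter).
func logThroughWrapper() string {
	return callerAt(1) // skip the wrapper itself, report its caller
}

func main() {
	fmt.Println("direct: ", callerAt(0))         // attributed to main
	fmt.Println("wrapped:", logThroughWrapper()) // also attributed to main, not the wrapper
}
```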
12
vendor/k8s.io/klog/v2/klogr.go
generated
vendored
@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) {
|
||||
l.callDepth += info.CallDepth
|
||||
}
|
||||
|
||||
func (l klogger) Info(level int, msg string, kvList ...interface{}) {
|
||||
func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
|
||||
merged := serialize.MergeKVs(l.values, kvList)
|
||||
if l.prefix != "" {
|
||||
msg = l.prefix + ": " + msg
|
||||
}
|
||||
V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
|
||||
// Skip this function.
|
||||
VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
|
||||
}
|
||||
|
||||
func (l klogger) Enabled(level int) bool {
|
||||
return V(Level(level)).Enabled()
|
||||
func (l *klogger) Enabled(level int) bool {
|
||||
// Skip this function and logr.Logger.Info where Enabled is called.
|
||||
return VDepth(l.callDepth+2, Level(level)).Enabled()
|
||||
}
|
||||
|
||||
func (l klogger) Error(err error, msg string, kvList ...interface{}) {
|
||||
func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
|
||||
merged := serialize.MergeKVs(l.values, kvList)
|
||||
if l.prefix != "" {
|
||||
msg = l.prefix + ": " + msg
|
||||
|
181
vendor/k8s.io/utils/net/ipfamily.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
|
||||
type IPFamily string
|
||||
|
||||
// Constants for valid IPFamilys:
|
||||
const (
|
||||
IPFamilyUnknown IPFamily = ""
|
||||
|
||||
IPv4 IPFamily = "4"
|
||||
IPv6 IPFamily = "6"
|
||||
)
|
||||
|
||||
// IsDualStackIPs returns true if:
|
||||
// - all elements of ips are valid
|
||||
// - at least one IP from each family (v4 and v6) is present
|
||||
func IsDualStackIPs(ips []net.IP) (bool, error) {
|
||||
v4Found := false
|
||||
v6Found := false
|
||||
for i, ip := range ips {
|
||||
switch IPFamilyOf(ip) {
|
||||
case IPv4:
|
||||
v4Found = true
|
||||
case IPv6:
|
||||
v6Found = true
|
||||
default:
|
||||
return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
|
||||
}
|
||||
}
|
||||
|
||||
return (v4Found && v6Found), nil
|
||||
}
|
||||
|
||||
// IsDualStackIPStrings returns true if:
|
||||
// - all elements of ips can be parsed as IPs
|
||||
// - at least one IP from each family (v4 and v6) is present
|
||||
func IsDualStackIPStrings(ips []string) (bool, error) {
|
||||
parsedIPs := make([]net.IP, 0, len(ips))
|
||||
for i, ip := range ips {
|
||||
parsedIP := ParseIPSloppy(ip)
|
||||
if parsedIP == nil {
|
||||
return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
|
||||
}
|
||||
parsedIPs = append(parsedIPs, parsedIP)
|
||||
}
|
||||
return IsDualStackIPs(parsedIPs)
|
||||
}
|
||||
|
||||
// IsDualStackCIDRs returns true if:
|
||||
// - all elements of cidrs are non-nil
|
||||
// - at least one CIDR from each family (v4 and v6) is present
|
||||
func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
|
||||
v4Found := false
|
||||
v6Found := false
|
||||
for i, cidr := range cidrs {
|
||||
switch IPFamilyOfCIDR(cidr) {
|
||||
case IPv4:
|
||||
v4Found = true
|
||||
case IPv6:
|
||||
v6Found = true
|
||||
default:
|
||||
return false, fmt.Errorf("invalid CIDR[%d]: %v", i, cidr)
|
||||
}
|
||||
}
|
||||
|
||||
return (v4Found && v6Found), nil
|
||||
}
|
||||
|
||||
// IsDualStackCIDRStrings returns if
|
||||
// - all elements of cidrs can be parsed as CIDRs
|
||||
// - at least one CIDR from each family (v4 and v6) is present
|
||||
func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
|
||||
parsedCIDRs, err := ParseCIDRs(cidrs)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return IsDualStackCIDRs(parsedCIDRs)
|
||||
}
|
||||
|
||||
// IPFamilyOf returns the IP family of ip, or IPFamilyUnknown if it is invalid.
|
||||
func IPFamilyOf(ip net.IP) IPFamily {
|
||||
switch {
|
||||
case ip.To4() != nil:
|
||||
return IPv4
|
||||
case ip.To16() != nil:
|
||||
return IPv6
|
||||
default:
|
||||
return IPFamilyUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// IPFamilyOfString returns the IP family of ip, or IPFamilyUnknown if ip cannot
|
||||
// be parsed as an IP.
|
||||
func IPFamilyOfString(ip string) IPFamily {
|
||||
return IPFamilyOf(ParseIPSloppy(ip))
|
||||
}
|
||||
|
||||
// IPFamilyOfCIDR returns the IP family of cidr.
|
||||
func IPFamilyOfCIDR(cidr *net.IPNet) IPFamily {
|
||||
if cidr == nil {
|
||||
return IPFamilyUnknown
|
||||
}
|
||||
return IPFamilyOf(cidr.IP)
|
||||
}
|
||||
|
||||
// IPFamilyOfCIDRString returns the IP family of cidr.
|
||||
func IPFamilyOfCIDRString(cidr string) IPFamily {
|
||||
ip, _, _ := ParseCIDRSloppy(cidr)
|
||||
return IPFamilyOf(ip)
|
||||
}
|
||||
|
||||
// IsIPv6 returns true if netIP is IPv6 (and false if it is IPv4, nil, or invalid).
|
||||
func IsIPv6(netIP net.IP) bool {
|
||||
return IPFamilyOf(netIP) == IPv6
|
||||
}
|
||||
|
||||
// IsIPv6String returns true if ip contains a single IPv6 address and nothing else. It
|
||||
// returns false if ip is an empty string, an IPv4 address, or anything else that is not a
|
||||
// single IPv6 address.
|
||||
func IsIPv6String(ip string) bool {
|
||||
return IPFamilyOfString(ip) == IPv6
|
||||
}
|
||||
|
||||
// IsIPv6CIDR returns true if a cidr is a valid IPv6 CIDR. It returns false if cidr is
|
||||
// nil or an IPv4 CIDR. Its behavior is not defined if cidr is invalid.
|
||||
func IsIPv6CIDR(cidr *net.IPNet) bool {
|
||||
return IPFamilyOfCIDR(cidr) == IPv6
|
||||
}
|
||||
|
||||
// IsIPv6CIDRString returns true if cidr contains a single IPv6 CIDR and nothing else. It
|
||||
// returns false if cidr is an empty string, an IPv4 CIDR, or anything else that is not a
|
||||
// single valid IPv6 CIDR.
|
||||
func IsIPv6CIDRString(cidr string) bool {
|
||||
return IPFamilyOfCIDRString(cidr) == IPv6
|
||||
}
|
||||
|
||||
// IsIPv4 returns true if netIP is IPv4 (and false if it is IPv6, nil, or invalid).
|
||||
func IsIPv4(netIP net.IP) bool {
|
||||
return IPFamilyOf(netIP) == IPv4
|
||||
}
|
||||
|
||||
// IsIPv4String returns true if ip contains a single IPv4 address and nothing else. It
|
||||
// returns false if ip is an empty string, an IPv6 address, or anything else that is not a
|
||||
// single IPv4 address.
|
||||
func IsIPv4String(ip string) bool {
|
||||
return IPFamilyOfString(ip) == IPv4
|
||||
}
|
||||
|
||||
// IsIPv4CIDR returns true if cidr is a valid IPv4 CIDR. It returns false if cidr is nil
|
||||
// or an IPv6 CIDR. Its behavior is not defined if cidr is invalid.
|
||||
func IsIPv4CIDR(cidr *net.IPNet) bool {
|
||||
return IPFamilyOfCIDR(cidr) == IPv4
|
||||
}
|
||||
|
||||
// IsIPv4CIDRString returns true if cidr contains a single IPv4 CIDR and nothing else. It
|
||||
// returns false if cidr is an empty string, an IPv6 CIDR, or anything else that is not a
|
||||
// single valid IPv4 CIDR.
|
||||
func IsIPv4CIDRString(cidr string) bool {
|
||||
return IPFamilyOfCIDRString(cidr) == IPv4
|
||||
}
|
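The new ipfamily.go above centralizes family detection in IPFamilyOf and related helpers. A minimal sketch of the same To4/To16 classification with only the standard library (returning plain strings rather than the IPFamily type):

```go
package main

import (
	"fmt"
	"net"
)

// ipFamilyOf mirrors the new IPFamilyOf helper: To4() identifies IPv4
// (including 4-in-6 forms), To16() identifies IPv6, and anything else is
// reported as unknown instead of an error.
func ipFamilyOf(ip net.IP) string {
	switch {
	case ip.To4() != nil:
		return "4"
	case ip.To16() != nil:
		return "6"
	default:
		return "" // unknown / invalid
	}
}

func main() {
	for _, s := range []string{"192.0.2.1", "2001:db8::1", "not-an-ip"} {
		fmt.Printf("%-12s -> %q\n", s, ipFamilyOf(net.ParseIP(s)))
	}
}
```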
126
vendor/k8s.io/utils/net/net.go
generated
vendored
@ -29,138 +29,16 @@ import (
// order is maintained
func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) {
	cidrs := make([]*net.IPNet, 0, len(cidrsString))
	for _, cidrString := range cidrsString {
	for i, cidrString := range cidrsString {
		_, cidr, err := ParseCIDRSloppy(cidrString)
		if err != nil {
			return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err)
			return nil, fmt.Errorf("invalid CIDR[%d]: %v (%v)", i, cidr, err)
		}
		cidrs = append(cidrs, cidr)
	}
	return cidrs, nil
}

// IsDualStackIPs returns if a slice of ips is:
// - all are valid ips
// - at least one ip from each family (v4 or v6)
func IsDualStackIPs(ips []net.IP) (bool, error) {
	v4Found := false
	v6Found := false
	for _, ip := range ips {
		if ip == nil {
			return false, fmt.Errorf("ip %v is invalid", ip)
		}

		if v4Found && v6Found {
			continue
		}

		if IsIPv6(ip) {
			v6Found = true
			continue
		}

		v4Found = true
	}

	return (v4Found && v6Found), nil
}

// IsDualStackIPStrings returns if
// - all are valid ips
// - at least one ip from each family (v4 or v6)
func IsDualStackIPStrings(ips []string) (bool, error) {
	parsedIPs := make([]net.IP, 0, len(ips))
	for _, ip := range ips {
		parsedIP := ParseIPSloppy(ip)
		parsedIPs = append(parsedIPs, parsedIP)
	}
	return IsDualStackIPs(parsedIPs)
}

// IsDualStackCIDRs returns if
// - all are valid cidrs
// - at least one cidr from each family (v4 or v6)
func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
	v4Found := false
	v6Found := false
	for _, cidr := range cidrs {
		if cidr == nil {
			return false, fmt.Errorf("cidr %v is invalid", cidr)
		}

		if v4Found && v6Found {
			continue
		}

		if IsIPv6(cidr.IP) {
			v6Found = true
			continue
		}
		v4Found = true
	}

	return v4Found && v6Found, nil
}

// IsDualStackCIDRStrings returns if
// - all are valid cidrs
// - at least one cidr from each family (v4 or v6)
func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
	parsedCIDRs, err := ParseCIDRs(cidrs)
	if err != nil {
		return false, err
	}
	return IsDualStackCIDRs(parsedCIDRs)
}

// IsIPv6 returns if netIP is IPv6.
func IsIPv6(netIP net.IP) bool {
	return netIP != nil && netIP.To4() == nil
}

// IsIPv6String returns if ip is IPv6.
func IsIPv6String(ip string) bool {
	netIP := ParseIPSloppy(ip)
	return IsIPv6(netIP)
}

// IsIPv6CIDRString returns if cidr is IPv6.
// This assumes cidr is a valid CIDR.
func IsIPv6CIDRString(cidr string) bool {
	ip, _, _ := ParseCIDRSloppy(cidr)
	return IsIPv6(ip)
}

// IsIPv6CIDR returns if a cidr is ipv6
func IsIPv6CIDR(cidr *net.IPNet) bool {
	ip := cidr.IP
	return IsIPv6(ip)
}

// IsIPv4 returns if netIP is IPv4.
func IsIPv4(netIP net.IP) bool {
	return netIP != nil && netIP.To4() != nil
}

// IsIPv4String returns if ip is IPv4.
func IsIPv4String(ip string) bool {
	netIP := ParseIPSloppy(ip)
	return IsIPv4(netIP)
}

// IsIPv4CIDR returns if a cidr is ipv4
func IsIPv4CIDR(cidr *net.IPNet) bool {
	ip := cidr.IP
	return IsIPv4(ip)
}

// IsIPv4CIDRString returns if cidr is IPv4.
// This assumes cidr is a valid CIDR.
func IsIPv4CIDRString(cidr string) bool {
	ip, _, _ := ParseCIDRSloppy(cidr)
	return IsIPv4(ip)
}

// ParsePort parses a string representing an IP port. If the string is not a
// valid port number, this returns an error.
func ParsePort(port string, allowZero bool) (int, error) {
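As a rough illustration of the dual-stack helpers touched in this hunk (assumed usage, with made-up CIDR values; not part of the diff):

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// ParseCIDRs rejects the whole slice on the first invalid entry; the
	// updated error message now names the offending index.
	cidrs, err := netutils.ParseCIDRs([]string{"10.0.0.0/16", "fd00::/64"})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// IsDualStackCIDRs reports whether both an IPv4 and an IPv6 CIDR are present.
	dual, err := netutils.IsDualStackCIDRs(cidrs)
	fmt.Println(dual, err) // expected: true <nil>
}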
18 vendor/k8s.io/utils/net/port.go generated vendored
@ -23,15 +23,6 @@ import (
	"strings"
)

// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
type IPFamily string

// Constants for valid IPFamilys:
const (
	IPv4 IPFamily = "4"
	IPv6 = "6"
)

// Protocol is a network protocol support by LocalPort.
type Protocol string

@ -67,7 +58,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
	if protocol != TCP && protocol != UDP {
		return nil, fmt.Errorf("Unsupported protocol %s", protocol)
	}
	if ipFamily != "" && ipFamily != "4" && ipFamily != "6" {
	if ipFamily != IPFamilyUnknown && ipFamily != IPv4 && ipFamily != IPv6 {
		return nil, fmt.Errorf("Invalid IP family %s", ipFamily)
	}
	if ip != "" {
@ -75,9 +66,10 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
		if parsedIP == nil {
			return nil, fmt.Errorf("invalid ip address %s", ip)
		}
		asIPv4 := parsedIP.To4()
		if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 {
			return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
		if ipFamily != IPFamilyUnknown {
			if IPFamilyOf(parsedIP) != ipFamily {
				return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
			}
		}
	}
	return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil
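A hedged sketch of what the tightened family check in NewLocalPort means for callers; the description string, addresses, and port number below are invented for illustration, and the IPv4/TCP constants are assumed from the k8s.io/utils/net package shown above:

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// Family and address agree: this should succeed.
	lp, err := netutils.NewLocalPort("example healthz port", "127.0.0.1", netutils.IPv4, 10256, netutils.TCP)
	fmt.Println(lp, err)

	// Family and address disagree: the updated validation rejects this
	// with an "ip address and family mismatch" error.
	_, err = netutils.NewLocalPort("example healthz port", "::1", netutils.IPv4, 10256, netutils.TCP)
	fmt.Println(err)
}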
18 vendor/modules.txt vendored
@ -1,4 +1,4 @@
# cloud.google.com/go/compute v1.14.0
# cloud.google.com/go/compute v1.18.0
## explicit; go 1.19
cloud.google.com/go/compute/internal
# cloud.google.com/go/compute/metadata v0.2.3
@ -282,7 +282,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/go-containerregistry v0.13.0
# github.com/google/go-containerregistry v0.13.1-0.20230203223142-b3c23b4c3f28
## explicit; go 1.18
github.com/google/go-containerregistry/cmd/crane/cmd
github.com/google/go-containerregistry/internal/and
@ -320,6 +320,7 @@ github.com/google/go-containerregistry/pkg/v1/partial
github.com/google/go-containerregistry/pkg/v1/random
github.com/google/go-containerregistry/pkg/v1/remote
github.com/google/go-containerregistry/pkg/v1/remote/transport
github.com/google/go-containerregistry/pkg/v1/static
github.com/google/go-containerregistry/pkg/v1/stream
github.com/google/go-containerregistry/pkg/v1/tarball
github.com/google/go-containerregistry/pkg/v1/types
@ -412,8 +413,9 @@ github.com/pkg/errors
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
# github.com/sigstore/cosign/v2 v2.0.0-rc.2
# github.com/sigstore/cosign/v2 v2.0.0
## explicit; go 1.19
github.com/sigstore/cosign/v2/internal/pkg/oci/remote
github.com/sigstore/cosign/v2/pkg/cosign/bundle
github.com/sigstore/cosign/v2/pkg/cosign/env
github.com/sigstore/cosign/v2/pkg/oci
@ -504,7 +506,7 @@ golang.org/x/crypto/scrypt
# golang.org/x/mod v0.8.0
## explicit; go 1.17
golang.org/x/mod/semver
# golang.org/x/net v0.6.0
# golang.org/x/net v0.7.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/http/httpguts
@ -575,10 +577,10 @@ google.golang.org/appengine/internal/modules
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20230109162033-3c3c17ce83e6
# google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.52.3
# google.golang.org/grpc v1.53.0
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes
@ -685,7 +687,7 @@ k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
# k8s.io/klog/v2 v2.80.1
# k8s.io/klog/v2 v2.90.0
## explicit; go 1.13
k8s.io/klog/v2
k8s.io/klog/v2/internal/buffer
@ -693,7 +695,7 @@ k8s.io/klog/v2/internal/clock
k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
# k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
# k8s.io/utils v0.0.0-20230115233650-391b47cb4029
## explicit; go 1.18
k8s.io/utils/internal/third_party/forked/golang/net
k8s.io/utils/net