Write unit tests with the help of afero

Afero is a package that lets you mock out the filesystem with an in-memory implementation.
It allows us to easily create the files required for a given test without worrying about
a cleanup step or different tests tripping over each other when run in parallel.

Later on I'll standardise on using afero over the vanilla os package.
Jesse Duffield 2023-07-29 17:02:04 +10:00
parent a1fae41051
commit 7b302d8c29
88 changed files with 55298 additions and 4243 deletions
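To make the pattern described in the commit message concrete, here is a minimal sketch of the kind of test afero enables. It is illustrative only and not part of this commit: readFirstLine and the paths are invented for the example. The code under test accepts an afero.Fs; production code would pass the real filesystem, while the test passes afero.NewMemMapFs() so nothing touches the disk and no cleanup is needed.

package example

import (
	"testing"

	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
)

// readFirstLine is a stand-in for code that takes an afero.Fs instead of
// calling the os package directly; that injection is what makes it testable.
func readFirstLine(fs afero.Fs, path string) (string, error) {
	data, err := afero.ReadFile(fs, path)
	if err != nil {
		return "", err
	}
	for i, b := range data {
		if b == '\n' {
			return string(data[:i]), nil
		}
	}
	return string(data), nil
}

func TestReadFirstLine(t *testing.T) {
	fs := afero.NewMemMapFs()

	// the "file" only ever exists in memory, so there is no cleanup step and
	// parallel tests cannot trip over each other's files
	assert.NoError(t, afero.WriteFile(fs, "/repo/notes.txt", []byte("first\nsecond\n"), 0o644))

	line, err := readFirstLine(fs, "/repo/notes.txt")
	assert.NoError(t, err)
	assert.Equal(t, "first", line)
}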

go.mod (4 changes)

@@ -65,8 +65,10 @@ require (
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/xanzy/ssh-agent v0.2.1 // indirect
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect

go.sum (403 changes)

@@ -1,3 +1,43 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OpenPeeDeeP/xdg v1.0.0 h1:UDLmNjCGFZZCaVMB74DqYEtXkHxnTxcr4FeJVF9uCn8=
github.com/OpenPeeDeeP/xdg v1.0.0/go.mod h1:tMoSueLQlMf0TCldjrJLNIjAc5qAOIcHt5REi88/Ygo=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
@@ -11,10 +51,18 @@ github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn
github.com/aybabtme/humanlog v0.4.1 h1:D8d9um55rrthJsP8IGSHBcti9lTb/XknmDAX6Zy8tek=
github.com/aybabtme/humanlog v0.4.1/go.mod h1:B0bnQX4FTSU3oftPMTTPvENCy8LqixLDvYJA9TUCAGo=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cli/safeexec v1.0.0 h1:0VngyaIyqACHdcMNWfo6+KdUYnqEr2Sg+bSP1pdF+dI=
github.com/cli/safeexec v1.0.0/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5zx3Q=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:tuijfIjZyjZaHq9xDUh0tNitwXshJpbLkqMOJv4H3do=
github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21/go.mod h1:po7NpZ/QiTKzBKyrsEAxwnTamCoh8uDk/egRpQ7siIc=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -24,6 +72,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.1-0.20180516100307-2d684516a886/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
@@ -48,19 +102,78 @@ github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agR
github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -83,13 +196,17 @@ github.com/jesseduffield/minimal/gitignore v0.3.3-0.20211018110810-9cde264e6b1e/
github.com/jesseduffield/yaml v2.1.0+incompatible h1:HWQJ1gIv2zHKbDYNp0Jwjlj24K8aqpFHnMCynY1EpmE=
github.com/jesseduffield/yaml v2.1.0+incompatible/go.mod h1:w0xGhOSIJCGYYW+hnFPTutCy5aACpkcwbmORt5axGqk=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -131,14 +248,18 @@ github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI=
github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y=
github.com/samber/lo v1.31.0 h1:Sfa+/064Tdo4SvlohQUQzBhgSer9v/coGvKQI/XLWAM=
@@ -151,6 +272,8 @@ github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad h1:fiWzISvDn0Csy5H0iwgAuJGQTUpVfEMJJd4nRFXogbc=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -160,6 +283,7 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -171,37 +295,162 @@ github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8 h1:s/+U+w0teGzcoH2mdIlFQ6KfVKGaYpgyGdUefZrn9TU=
golang.org/x/exp v0.0.0-20220318154914-8dddf5d87bd8/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170407050850-f3918c30c5c2/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -217,22 +466,166 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ozeidan/fuzzy-patricia.v3 v3.0.0 h1:KzcWKJ0nMAmGoBhYVMnkWc1rXjB42lKy5aIys4TdLOA=
@@ -248,3 +641,13 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=


@@ -11,6 +11,7 @@ import (
"github.com/go-errors/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/jesseduffield/generics/slices"
appTypes "github.com/jesseduffield/lazygit/pkg/app/types"
@@ -75,6 +76,7 @@ func NewCommon(config config.AppConfigurer) (*common.Common, error) {
Tr: tr,
UserConfig: userConfig,
Debug: config.GetDebug(),
Fs: afero.NewOsFs(),
}, nil
}
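The Fs field added here is the injection seam: at runtime lazygit keeps the real filesystem, while tests can hand the same struct an in-memory one. A rough sketch of that wiring (not code from this commit; it assumes common.Common can be constructed with just the fields shown in the hunk above):

package example

import (
	"github.com/jesseduffield/lazygit/pkg/common"
	"github.com/spf13/afero"
)

// production wiring, as in the hunk above: the real OS filesystem
func newRuntimeCommon() *common.Common {
	return &common.Common{Fs: afero.NewOsFs()}
}

// test wiring: same type, but the test never touches the real disk
func newTestCommon() *common.Common {
	return &common.Common{Fs: afero.NewMemMapFs()}
}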


@ -2,11 +2,13 @@ package commands
import (
"os"
"path"
"path/filepath"
"strings"
"github.com/go-errors/errors"
"github.com/sasha-s/go-deadlock"
"github.com/spf13/afero"
gogit "github.com/jesseduffield/go-git/v5"
"github.com/jesseduffield/lazygit/pkg/commands/git_commands"
@ -39,7 +41,7 @@ type GitCommand struct {
Bisect *git_commands.BisectCommands
Worktree *git_commands.WorktreeCommands
Version *git_commands.GitVersion
RepoPaths git_commands.RepoPaths
RepoPaths *git_commands.RepoPaths
Loaders Loaders
}
@ -63,11 +65,37 @@ func NewGitCommand(
gitConfig git_config.IGitConfig,
syncMutex *deadlock.Mutex,
) (*GitCommand, error) {
if err := navigateToRepoRootDirectory(os.Stat, os.Chdir); err != nil {
return nil, err
currentPath, err := os.Getwd()
if err != nil {
return nil, utils.WrapError(err)
}
repoPaths, err := git_commands.GetRepoPaths()
// converting to forward slashes for the sake of windows (which uses backwards slashes). We want everything
// to have forward slashes internally
currentPath = filepath.ToSlash(currentPath)
gitDir := env.GetGitDirEnv()
if gitDir != "" {
// we've been given the git directory explicitly so no need to navigate to it
_, err := cmn.Fs.Stat(gitDir)
if err != nil {
return nil, utils.WrapError(err)
}
} else {
// we haven't been given the git dir explicitly so we assume it's in the current working directory as `.git/` (or an ancestor directory)
rootDirectory, err := findWorktreeRoot(cmn.Fs, currentPath)
if err != nil {
return nil, utils.WrapError(err)
}
currentPath = rootDirectory
err = os.Chdir(rootDirectory)
if err != nil {
return nil, utils.WrapError(err)
}
}
repoPaths, err := git_commands.GetRepoPaths(cmn.Fs, currentPath)
if err != nil {
return nil, errors.Errorf("Error getting repo paths: %v", err)
}
@ -99,7 +127,7 @@ func NewGitCommandAux(
version *git_commands.GitVersion,
osCommand *oscommands.OSCommand,
gitConfig git_config.IGitConfig,
repoPaths git_commands.RepoPaths,
repoPaths *git_commands.RepoPaths,
repo *gogit.Repository,
syncMutex *deadlock.Mutex,
) *GitCommand {
@ -144,7 +172,7 @@ func NewGitCommandAux(
commitLoader := git_commands.NewCommitLoader(cmn, cmd, statusCommands.RebaseMode, gitCommon)
reflogCommitLoader := git_commands.NewReflogCommitLoader(cmn, cmd)
remoteLoader := git_commands.NewRemoteLoader(cmn, cmd, repo.Remotes)
worktreeLoader := git_commands.NewWorktreeLoader(gitCommon, cmd)
worktreeLoader := git_commands.NewWorktreeLoader(gitCommon)
stashLoader := git_commands.NewStashLoader(cmn, cmd)
tagLoader := git_commands.NewTagLoader(cmn, cmd)
@ -183,66 +211,32 @@ func NewGitCommandAux(
}
}
func navigateToRepoRootDirectory(stat func(string) (os.FileInfo, error), chdir func(string) error) error {
gitDir := env.GetGitDirEnv()
if gitDir != "" {
// we've been given the git directory explicitly so no need to navigate to it
_, err := stat(gitDir)
if err != nil {
return utils.WrapError(err)
}
return nil
}
// we haven't been given the git dir explicitly so we assume it's in the current working directory as `.git/` (or an ancestor directory)
// this returns the root of the current worktree. So if you start lazygit from within
// a subdirectory of the worktree, it will start in the context of the root of that worktree
func findWorktreeRoot(fs afero.Fs, currentPath string) (string, error) {
for {
_, err := stat(".git")
// we don't care if .git is a directory or a file: either is okay.
_, err := fs.Stat(path.Join(currentPath, ".git"))
if err == nil {
return nil
return currentPath, nil
}
if !os.IsNotExist(err) {
return utils.WrapError(err)
return "", utils.WrapError(err)
}
if err = chdir(".."); err != nil {
return utils.WrapError(err)
}
currentPath = path.Dir(currentPath)
currentPath, err := os.Getwd()
if err != nil {
return err
}
atRoot := currentPath == filepath.Dir(currentPath)
atRoot := currentPath == path.Dir(currentPath)
if atRoot {
// we should never really land here: the code that creates GitCommand should
// verify we're in a git directory
return errors.New("Must open lazygit in a git repository")
return "", errors.New("Must open lazygit in a git repository")
}
}
}
func setupRepository(
openGitRepository func(string, *gogit.PlainOpenOptions) (*gogit.Repository, error),
options gogit.PlainOpenOptions,
gitConfigParseErrorStr string,
path string,
) (*gogit.Repository, error) {
repository, err := openGitRepository(path, &options)
if err != nil {
if strings.Contains(err.Error(), `unquoted '\' must be followed by new line`) {
return nil, errors.New(gitConfigParseErrorStr)
}
return nil, err
}
return repository, err
}
func VerifyInGitRepo(osCommand *oscommands.OSCommand) error {
return osCommand.Cmd.New(git_commands.NewGitCmd("rev-parse").Arg("--git-dir").ToArgv()).DontLog().Run()
}
View File
@ -12,7 +12,7 @@ type GitCommon struct {
version *GitVersion
cmd oscommands.ICmdObjBuilder
os *oscommands.OSCommand
repoPaths RepoPaths
repoPaths *RepoPaths
repo *gogit.Repository
config *ConfigCommands
// mutex for doing things like push/pull/fetch
@ -24,7 +24,7 @@ func NewGitCommon(
version *GitVersion,
cmd oscommands.ICmdObjBuilder,
osCommand *oscommands.OSCommand,
repoPaths RepoPaths,
repoPaths *RepoPaths,
repo *gogit.Repository,
config *ConfigCommands,
syncMutex *deadlock.Mutex,
View File
@ -11,6 +11,7 @@ import (
"github.com/jesseduffield/lazygit/pkg/common"
"github.com/jesseduffield/lazygit/pkg/config"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/spf13/afero"
)
type commonDeps struct {
@ -20,9 +21,10 @@ type commonDeps struct {
gitConfig *git_config.FakeGitConfig
getenv func(string) string
removeFile func(string) error
dotGitDir string
common *common.Common
cmd *oscommands.CmdObjBuilder
fs afero.Fs
repoPaths *RepoPaths
}
func buildGitCommon(deps commonDeps) *GitCommon {
@ -33,6 +35,16 @@ func buildGitCommon(deps commonDeps) *GitCommon {
gitCommon.Common = utils.NewDummyCommonWithUserConfig(deps.userConfig)
}
if deps.fs != nil {
gitCommon.Fs = deps.fs
}
if deps.repoPaths != nil {
gitCommon.repoPaths = deps.repoPaths
} else {
gitCommon.repoPaths = MockRepoPaths(".git")
}
runner := deps.runner
if runner == nil {
runner = oscommands.NewFakeRunner(nil)
@ -81,11 +93,6 @@ func buildGitCommon(deps commonDeps) *GitCommon {
TempDir: os.TempDir(),
})
gitCommon.dotGitDir = deps.dotGitDir
if gitCommon.dotGitDir == "" {
gitCommon.dotGitDir = ".git"
}
return gitCommon
}
@ -96,7 +103,7 @@ func buildRepo() *gogit.Repository {
}
func buildFileLoader(gitCommon *GitCommon) *FileLoader {
return NewFileLoader(gitCommon.Common, gitCommon.cmd, gitCommon.config)
return NewFileLoader(gitCommon, gitCommon.cmd, gitCommon.config)
}
func buildSubmoduleCommands(deps commonDeps) *SubmoduleCommands {
View File
@ -66,7 +66,7 @@ func (self *FileLoader) GetStatusFiles(opts GetStatusFileOptions) []*models.File
// Go through the files to see if any of these files are actually worktrees
// so that we can render them correctly
worktreePaths := linkedWortkreePaths(self.repoPaths.RepoGitDirPath())
worktreePaths := linkedWortkreePaths(self.Fs, self.repoPaths.RepoGitDirPath())
for _, file := range files {
for _, worktreePath := range worktreePaths {
absFilePath, err := filepath.Abs(file.Name)
View File
@ -5,7 +5,6 @@ import (
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/commands/oscommands"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/stretchr/testify/assert"
)
@ -178,7 +177,7 @@ func TestFileGetStatusFiles(t *testing.T) {
cmd := oscommands.NewDummyCmdObjBuilder(s.runner)
loader := &FileLoader{
Common: utils.NewDummyCommon(),
GitCommon: buildGitCommon(commonDeps{}),
cmd: cmd,
config: &FakeFileLoaderConfig{showUntrackedFiles: "yes"},
getFileType: func(string) string { return "file" },
View File
@ -2,7 +2,7 @@ package git_commands
import (
"fmt"
"io/fs"
ioFs "io/fs"
"os"
"path"
"path/filepath"
@ -11,33 +11,10 @@ import (
"github.com/go-errors/errors"
"github.com/jesseduffield/lazygit/pkg/env"
"github.com/samber/lo"
"github.com/spf13/afero"
)
type RepoPaths interface {
// Current working directory of the program. Currently, this will always
// be the same as WorktreePath(), but in future we may support running
// lazygit from inside a subdirectory of the worktree.
CurrentPath() string
// Path to the current worktree. If we're in the main worktree, this will
// be the same as RepoPath()
WorktreePath() string
// Path of the worktree's git dir.
// If we're in the main worktree, this will be the .git dir under the RepoPath().
// If we're in a linked worktree, it will be the directory pointed at by the worktree's .git file
WorktreeGitDirPath() string
// Path of the repo. If we're in a the main worktree, this will be the same as WorktreePath()
// If we're in a bare repo, it will be the parent folder of the bare repo
RepoPath() string
// path of the git-dir for the repo.
// If this is a bare repo, it will be the location of the bare repo
// If this is a non-bare repo, it will be the location of the .git dir in
// the main worktree.
RepoGitDirPath() string
// Name of the repo. Basename of the folder containing the repo.
RepoName() string
}
type RepoDirsImpl struct {
type RepoPaths struct {
currentPath string
worktreePath string
worktreeGitDirPath string
@ -46,54 +23,81 @@ type RepoDirsImpl struct {
repoName string
}
var _ RepoPaths = &RepoDirsImpl{}
func (self *RepoDirsImpl) CurrentPath() string {
// Current working directory of the program. Currently, this will always
// be the same as WorktreePath(), but in future we may support running
// lazygit from inside a subdirectory of the worktree.
func (self *RepoPaths) CurrentPath() string {
return self.currentPath
}
func (self *RepoDirsImpl) WorktreePath() string {
// Path to the current worktree. If we're in the main worktree, this will
// be the same as RepoPath()
func (self *RepoPaths) WorktreePath() string {
return self.worktreePath
}
func (self *RepoDirsImpl) WorktreeGitDirPath() string {
// Path of the worktree's git dir.
// If we're in the main worktree, this will be the .git dir under the RepoPath().
// If we're in a linked worktree, it will be the directory pointed at by the worktree's .git file
func (self *RepoPaths) WorktreeGitDirPath() string {
return self.worktreeGitDirPath
}
func (self *RepoDirsImpl) RepoPath() string {
// Path of the repo. If we're in the main worktree, this will be the same as WorktreePath()
// If we're in a bare repo, it will be the parent folder of the bare repo
func (self *RepoPaths) RepoPath() string {
return self.repoPath
}
func (self *RepoDirsImpl) RepoGitDirPath() string {
// path of the git-dir for the repo.
// If this is a bare repo, it will be the location of the bare repo
// If this is a non-bare repo, it will be the location of the .git dir in
// the main worktree.
func (self *RepoPaths) RepoGitDirPath() string {
return self.repoGitDirPath
}
func (self *RepoDirsImpl) RepoName() string {
// Name of the repo. Basename of the folder containing the repo.
func (self *RepoPaths) RepoName() string {
return self.repoName
}
func GetRepoPaths() (RepoPaths, error) {
currentPath, err := os.Getwd()
if err != nil {
return &RepoDirsImpl{}, errors.Errorf("failed to get current path: %v", err)
// Returns the repo paths for a typical repo
func MockRepoPaths(currentPath string) *RepoPaths {
return &RepoPaths{
currentPath: currentPath,
worktreePath: currentPath,
worktreeGitDirPath: path.Join(currentPath, ".git"),
repoPath: currentPath,
repoGitDirPath: path.Join(currentPath, ".git"),
repoName: "lazygit",
}
}
// converting to forward slashes for the sake of windows (which uses backwards slashes). We want everything
// to have forward slashes internally
currentPath = filepath.ToSlash(currentPath)
func GetRepoPaths(
fs afero.Fs,
currentPath string,
) (*RepoPaths, error) {
return getRepoPathsAux(afero.NewOsFs(), resolveSymlink, currentPath)
}
func getRepoPathsAux(
fs afero.Fs,
resolveSymlinkFn func(string) (string, error),
currentPath string,
) (*RepoPaths, error) {
worktreePath := currentPath
repoGitDirPath, repoPath, err := GetCurrentRepoGitDirPath(currentPath)
repoGitDirPath, repoPath, err := getCurrentRepoGitDirPath(fs, resolveSymlinkFn, currentPath)
if err != nil {
return &RepoDirsImpl{}, errors.Errorf("failed to get repo git dir path: %v", err)
return nil, errors.Errorf("failed to get repo git dir path: %v", err)
}
worktreeGitDirPath, err := worktreeGitDirPath(currentPath)
worktreeGitDirPath, err := worktreeGitDirPath(fs, currentPath)
if err != nil {
return &RepoDirsImpl{}, errors.Errorf("failed to get worktree git dir path: %v", err)
return nil, errors.Errorf("failed to get worktree git dir path: %v", err)
}
repoName := path.Base(repoPath)
return &RepoDirsImpl{
return &RepoPaths{
currentPath: currentPath,
worktreePath: worktreePath,
worktreeGitDirPath: worktreeGitDirPath,
@ -103,52 +107,14 @@ func GetRepoPaths() (RepoPaths, error) {
}, nil
}
// Returns the paths of linked worktrees
func linkedWortkreePaths(repoGitDirPath string) []string {
result := []string{}
// For each directory in this path we're going to cat the `gitdir` file and append its contents to our result
// That file points us to the `.git` file in the worktree.
worktreeGitDirsPath := path.Join(repoGitDirPath, "worktrees")
// ensure the directory exists
_, err := os.Stat(worktreeGitDirsPath)
if err != nil {
return result
}
_ = filepath.Walk(worktreeGitDirsPath, func(currPath string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
gitDirPath := path.Join(currPath, "gitdir")
gitDirBytes, err := os.ReadFile(gitDirPath)
if err != nil {
// ignoring error
return nil
}
trimmedGitDir := strings.TrimSpace(string(gitDirBytes))
// removing the .git part
worktreeDir := path.Dir(trimmedGitDir)
result = append(result, worktreeDir)
return nil
})
return result
}
// Returns the path of the git-dir for the worktree. For linked worktrees, the worktree has
// a .git file that points to the git-dir (which itself lives in the git-dir
// of the repo)
func worktreeGitDirPath(worktreePath string) (string, error) {
func worktreeGitDirPath(fs afero.Fs, worktreePath string) (string, error) {
// if .git is a file, we're in a linked worktree, otherwise we're in
// the main worktree
dotGitPath := path.Join(worktreePath, ".git")
gitFileInfo, err := os.Stat(dotGitPath)
gitFileInfo, err := fs.Stat(dotGitPath)
if err != nil {
return "", err
}
@ -157,12 +123,12 @@ func worktreeGitDirPath(worktreePath string) (string, error) {
return dotGitPath, nil
}
return linkedWorktreeGitDirPath(worktreePath)
return linkedWorktreeGitDirPath(fs, worktreePath)
}
func linkedWorktreeGitDirPath(worktreePath string) (string, error) {
func linkedWorktreeGitDirPath(fs afero.Fs, worktreePath string) (string, error) {
dotGitPath := path.Join(worktreePath, ".git")
gitFileContents, err := os.ReadFile(dotGitPath)
gitFileContents, err := afero.ReadFile(fs, dotGitPath)
if err != nil {
return "", err
}
@ -180,7 +146,11 @@ func linkedWorktreeGitDirPath(worktreePath string) (string, error) {
return gitDir, nil
}
func GetCurrentRepoGitDirPath(currentPath string) (string, string, error) {
func getCurrentRepoGitDirPath(
fs afero.Fs,
resolveSymlinkFn func(string) (string, error),
currentPath string,
) (string, string, error) {
var unresolvedGitPath string
if env.GetGitDirEnv() != "" {
unresolvedGitPath = env.GetGitDirEnv()
@ -188,13 +158,13 @@ func GetCurrentRepoGitDirPath(currentPath string) (string, string, error) {
unresolvedGitPath = path.Join(currentPath, ".git")
}
gitPath, err := resolveSymlink(unresolvedGitPath)
gitPath, err := resolveSymlinkFn(unresolvedGitPath)
if err != nil {
return "", "", err
}
// check if .git is a file or a directory
gitFileInfo, err := os.Stat(gitPath)
gitFileInfo, err := fs.Stat(gitPath)
if err != nil {
return "", "", err
}
@ -205,7 +175,7 @@ func GetCurrentRepoGitDirPath(currentPath string) (string, string, error) {
}
// either in a submodule, or worktree
worktreeGitPath, err := linkedWorktreeGitDirPath(currentPath)
worktreeGitPath, err := linkedWorktreeGitDirPath(fs, currentPath)
if err != nil {
return "", "", errors.Errorf("could not find git dir for %s: %v", currentPath, err)
}
@ -238,3 +208,41 @@ func resolveSymlink(path string) (string, error) {
return filepath.EvalSymlinks(path)
}
// Returns the paths of linked worktrees
func linkedWortkreePaths(fs afero.Fs, repoGitDirPath string) []string {
result := []string{}
// For each directory in this path we're going to cat the `gitdir` file and append its contents to our result
// That file points us to the `.git` file in the worktree.
worktreeGitDirsPath := path.Join(repoGitDirPath, "worktrees")
// ensure the directory exists
_, err := fs.Stat(worktreeGitDirsPath)
if err != nil {
return result
}
_ = afero.Walk(fs, worktreeGitDirsPath, func(currPath string, info ioFs.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return nil
}
gitDirPath := path.Join(currPath, "gitdir")
gitDirBytes, err := afero.ReadFile(fs, gitDirPath)
if err != nil {
// ignoring error
return nil
}
trimmedGitDir := strings.TrimSpace(string(gitDirBytes))
// removing the .git part
worktreeDir := path.Dir(trimmedGitDir)
result = append(result, worktreeDir)
return nil
})
return result
}
View File
@ -0,0 +1,118 @@
package git_commands
import (
"testing"
"github.com/go-errors/errors"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
)
func mockResolveSymlinkFn(p string) (string, error) { return p, nil }
type Scenario struct {
Name string
BeforeFunc func(fs afero.Fs)
Path string
Expected *RepoPaths
Err error
}
func TestGetRepoPathsAux(t *testing.T) {
scenarios := []Scenario{
{
Name: "typical case",
BeforeFunc: func(fs afero.Fs) {
// setup for main worktree
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
},
Path: "/path/to/repo",
Expected: &RepoPaths{
currentPath: "/path/to/repo",
worktreePath: "/path/to/repo",
worktreeGitDirPath: "/path/to/repo/.git",
repoPath: "/path/to/repo",
repoGitDirPath: "/path/to/repo/.git",
repoName: "repo",
},
Err: nil,
},
{
Name: "linked worktree",
BeforeFunc: func(fs afero.Fs) {
// setup for linked worktree
_ = fs.MkdirAll("/path/to/repo/.git/worktrees/worktree1", 0o755)
_ = afero.WriteFile(fs, "/path/to/repo/worktree1/.git", []byte("gitdir: /path/to/repo/.git/worktrees/worktree1"), 0o644)
},
Path: "/path/to/repo/worktree1",
Expected: &RepoPaths{
currentPath: "/path/to/repo/worktree1",
worktreePath: "/path/to/repo/worktree1",
worktreeGitDirPath: "/path/to/repo/.git/worktrees/worktree1",
repoPath: "/path/to/repo",
repoGitDirPath: "/path/to/repo/.git",
repoName: "repo",
},
Err: nil,
},
{
Name: "worktree .git file missing gitdir directive",
BeforeFunc: func(fs afero.Fs) {
_ = fs.MkdirAll("/path/to/repo/.git/worktrees/worktree2", 0o755)
_ = afero.WriteFile(fs, "/path/to/repo/worktree2/.git", []byte("blah"), 0o644)
},
Path: "/path/to/repo/worktree2",
Expected: nil,
Err: errors.New("failed to get repo git dir path: could not find git dir for /path/to/repo/worktree2: /path/to/repo/worktree2/.git is a file which suggests we are in a submodule or a worktree but the file's contents do not contain a gitdir pointing to the actual .git directory"),
},
{
Name: "worktree .git file gitdir directive points to a non-existing directory",
BeforeFunc: func(fs afero.Fs) {
_ = fs.MkdirAll("/path/to/repo/.git/worktrees/worktree2", 0o755)
_ = afero.WriteFile(fs, "/path/to/repo/worktree2/.git", []byte("gitdir: /nonexistant"), 0o644)
},
Path: "/path/to/repo/worktree2",
Expected: nil,
Err: errors.New("failed to get repo git dir path: could not find git dir for /path/to/repo/worktree2"),
},
{
Name: "submodule",
BeforeFunc: func(fs afero.Fs) {
_ = fs.MkdirAll("/path/to/repo/.git/modules/submodule1", 0o755)
_ = afero.WriteFile(fs, "/path/to/repo/submodule1/.git", []byte("gitdir: /path/to/repo/.git/modules/submodule1"), 0o644)
},
Path: "/path/to/repo/submodule1",
Expected: &RepoPaths{
currentPath: "/path/to/repo/submodule1",
worktreePath: "/path/to/repo/submodule1",
worktreeGitDirPath: "/path/to/repo/.git/modules/submodule1",
repoPath: "/path/to/repo/submodule1",
repoGitDirPath: "/path/to/repo/.git/modules/submodule1",
repoName: "submodule1",
},
Err: nil,
},
}
for _, s := range scenarios {
s := s
t.Run(s.Name, func(t *testing.T) {
fs := afero.NewMemMapFs()
// prepare the filesystem for the scenario
s.BeforeFunc(fs)
// run the function with the scenario path
repoPaths, err := getRepoPathsAux(fs, mockResolveSymlinkFn, s.Path)
// check the error and the paths
if s.Err != nil {
assert.Error(t, err)
assert.EqualError(t, err, s.Err.Error())
} else {
assert.Nil(t, err)
assert.Equal(t, s.Expected, repoPaths)
}
})
}
}
View File
@ -1,31 +1,23 @@
package git_commands
import (
"io/fs"
"os"
iofs "io/fs"
"path/filepath"
"strings"
"github.com/go-errors/errors"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/commands/oscommands"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/samber/lo"
"github.com/spf13/afero"
)
type WorktreeLoader struct {
*GitCommon
cmd oscommands.ICmdObjBuilder
}
func NewWorktreeLoader(
gitCommon *GitCommon,
cmd oscommands.ICmdObjBuilder,
) *WorktreeLoader {
return &WorktreeLoader{
GitCommon: gitCommon,
cmd: cmd,
}
func NewWorktreeLoader(gitCommon *GitCommon) *WorktreeLoader {
return &WorktreeLoader{GitCommon: gitCommon}
}
func (self *WorktreeLoader) GetWorktrees() ([]*models.Worktree, error) {
@ -38,7 +30,9 @@ func (self *WorktreeLoader) GetWorktrees() ([]*models.Worktree, error) {
return nil, err
}
splitLines := utils.SplitLines(worktreesOutput)
splitLines := strings.Split(
utils.NormalizeLinefeeds(worktreesOutput), "\n",
)
var worktrees []*models.Worktree
var current *models.Worktree
@ -64,7 +58,7 @@ func (self *WorktreeLoader) GetWorktrees() ([]*models.Worktree, error) {
isPathMissing := self.pathExists(path)
var gitDir string
gitDir, err := worktreeGitDirPath(path)
gitDir, err := worktreeGitDirPath(self.Fs, path)
if err != nil {
self.Log.Warnf("Could not find git dir for worktree %s: %v", path, err)
}
@ -114,13 +108,13 @@ func (self *WorktreeLoader) GetWorktrees() ([]*models.Worktree, error) {
continue
}
rebasedBranch, ok := rebasedBranch(worktree)
rebasedBranch, ok := self.rebasedBranch(worktree)
if ok {
worktree.Branch = rebasedBranch
continue
}
bisectedBranch, ok := bisectedBranch(worktree)
bisectedBranch, ok := self.bisectedBranch(worktree)
if ok {
worktree.Branch = bisectedBranch
continue
@ -131,8 +125,8 @@ func (self *WorktreeLoader) GetWorktrees() ([]*models.Worktree, error) {
}
func (self *WorktreeLoader) pathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
if errors.Is(err, fs.ErrNotExist) {
if _, err := self.Fs.Stat(path); err != nil {
if errors.Is(err, iofs.ErrNotExist) {
return true
}
self.Log.Errorf("failed to check if worktree path `%s` exists\n%v", path, err)
@ -141,9 +135,9 @@ func (self *WorktreeLoader) pathExists(path string) bool {
return false
}
func rebasedBranch(worktree *models.Worktree) (string, bool) {
func (self *WorktreeLoader) rebasedBranch(worktree *models.Worktree) (string, bool) {
for _, dir := range []string{"rebase-merge", "rebase-apply"} {
if bytesContent, err := os.ReadFile(filepath.Join(worktree.GitDir, dir, "head-name")); err == nil {
if bytesContent, err := afero.ReadFile(self.Fs, filepath.Join(worktree.GitDir, dir, "head-name")); err == nil {
headName := strings.TrimSpace(string(bytesContent))
shortHeadName := strings.TrimPrefix(headName, "refs/heads/")
return shortHeadName, true
@ -153,9 +147,9 @@ func rebasedBranch(worktree *models.Worktree) (string, bool) {
return "", false
}
func bisectedBranch(worktree *models.Worktree) (string, bool) {
func (self *WorktreeLoader) bisectedBranch(worktree *models.Worktree) (string, bool) {
bisectStartPath := filepath.Join(worktree.GitDir, "BISECT_START")
startContent, err := os.ReadFile(bisectStartPath)
startContent, err := afero.ReadFile(self.Fs, bisectStartPath)
if err != nil {
return "", false
}
View File
@ -3,9 +3,195 @@ package git_commands
import (
"testing"
"github.com/go-errors/errors"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/commands/oscommands"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
)
func TestGetWorktrees(t *testing.T) {
type scenario struct {
testName string
repoPaths *RepoPaths
before func(runner *oscommands.FakeCmdObjRunner, fs afero.Fs)
expectedWorktrees []*models.Worktree
expectedErr string
}
scenarios := []scenario{
{
testName: "Single worktree (main)",
repoPaths: &RepoPaths{
repoPath: "/path/to/repo",
worktreePath: "/path/to/repo",
},
before: func(runner *oscommands.FakeCmdObjRunner, fs afero.Fs) {
runner.ExpectGitArgs([]string{"worktree", "list", "--porcelain"},
`worktree /path/to/repo
HEAD d85cc9d281fa6ae1665c68365fc70e75e82a042d
branch refs/heads/mybranch
`,
nil)
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
},
expectedWorktrees: []*models.Worktree{
{
IsMain: true,
IsCurrent: true,
Path: "/path/to/repo",
IsPathMissing: false,
GitDir: "/path/to/repo/.git",
Branch: "mybranch",
Name: "repo",
},
},
expectedErr: "",
},
{
testName: "Multiple worktrees (main + linked)",
repoPaths: &RepoPaths{
repoPath: "/path/to/repo",
worktreePath: "/path/to/repo",
},
before: func(runner *oscommands.FakeCmdObjRunner, fs afero.Fs) {
runner.ExpectGitArgs([]string{"worktree", "list", "--porcelain"},
`worktree /path/to/repo
HEAD d85cc9d281fa6ae1665c68365fc70e75e82a042d
branch refs/heads/mybranch
worktree /path/to/repo-worktree
HEAD 775955775e79b8f5b4c4b56f82fbf657e2d5e4de
branch refs/heads/mybranch-worktree
`,
nil)
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
_ = fs.MkdirAll("/path/to/repo-worktree", 0o755)
_ = fs.MkdirAll("/path/to/repo/.git/worktrees/repo-worktree", 0o755)
_ = afero.WriteFile(fs, "/path/to/repo-worktree/.git", []byte("gitdir: /path/to/repo/.git/worktrees/repo-worktree"), 0o755)
},
expectedWorktrees: []*models.Worktree{
{
IsMain: true,
IsCurrent: true,
Path: "/path/to/repo",
IsPathMissing: false,
GitDir: "/path/to/repo/.git",
Branch: "mybranch",
Name: "repo",
},
{
IsMain: false,
IsCurrent: false,
Path: "/path/to/repo-worktree",
IsPathMissing: false,
GitDir: "/path/to/repo/.git/worktrees/repo-worktree",
Branch: "mybranch-worktree",
Name: "repo-worktree",
},
},
expectedErr: "",
},
{
testName: "Worktree missing path",
repoPaths: &RepoPaths{
repoPath: "/path/to/repo",
worktreePath: "/path/to/repo",
},
before: func(runner *oscommands.FakeCmdObjRunner, fs afero.Fs) {
runner.ExpectGitArgs([]string{"worktree", "list", "--porcelain"},
`worktree /path/to/worktree
HEAD 775955775e79b8f5b4c4b56f82fbf657e2d5e4de
branch refs/heads/missingbranch
`,
nil)
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
},
expectedWorktrees: []*models.Worktree{
{
IsMain: false,
IsCurrent: false,
Path: "/path/to/worktree",
IsPathMissing: true,
GitDir: "",
Branch: "missingbranch",
Name: "worktree",
},
},
expectedErr: "",
},
{
testName: "In linked worktree",
repoPaths: &RepoPaths{
repoPath: "/path/to/repo",
worktreePath: "/path/to/repo-worktree",
},
before: func(runner *oscommands.FakeCmdObjRunner, fs afero.Fs) {
runner.ExpectGitArgs([]string{"worktree", "list", "--porcelain"},
`worktree /path/to/repo
HEAD d85cc9d281fa6ae1665c68365fc70e75e82a042d
branch refs/heads/mybranch
worktree /path/to/repo-worktree
HEAD 775955775e79b8f5b4c4b56f82fbf657e2d5e4de
branch refs/heads/mybranch-worktree
`,
nil)
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
_ = fs.MkdirAll("/path/to/repo-worktree", 0o755)
_ = fs.MkdirAll("/path/to/repo/.git/worktrees/repo-worktree", 0o755)
_ = afero.WriteFile(fs, "/path/to/repo-worktree/.git", []byte("gitdir: /path/to/repo/.git/worktrees/repo-worktree"), 0o755)
},
expectedWorktrees: []*models.Worktree{
{
IsMain: false,
IsCurrent: true,
Path: "/path/to/repo-worktree",
IsPathMissing: false,
GitDir: "/path/to/repo/.git/worktrees/repo-worktree",
Branch: "mybranch-worktree",
Name: "repo-worktree",
},
{
IsMain: true,
IsCurrent: false,
Path: "/path/to/repo",
IsPathMissing: false,
GitDir: "/path/to/repo/.git",
Branch: "mybranch",
Name: "repo",
},
},
expectedErr: "",
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
runner := oscommands.NewFakeRunner(t)
fs := afero.NewMemMapFs()
s.before(runner, fs)
loader := &WorktreeLoader{
GitCommon: buildGitCommon(commonDeps{runner: runner, fs: fs, repoPaths: s.repoPaths}),
}
worktrees, err := loader.GetWorktrees()
if s.expectedErr != "" {
assert.EqualError(t, errors.New(s.expectedErr), err.Error())
} else {
assert.NoError(t, err)
assert.EqualValues(t, worktrees, s.expectedWorktrees)
}
})
}
}
func TestGetUniqueNamesFromPaths(t *testing.T) {
for _, scenario := range []struct {
input []string
View File
@ -1,305 +1,74 @@
package commands
import (
"fmt"
"os"
"testing"
"time"
"github.com/go-errors/errors"
gogit "github.com/jesseduffield/go-git/v5"
"github.com/jesseduffield/lazygit/pkg/commands/git_commands"
"github.com/jesseduffield/lazygit/pkg/commands/git_config"
"github.com/jesseduffield/lazygit/pkg/commands/oscommands"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/sasha-s/go-deadlock"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
)
type fileInfoMock struct {
name string
size int64
fileMode os.FileMode
fileModTime time.Time
isDir bool
sys interface{}
}
// Name is a function.
func (f fileInfoMock) Name() string {
return f.name
}
// Size is a function.
func (f fileInfoMock) Size() int64 {
return f.size
}
// Mode is a function.
func (f fileInfoMock) Mode() os.FileMode {
return f.fileMode
}
// ModTime is a function.
func (f fileInfoMock) ModTime() time.Time {
return f.fileModTime
}
// IsDir is a function.
func (f fileInfoMock) IsDir() bool {
return f.isDir
}
// Sys is a function.
func (f fileInfoMock) Sys() interface{} {
return f.sys
}
// TestNavigateToRepoRootDirectory is a function.
func TestNavigateToRepoRootDirectory(t *testing.T) {
func TestFindWorktreeRoot(t *testing.T) {
type scenario struct {
testName string
stat func(string) (os.FileInfo, error)
chdir func(string) error
test func(error)
testName string
currentPath string
before func(fs afero.Fs)
expectedPath string
expectedErr string
}
scenarios := []scenario{
{
"Navigate to git repository",
func(string) (os.FileInfo, error) {
return fileInfoMock{isDir: true}, nil
},
func(string) error {
return nil
},
func(err error) {
assert.NoError(t, err)
testName: "at root of worktree",
currentPath: "/path/to/repo",
before: func(fs afero.Fs) {
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
},
expectedPath: "/path/to/repo",
expectedErr: "",
},
{
"An error occurred when getting path information",
func(string) (os.FileInfo, error) {
return nil, fmt.Errorf("An error occurred")
},
func(string) error {
return nil
},
func(err error) {
assert.Error(t, err)
assert.EqualError(t, err, "An error occurred")
testName: "inside worktree",
currentPath: "/path/to/repo/subdir",
before: func(fs afero.Fs) {
_ = fs.MkdirAll("/path/to/repo/.git", 0o755)
_ = fs.MkdirAll("/path/to/repo/subdir", 0o755)
},
expectedPath: "/path/to/repo",
expectedErr: "",
},
{
"An error occurred when trying to move one path backward",
func(string) (os.FileInfo, error) {
return nil, os.ErrNotExist
},
func(string) error {
return fmt.Errorf("An error occurred")
},
func(err error) {
assert.Error(t, err)
assert.EqualError(t, err, "An error occurred")
testName: "not in a git repo",
currentPath: "/path/to/dir",
before: func(fs afero.Fs) {},
expectedPath: "",
expectedErr: "Must open lazygit in a git repository",
},
{
testName: "In linked worktree",
currentPath: "/path/to/worktree",
before: func(fs afero.Fs) {
_ = fs.MkdirAll("/path/to/worktree", 0o755)
_ = afero.WriteFile(fs, "/path/to/worktree/.git", []byte("blah"), 0o755)
},
expectedPath: "/path/to/worktree",
expectedErr: "",
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
s.test(navigateToRepoRootDirectory(s.stat, s.chdir))
})
}
}
// TestSetupRepository is a function.
func TestSetupRepository(t *testing.T) {
type scenario struct {
testName string
openGitRepository func(string, *gogit.PlainOpenOptions) (*gogit.Repository, error)
errorStr string
options gogit.PlainOpenOptions
test func(*gogit.Repository, error)
}
scenarios := []scenario{
{
"A gitconfig parsing error occurred",
func(string, *gogit.PlainOpenOptions) (*gogit.Repository, error) {
return nil, fmt.Errorf(`unquoted '\' must be followed by new line`)
},
"error translated",
gogit.PlainOpenOptions{},
func(r *gogit.Repository, err error) {
assert.Error(t, err)
assert.EqualError(t, err, "error translated")
},
},
{
"A gogit error occurred",
func(string, *gogit.PlainOpenOptions) (*gogit.Repository, error) {
return nil, fmt.Errorf("Error from inside gogit")
},
"",
gogit.PlainOpenOptions{},
func(r *gogit.Repository, err error) {
assert.Error(t, err)
assert.EqualError(t, err, "Error from inside gogit")
},
},
{
"Setup done properly",
func(string, *gogit.PlainOpenOptions) (*gogit.Repository, error) {
assert.NoError(t, os.RemoveAll("/tmp/lazygit-test"))
r, err := gogit.PlainInit("/tmp/lazygit-test", false)
assert.NoError(t, err)
return r, nil
},
"",
gogit.PlainOpenOptions{},
func(r *gogit.Repository, err error) {
assert.NoError(t, err)
assert.NotNil(t, r)
},
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
s.test(setupRepository(s.openGitRepository, s.options, s.errorStr))
})
}
}
// TestNewGitCommand is a function.
func TestNewGitCommand(t *testing.T) {
actual, err := os.Getwd()
assert.NoError(t, err)
defer func() {
assert.NoError(t, os.Chdir(actual))
}()
type scenario struct {
testName string
setup func()
test func(*GitCommand, error)
}
scenarios := []scenario{
{
"An error occurred, folder doesn't contains a git repository",
func() {
assert.NoError(t, os.Chdir("/tmp"))
},
func(gitCmd *GitCommand, err error) {
assert.Error(t, err)
assert.Regexp(t, `Must open lazygit in a git repository`, err.Error())
},
},
{
"New GitCommand object created",
func() {
assert.NoError(t, os.RemoveAll("/tmp/lazygit-test"))
_, err := gogit.PlainInit("/tmp/lazygit-test", false)
assert.NoError(t, err)
assert.NoError(t, os.Chdir("/tmp/lazygit-test"))
},
func(gitCmd *GitCommand, err error) {
assert.NoError(t, err)
},
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
s.setup()
s.test(
NewGitCommand(utils.NewDummyCommon(),
&git_commands.GitVersion{},
oscommands.NewDummyOSCommand(),
git_config.NewFakeGitConfig(nil),
&deadlock.Mutex{},
))
})
}
}
func TestFindDotGitDir(t *testing.T) {
type scenario struct {
testName string
stat func(string) (os.FileInfo, error)
readFile func(filename string) ([]byte, error)
test func(string, error)
}
scenarios := []scenario{
{
".git is a directory",
func(dotGit string) (os.FileInfo, error) {
assert.Equal(t, ".git", dotGit)
return os.Stat("testdata/a_dir")
},
func(dotGit string) ([]byte, error) {
assert.Fail(t, "readFile should not be called if .git is a directory")
return nil, nil
},
func(gitDir string, err error) {
assert.NoError(t, err)
assert.Equal(t, ".git", gitDir)
},
},
{
".git is a file",
func(dotGit string) (os.FileInfo, error) {
assert.Equal(t, ".git", dotGit)
return os.Stat("testdata/a_file")
},
func(dotGit string) ([]byte, error) {
assert.Equal(t, ".git", dotGit)
return []byte("gitdir: blah\n"), nil
},
func(gitDir string, err error) {
assert.NoError(t, err)
assert.Equal(t, "blah", gitDir)
},
},
{
"os.Stat returns an error",
func(dotGit string) (os.FileInfo, error) {
assert.Equal(t, ".git", dotGit)
return nil, errors.New("error")
},
func(dotGit string) ([]byte, error) {
assert.Fail(t, "readFile should not be called os.Stat returns an error")
return nil, nil
},
func(gitDir string, err error) {
assert.Error(t, err)
},
},
{
"readFile returns an error",
func(dotGit string) (os.FileInfo, error) {
assert.Equal(t, ".git", dotGit)
return os.Stat("testdata/a_file")
},
func(dotGit string) ([]byte, error) {
return nil, errors.New("error")
},
func(gitDir string, err error) {
assert.Error(t, err)
},
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
s.test(findDotGitDir(s.stat, s.readFile))
fs := afero.NewMemMapFs()
s.before(fs)
root, err := findWorktreeRoot(fs, s.currentPath)
if s.expectedErr != "" {
assert.EqualError(t, errors.New(s.expectedErr), err.Error())
} else {
assert.NoError(t, err)
assert.Equal(t, s.expectedPath, root)
}
})
}
}
View File
@ -4,6 +4,7 @@ import (
"github.com/jesseduffield/lazygit/pkg/config"
"github.com/jesseduffield/lazygit/pkg/i18n"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
// Commonly used things wrapped into one struct for convenience when passing it around
@ -12,4 +13,7 @@ type Common struct {
Tr *i18n.TranslationSet
UserConfig *config.UserConfig
Debug bool
// for interacting with the filesystem. We use afero rather than the default
// `os` package for the sake of mocking the filesystem in tests
Fs afero.Fs
}
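As an illustration of the pattern this field enables (a minimal sketch; `readVersionFile` and its test are hypothetical, not code from this commit), callers read through the injected `afero.Fs`, so a test can substitute an in-memory filesystem:

```go
package example

import (
	"testing"

	"github.com/spf13/afero"
)

// readVersionFile reads through whatever afero.Fs it is handed rather than
// calling os.ReadFile directly, mirroring how code can read via Common.Fs.
func readVersionFile(fs afero.Fs, path string) (string, error) {
	data, err := afero.ReadFile(fs, path)
	if err != nil {
		return "", err
	}
	return string(data), nil
}

// The test injects an in-memory filesystem, so nothing touches the real disk
// and no cleanup step is needed.
func TestReadVersionFile(t *testing.T) {
	fs := afero.NewMemMapFs()
	_ = afero.WriteFile(fs, "/repo/VERSION", []byte("1.2.3"), 0o644)

	got, err := readVersionFile(fs, "/repo/VERSION")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got != "1.2.3" {
		t.Fatalf("expected %q, got %q", "1.2.3", got)
	}
}
```

In production the struct is built with `afero.NewOsFs()` (as `NewCommon` does above), while tests can pass `afero.NewMemMapFs()`.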
View File
@ -7,6 +7,7 @@ import (
"github.com/jesseduffield/lazygit/pkg/config"
"github.com/jesseduffield/lazygit/pkg/i18n"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
// NewDummyLog creates a new dummy Log for testing
@ -22,6 +23,7 @@ func NewDummyCommon() *common.Common {
Log: NewDummyLog(),
Tr: &tr,
UserConfig: config.GetDefaultConfig(),
Fs: afero.NewOsFs(),
}
}
@ -31,5 +33,8 @@ func NewDummyCommonWithUserConfig(userConfig *config.UserConfig) *common.Common
Log: NewDummyLog(),
Tr: &tr,
UserConfig: userConfig,
// TODO: remove dependency on actual filesystem in tests and switch to using
// in-memory for everything
Fs: afero.NewOsFs(),
}
}
2
vendor/github.com/spf13/afero/.gitignore generated vendored Normal file
View File
@ -0,0 +1,2 @@
sftpfs/file1
sftpfs/test/
174
vendor/github.com/spf13/afero/LICENSE.txt generated vendored Normal file
View File
@ -0,0 +1,174 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
442
vendor/github.com/spf13/afero/README.md generated vendored Normal file
View File
@ -0,0 +1,442 @@
![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
A FileSystem Abstraction System for Go
[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Overview
Afero is a filesystem framework providing a simple, uniform and universal API
for interacting with any filesystem, as an abstraction layer providing interfaces,
types and methods. Afero has an exceptionally clean interface and simple design
without needless constructors or initialization methods.
Afero is also a library providing a base set of interoperable backend
filesystems that make it easy to work with afero while retaining all the power
and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
It is suitable for use in any situation where you would consider using the OS
package as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.
## Afero Features
* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
* Wrapper for go 1.16 filesystem abstraction `io/fs.FS`
# Using Afero
Afero is easy to use and easier to adopt.
A few different ways you could use Afero:
* Use the interfaces alone to define your own file system.
* Wrapper for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
## Step 1: Install Afero
First use go get to install the latest version of the library.
$ go get github.com/spf13/afero
Next include Afero in your application.
```go
import "github.com/spf13/afero"
```
## Step 2: Declare a backend
First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs = afero.NewMemMapFs()
// or
var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.
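As a small illustration of that isolation (variable and path names are arbitrary), two separately constructed `MemMapFs` values share no state:

```go
fsA := afero.NewMemMapFs()
fsB := afero.NewMemMapFs()

_ = afero.WriteFile(fsA, "/tmp/foo", []byte("hello"), 0o644)

_, errA := fsA.Stat("/tmp/foo") // errA == nil: the file exists in fsA
_, errB := fsB.Stat("/tmp/foo") // os.IsNotExist(errB): fsB never saw the write
```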
## Step 3: Use it like you would the OS package
Throughout your application use any function and method like you normally
would.
So if my application before had:
```go
os.Open("/tmp/foo")
```
We would replace it with:
```go
AppFs.Open("/tmp/foo")
```
`AppFs` being the variable we defined above.
## List of all available functions
File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chown(name string, uid, gid int) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.
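A minimal sketch of that pattern (the `fsys` package name and `FS` variable are illustrative, not part of Afero):

```go
// Package fsys exports the filesystem used by the rest of the application.
package fsys

import "github.com/spf13/afero"

// FS defaults to the real OS filesystem; a test can reassign it to
// afero.NewMemMapFs() before exercising code that reads through it.
var FS afero.Fs = afero.NewOsFs()
```

Application code would then call, for example, `afero.ReadFile(fsys.FS, "config.yml")` instead of `os.ReadFile`.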
## Using Afero's utility functions
Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.
The afero utilities support all afero compatible backends.
The list of utilities includes:
```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
They are available under two different approaches to use. You can either call
them directly where the first parameter of each function will be the file
system, or you can declare a new `Afero`, a custom type used to bind these
functions as methods to a given filesystem.
### Calling utilities directly
```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs,"", "ioutil-test")
```
### Calling via Afero
```go
fs := afero.NewMemMapFs()
afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
## Using Afero for Testing
There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and can be easily
reproducible regardless of OS. You could create files to your heart’s content
and the file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.
* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed
One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs(); during testing you
can set it to afero.NewMemMapFs().
It wouldn't be uncommon to have each test initialize a blank slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that Tests are order
independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
appFS := afero.NewMemMapFs()
// create test files and directories
appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
name := "src/c"
_, err := appFS.Stat(name)
if os.IsNotExist(err) {
t.Errorf("file \"%s\" does not exist.\n", name)
}
}
```
# Available Backends
## Operating System Native
### OsFs
The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.
```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
```
## Memory Backed Storage
### MemMapFs
Afero also provides a fully atomic memory backed filesystem perfect for use in
mocking and for avoiding unnecessary disk I/O when persistence isn’t
necessary. It is fully concurrent and will work within go routines
safely.
```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
```
#### InMemoryFile
As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.
## Network Interfaces
### SftpFs
Afero has experimental support for secure file transfer protocol (sftp), which can
be used to perform file operations over an encrypted channel.
### GCSFs
Afero has experimental support for Google Cloud Storage (GCS). You can either set the
`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
`NewGcsFS` to configure access to your GCS bucket.
Some known limitations of the existing implementation:
* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version.
* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version.
* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
## Filtering Backends
### BasePathFs
The BasePathFs restricts all operations to a given path within an Fs.
The given file name to the operations on this Fs will be prepended with
the base path before calling the source Fs.
```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
```
### ReadOnlyFs
A thin wrapper around the source Fs providing a read only view.
```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
```
### RegexpFs
A filtered view on file names, any file NOT matching
the passed regexp will be treated as non-existing.
Files not matching the regexp provided will not be created.
Directories are not filtered.
```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
```
### HttpFs
Afero provides an http-compatible backend which can wrap any of the existing
backends.
The net/http package requires a slightly different version of Open, one which
returns an http.File type.
Afero provides an HttpFs file system which satisfies this requirement.
Any Afero filesystem can be used as an HttpFs.
```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
```
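For a more concrete sketch of the same idea, here is an in-memory backend served over HTTP (the file name and port are arbitrary):
```go
base := afero.NewMemMapFs()
afero.WriteFile(base, "/index.html", []byte("<h1>hello</h1>"), 0644)

httpFs := afero.NewHttpFs(base)
fileserver := http.FileServer(httpFs.Dir("/"))
http.Handle("/", fileserver)
// http.ListenAndServe(":8080", nil)
```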
## Composite Backends
Afero provides the ability to have two (or more) filesystems act as a single
file system.
### CacheOnReadFs
The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly, provided the request falls within the cache duration of when the copy
was created in the overlay.
If the base filesystem is writeable, any changes to files will be
done first to the base, then to the overlay layer. Write calls to open file
handles like `Write()` or `Truncate()` go to the overlay first.
To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).
Files are cached in the layer for the given time.Duration; a cache duration of 0
means "forever", i.e. the file will never be re-requested from the base.
A read-only base will make the overlay read-only as well, but files will still be
copied from the base to the overlay when they're not present (or are outdated) in the
caching layer.
```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
```
### CopyOnWriteFs()
The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.
Read operations will first look in the overlay and if not found there, will
serve the file from the base.
Changes to the file system will only be made in the overlay.
Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).
Removing and renaming files present only in the base layer is not currently
permitted. If a file is present in both the base layer and the overlay, only the
overlay copy will be removed/renamed.
```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
fh, _ := ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
```
In this example all write operations will only occur in memory (MemMapFs)
leaving the base filesystem (OsFs) untouched.
## Desired/possible backends
The following is a short list of possible backends we hope someone will
implement:
* SSH
* S3
# About the project
## What's in the name
Afero comes from the Latin roots Ad-Facere.
**"Ad"** is a prefix meaning "to".
**"Facere"** is a form of the root "faciō", meaning "make or do".
The literal meaning of afero is "to make" or "to do" which seems very fitting
for a library that allows one to make files and directories and do things with them.
The English word that shares the same roots as Afero is "affair". Affair shares
the same concept but as a noun it means "something that is made or done" or "an
object of a particular type".
It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
Googles very well.
## Release Notes
See the [Releases Page](https://github.com/spf13/afero/releases).
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13)
* [jaqx0r](https://github.com/jaqx0r)
* [mbertschler](https://github.com/mbertschler)
* [xor-gate](https://github.com/xor-gate)
## License
Afero is released under the Apache 2.0 license. See
[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)

111
vendor/github.com/spf13/afero/afero.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package afero provides types and methods for interacting with the filesystem,
// as an abstraction layer.
// Afero also provides a few implementations that are mostly interoperable. One that
// uses the operating system filesystem, one that uses memory to store files
// (cross platform) and an interface that should be implemented if you want to
// provide your own filesystem.
package afero
import (
"errors"
"io"
"os"
"time"
)
type Afero struct {
Fs
}
// File represents a file in the filesystem.
type File interface {
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() string
Readdir(count int) ([]os.FileInfo, error)
Readdirnames(n int) ([]string, error)
Stat() (os.FileInfo, error)
Sync() error
Truncate(size int64) error
WriteString(s string) (ret int, err error)
}
// Fs is the filesystem interface.
//
// Any simulated or real filesystem should implement this interface.
type Fs interface {
// Create creates a file in the filesystem, returning the file and an
// error, if any happens.
Create(name string) (File, error)
// Mkdir creates a directory in the filesystem, return an error if any
// happens.
Mkdir(name string, perm os.FileMode) error
// MkdirAll creates a directory path and all parents that does not exist
// yet.
MkdirAll(path string, perm os.FileMode) error
// Open opens a file, returning it or an error, if any happens.
Open(name string) (File, error)
// OpenFile opens a file using the given flags and the given mode.
OpenFile(name string, flag int, perm os.FileMode) (File, error)
// Remove removes a file identified by name, returning an error, if any
// happens.
Remove(name string) error
// RemoveAll removes a directory path and any children it contains. It
// does not fail if the path does not exist (return nil).
RemoveAll(path string) error
// Rename renames a file.
Rename(oldname, newname string) error
// Stat returns a FileInfo describing the named file, or an error, if any
// happens.
Stat(name string) (os.FileInfo, error)
// The name of this FileSystem
Name() string
// Chmod changes the mode of the named file to mode.
Chmod(name string, mode os.FileMode) error
// Chown changes the uid and gid of the named file.
Chown(name string, uid, gid int) error
// Chtimes changes the access and modification times of the named file
Chtimes(name string, atime time.Time, mtime time.Time) error
}
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("out of range")
ErrTooLarge = errors.New("too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)

10
vendor/github.com/spf13/afero/appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,10 @@
# This currently does nothing. We have moved to GitHub action, but this is kept
# until spf13 has disabled this project in AppVeyor.
version: '{build}'
clone_folder: C:\gopath\src\github.com\spf13\afero
environment:
GOPATH: C:\gopath
build_script:
- cmd: >-
go version

222
vendor/github.com/spf13/afero/basepath.go generated vendored Normal file
View File

@ -0,0 +1,222 @@
package afero
import (
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
var (
_ Lstater = (*BasePathFs)(nil)
_ fs.ReadDirFile = (*BasePathFile)(nil)
)
// The BasePathFs restricts all operations to a given path within an Fs.
// The given file name to the operations on this Fs will be prepended with
// the base path before calling the base Fs.
// Any file name (after filepath.Clean()) outside this base path will be
// treated as non existing file.
//
// Note that it does not clean the error messages on return, so you may
// reveal the real path on errors.
type BasePathFs struct {
source Fs
path string
}
type BasePathFile struct {
File
path string
}
func (f *BasePathFile) Name() string {
sourcename := f.File.Name()
return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
}
func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) {
if rdf, ok := f.File.(fs.ReadDirFile); ok {
return rdf.ReadDir(n)
}
return readDirFile{f.File}.ReadDir(n)
}
func NewBasePathFs(source Fs, path string) Fs {
return &BasePathFs{source: source, path: path}
}
// on a file outside the base path it returns the given file name and an error,
// else the given file with the base path prepended
func (b *BasePathFs) RealPath(name string) (path string, err error) {
if err := validateBasePathName(name); err != nil {
return name, err
}
bpath := filepath.Clean(b.path)
path = filepath.Clean(filepath.Join(bpath, name))
if !strings.HasPrefix(path, bpath) {
return name, os.ErrNotExist
}
return path, nil
}
func validateBasePathName(name string) error {
if runtime.GOOS != "windows" {
// Not much to do here;
// the virtual file paths all look absolute on *nix.
return nil
}
// On Windows a common mistake would be to provide an absolute OS path
// We could strip out the base part, but that would not be very portable.
if filepath.IsAbs(name) {
return os.ErrNotExist
}
return nil
}
func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chtimes", Path: name, Err: err}
}
return b.source.Chtimes(name, atime, mtime)
}
func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chmod", Path: name, Err: err}
}
return b.source.Chmod(name, mode)
}
func (b *BasePathFs) Chown(name string, uid, gid int) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chown", Path: name, Err: err}
}
return b.source.Chown(name, uid, gid)
}
func (b *BasePathFs) Name() string {
return "BasePathFs"
}
func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "stat", Path: name, Err: err}
}
return b.source.Stat(name)
}
func (b *BasePathFs) Rename(oldname, newname string) (err error) {
if oldname, err = b.RealPath(oldname); err != nil {
return &os.PathError{Op: "rename", Path: oldname, Err: err}
}
if newname, err = b.RealPath(newname); err != nil {
return &os.PathError{Op: "rename", Path: newname, Err: err}
}
return b.source.Rename(oldname, newname)
}
func (b *BasePathFs) RemoveAll(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove_all", Path: name, Err: err}
}
return b.source.RemoveAll(name)
}
func (b *BasePathFs) Remove(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
return b.source.Remove(name)
}
func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
}
sourcef, err := b.source.OpenFile(name, flag, mode)
if err != nil {
return nil, err
}
return &BasePathFile{sourcef, b.path}, nil
}
func (b *BasePathFs) Open(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "open", Path: name, Err: err}
}
sourcef, err := b.source.Open(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.Mkdir(name, mode)
}
func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.MkdirAll(name, mode)
}
func (b *BasePathFs) Create(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "create", Path: name, Err: err}
}
sourcef, err := b.source.Create(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
name, err := b.RealPath(name)
if err != nil {
return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
}
if lstater, ok := b.source.(Lstater); ok {
return lstater.LstatIfPossible(name)
}
fi, err := b.source.Stat(name)
return fi, false, err
}
func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
oldname, err := b.RealPath(oldname)
if err != nil {
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
}
newname, err = b.RealPath(newname)
if err != nil {
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
}
if linker, ok := b.source.(Linker); ok {
return linker.SymlinkIfPossible(oldname, newname)
}
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}
func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
name, err := b.RealPath(name)
if err != nil {
return "", &os.PathError{Op: "readlink", Path: name, Err: err}
}
if reader, ok := b.source.(LinkReader); ok {
return reader.ReadlinkIfPossible(name)
}
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}

315
vendor/github.com/spf13/afero/cacheOnReadFs.go generated vendored Normal file
View File

@ -0,0 +1,315 @@
package afero
import (
"os"
"syscall"
"time"
)
// If the cache duration is 0, cache time will be unlimited, i.e. once
// a file is in the layer, the base will never be read again for this file.
//
// For cache times greater than 0, the modification time of a file is
// checked. Note that a lot of file system implementations only allow a
// resolution of a second for timestamps... or as the godoc for os.Chtimes()
// states: "The underlying filesystem may truncate or round the values to a
// less precise time unit."
//
// This caching union will forward all write calls also to the base file
// system first. To prevent writing to the base Fs, wrap it in a read-only
// filter - Note: this will also make the overlay read-only, for writing files
// in the overlay, use the overlay Fs directly, not via the union Fs.
type CacheOnReadFs struct {
base Fs
layer Fs
cacheTime time.Duration
}
func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
}
type cacheState int
const (
// not present in the overlay, unknown if it exists in the base:
cacheMiss cacheState = iota
// present in the overlay and in base, base file is newer:
cacheStale
// present in the overlay - with cache time == 0 it may exist in the base,
// with cacheTime > 0 it exists in the base and is same age or newer in the
// overlay
cacheHit
// happens if someone writes directly to the overlay without
// going through this union
cacheLocal
)
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
func (u *CacheOnReadFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error {
return copyFileToLayer(u.base, u.layer, name, flag, perm)
}
func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chtimes(name, atime, mtime)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chtimes(name, atime, mtime)
}
if err != nil {
return err
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chmod(name, mode)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chmod(name, mode)
}
if err != nil {
return err
}
return u.layer.Chmod(name, mode)
}
func (u *CacheOnReadFs) Chown(name string, uid, gid int) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chown(name, uid, gid)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chown(name, uid, gid)
}
if err != nil {
return err
}
return u.layer.Chown(name, uid, gid)
}
func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheMiss:
return u.base.Stat(name)
default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
return fi, nil
}
}
func (u *CacheOnReadFs) Rename(oldname, newname string) error {
st, _, err := u.cacheStatus(oldname)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Rename(oldname, newname)
case cacheStale, cacheMiss:
if err := u.copyToLayer(oldname); err != nil {
return err
}
err = u.base.Rename(oldname, newname)
}
if err != nil {
return err
}
return u.layer.Rename(oldname, newname)
}
func (u *CacheOnReadFs) Remove(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.Remove(name)
}
if err != nil {
return err
}
return u.layer.Remove(name)
}
func (u *CacheOnReadFs) RemoveAll(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.RemoveAll(name)
}
if err != nil {
return err
}
return u.layer.RemoveAll(name)
}
func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
if err := u.copyFileToLayer(name, flag, perm); err != nil {
return nil, err
}
}
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.layer.OpenFile(name, flag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
return &UnionFile{Base: bfi, Layer: lfi}, nil
}
return u.layer.OpenFile(name, flag, perm)
}
func (u *CacheOnReadFs) Open(name string) (File, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal:
return u.layer.Open(name)
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if bfi.IsDir() {
return u.base.Open(name)
}
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
case cacheStale:
if !fi.IsDir() {
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
}
case cacheHit:
if !fi.IsDir() {
return u.layer.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.layer.Open(name)
if err != nil && bfile == nil {
return nil, err
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *CacheOnReadFs) Name() string {
return "CacheOnReadFs"
}
func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm)
}
func (u *CacheOnReadFs) Create(name string) (File, error) {
bfh, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfh, err := u.layer.Create(name)
if err != nil {
// oops, see comment about OS_TRUNC above, should we remove? then we have to
// remember if the file did not exist before
bfh.Close()
return nil, err
}
return &UnionFile{Base: bfh, Layer: lfh}, nil
}

23
vendor/github.com/spf13/afero/const_bsds.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly
// +build aix darwin openbsd freebsd netbsd dragonfly
package afero
import (
"syscall"
)
const BADFD = syscall.EBADF

22
vendor/github.com/spf13/afero/const_win_unix.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix
// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix
package afero
import (
"syscall"
)
const BADFD = syscall.EBADFD

327
vendor/github.com/spf13/afero/copyOnWriteFs.go generated vendored Normal file
View File

@ -0,0 +1,327 @@
package afero
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
)
var _ Lstater = (*CopyOnWriteFs)(nil)
// The CopyOnWriteFs is a union filesystem: a read only base file system with
// a possibly writeable layer on top. Changes to the file system will only
// be made in the overlay: Changing an existing file in the base layer which
// is not present in the overlay will copy the file to the overlay ("changing"
// includes also calls to e.g. Chtimes(), Chmod() and Chown()).
//
// Reading directories is currently only supported via Open(), not OpenFile().
type CopyOnWriteFs struct {
base Fs
layer Fs
}
func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
return &CopyOnWriteFs{base: base, layer: layer}
}
// Returns true if the file is not in the overlay
func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
if _, err := u.layer.Stat(name); err == nil {
return false, nil
}
_, err := u.base.Stat(name)
if err != nil {
if oerr, ok := err.(*os.PathError); ok {
if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
return false, nil
}
}
if err == syscall.ENOENT {
return false, nil
}
}
return true, err
}
func (u *CopyOnWriteFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chmod(name, mode)
}
func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chown(name, uid, gid)
}
func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
fi, err := u.layer.Stat(name)
if err != nil {
isNotExist := u.isNotExist(err)
if isNotExist {
return u.base.Stat(name)
}
return nil, err
}
return fi, nil
}
func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
llayer, ok1 := u.layer.(Lstater)
lbase, ok2 := u.base.(Lstater)
if ok1 {
fi, b, err := llayer.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
if ok2 {
fi, b, err := lbase.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
fi, err := u.Stat(name)
return fi, false, err
}
func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error {
if slayer, ok := u.layer.(Linker); ok {
return slayer.SymlinkIfPossible(oldname, newname)
}
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}
func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) {
if rlayer, ok := u.layer.(LinkReader); ok {
return rlayer.ReadlinkIfPossible(name)
}
if rbase, ok := u.base.(LinkReader); ok {
return rbase.ReadlinkIfPossible(name)
}
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}
func (u *CopyOnWriteFs) isNotExist(err error) bool {
if e, ok := err.(*os.PathError); ok {
err = e.Err
}
if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
return true
}
return false
}
// Renaming files present only in the base layer is not permitted
func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
b, err := u.isBaseFile(oldname)
if err != nil {
return err
}
if b {
return syscall.EPERM
}
return u.layer.Rename(oldname, newname)
}
// Removing files present only in the base layer is not permitted. If
// a file is present in the base layer and the overlay, only the overlay
// will be removed.
func (u *CopyOnWriteFs) Remove(name string) error {
err := u.layer.Remove(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) RemoveAll(name string) error {
err := u.layer.RemoveAll(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
if b {
if err = u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
dir := filepath.Dir(name)
isaDir, err := IsDir(u.base, dir)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if isaDir {
if err = u.layer.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
isaDir, err = IsDir(u.layer, dir)
if err != nil {
return nil, err
}
if isaDir {
return u.layer.OpenFile(name, flag, perm)
}
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
}
if b {
return u.base.OpenFile(name, flag, perm)
}
return u.layer.OpenFile(name, flag, perm)
}
// This function handles the 9 different possibilities caused
// by the union which are the intersection of the following...
//
// layer: doesn't exist, exists as a file, and exists as a directory
// base: doesn't exist, exists as a file, and exists as a directory
func (u *CopyOnWriteFs) Open(name string) (File, error) {
// Since the overlay overrides the base we check that first
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
// If overlay doesn't exist, return the base (base state irrelevant)
if b {
return u.base.Open(name)
}
// If overlay is a file, return it (base state irrelevant)
dir, err := IsDir(u.layer, name)
if err != nil {
return nil, err
}
if !dir {
return u.layer.Open(name)
}
// Overlay is a directory, base state now matters.
// Base state has 3 states to check but 2 outcomes:
// A. It's a file or non-readable in the base (return just the overlay)
// B. It's an accessible directory in the base (return a UnionFile)
// If base is file or nonreadable, return overlay
dir, err = IsDir(u.base, name)
if !dir || err != nil {
return u.layer.Open(name)
}
// Both base & layer are directories
// Return union file (if opens are without error)
bfile, bErr := u.base.Open(name)
lfile, lErr := u.layer.Open(name)
// If either have errors at this point something is very wrong. Return nil and the errors
if bErr != nil || lErr != nil {
return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
return ErrFileExists
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Name() string {
return "CopyOnWriteFs"
}
func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
// This is in line with how os.MkdirAll behaves.
return nil
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Create(name string) (File, error) {
return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666)
}

114
vendor/github.com/spf13/afero/httpFs.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
)
type httpDir struct {
basePath string
fs HttpFs
}
func (d httpDir) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) ||
strings.Contains(name, "\x00") {
return nil, errors.New("http: invalid character in file path")
}
dir := string(d.basePath)
if dir == "" {
dir = "."
}
f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
if err != nil {
return nil, err
}
return f, nil
}
type HttpFs struct {
source Fs
}
func NewHttpFs(source Fs) *HttpFs {
return &HttpFs{source: source}
}
func (h HttpFs) Dir(s string) *httpDir {
return &httpDir{basePath: s, fs: h}
}
func (h HttpFs) Name() string { return "h HttpFs" }
func (h HttpFs) Create(name string) (File, error) {
return h.source.Create(name)
}
func (h HttpFs) Chmod(name string, mode os.FileMode) error {
return h.source.Chmod(name, mode)
}
func (h HttpFs) Chown(name string, uid, gid int) error {
return h.source.Chown(name, uid, gid)
}
func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return h.source.Chtimes(name, atime, mtime)
}
func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
return h.source.Mkdir(name, perm)
}
func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
return h.source.MkdirAll(path, perm)
}
func (h HttpFs) Open(name string) (http.File, error) {
f, err := h.source.Open(name)
if err == nil {
if httpfile, ok := f.(http.File); ok {
return httpfile, nil
}
}
return nil, err
}
func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return h.source.OpenFile(name, flag, perm)
}
func (h HttpFs) Remove(name string) error {
return h.source.Remove(name)
}
func (h HttpFs) RemoveAll(path string) error {
return h.source.RemoveAll(path)
}
func (h HttpFs) Rename(oldname, newname string) error {
return h.source.Rename(oldname, newname)
}
func (h HttpFs) Stat(name string) (os.FileInfo, error) {
return h.source.Stat(name)
}

View File

@ -0,0 +1,27 @@
// Copyright © 2022 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import "io/fs"
// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry
type FileInfoDirEntry struct {
fs.FileInfo
}
var _ fs.DirEntry = FileInfoDirEntry{}
func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }
func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }

298
vendor/github.com/spf13/afero/iofs.go generated vendored Normal file
View File

@ -0,0 +1,298 @@
//go:build go1.16
// +build go1.16
package afero
import (
"io"
"io/fs"
"os"
"path"
"sort"
"time"
"github.com/spf13/afero/internal/common"
)
// IOFS adopts afero.Fs to stdlib io/fs.FS
type IOFS struct {
Fs
}
func NewIOFS(fs Fs) IOFS {
return IOFS{Fs: fs}
}
var (
_ fs.FS = IOFS{}
_ fs.GlobFS = IOFS{}
_ fs.ReadDirFS = IOFS{}
_ fs.ReadFileFS = IOFS{}
_ fs.StatFS = IOFS{}
_ fs.SubFS = IOFS{}
)
func (iofs IOFS) Open(name string) (fs.File, error) {
const op = "open"
// by convention for fs.FS implementations we should perform this check
if !fs.ValidPath(name) {
return nil, iofs.wrapError(op, name, fs.ErrInvalid)
}
file, err := iofs.Fs.Open(name)
if err != nil {
return nil, iofs.wrapError(op, name, err)
}
// file should implement fs.ReadDirFile
if _, ok := file.(fs.ReadDirFile); !ok {
file = readDirFile{file}
}
return file, nil
}
func (iofs IOFS) Glob(pattern string) ([]string, error) {
const op = "glob"
// afero.Glob does not perform this check but it's required for implementations
if _, err := path.Match(pattern, ""); err != nil {
return nil, iofs.wrapError(op, pattern, err)
}
items, err := Glob(iofs.Fs, pattern)
if err != nil {
return nil, iofs.wrapError(op, pattern, err)
}
return items, nil
}
func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) {
f, err := iofs.Fs.Open(name)
if err != nil {
return nil, iofs.wrapError("readdir", name, err)
}
defer f.Close()
if rdf, ok := f.(fs.ReadDirFile); ok {
items, err := rdf.ReadDir(-1)
if err != nil {
return nil, iofs.wrapError("readdir", name, err)
}
sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() })
return items, nil
}
items, err := f.Readdir(-1)
if err != nil {
return nil, iofs.wrapError("readdir", name, err)
}
sort.Sort(byName(items))
ret := make([]fs.DirEntry, len(items))
for i := range items {
ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
}
return ret, nil
}
func (iofs IOFS) ReadFile(name string) ([]byte, error) {
const op = "readfile"
if !fs.ValidPath(name) {
return nil, iofs.wrapError(op, name, fs.ErrInvalid)
}
bytes, err := ReadFile(iofs.Fs, name)
if err != nil {
return nil, iofs.wrapError(op, name, err)
}
return bytes, nil
}
func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil }
func (IOFS) wrapError(op, path string, err error) error {
if _, ok := err.(*fs.PathError); ok {
return err // don't need to wrap again
}
return &fs.PathError{
Op: op,
Path: path,
Err: err,
}
}
// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open
type readDirFile struct {
File
}
var _ fs.ReadDirFile = readDirFile{}
func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
items, err := r.File.Readdir(n)
if err != nil {
return nil, err
}
ret := make([]fs.DirEntry, len(items))
for i := range items {
ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
}
return ret, nil
}
// FromIOFS adopts io/fs.FS to use it as afero.Fs
// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission
// To store modifications you may use afero.CopyOnWriteFs
type FromIOFS struct {
fs.FS
}
var _ Fs = FromIOFS{}
func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) }
func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) }
func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error {
return notImplemented("mkdirall", path)
}
func (f FromIOFS) Open(name string) (File, error) {
file, err := f.FS.Open(name)
if err != nil {
return nil, err
}
return fromIOFSFile{File: file, name: name}, nil
}
func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return f.Open(name)
}
func (f FromIOFS) Remove(name string) error {
return notImplemented("remove", name)
}
func (f FromIOFS) RemoveAll(path string) error {
return notImplemented("removeall", path)
}
func (f FromIOFS) Rename(oldname, newname string) error {
return notImplemented("rename", oldname)
}
func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) }
func (f FromIOFS) Name() string { return "fromiofs" }
func (f FromIOFS) Chmod(name string, mode os.FileMode) error {
return notImplemented("chmod", name)
}
func (f FromIOFS) Chown(name string, uid, gid int) error {
return notImplemented("chown", name)
}
func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error {
return notImplemented("chtimes", name)
}
type fromIOFSFile struct {
fs.File
name string
}
func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) {
readerAt, ok := f.File.(io.ReaderAt)
if !ok {
return -1, notImplemented("readat", f.name)
}
return readerAt.ReadAt(p, off)
}
func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) {
seeker, ok := f.File.(io.Seeker)
if !ok {
return -1, notImplemented("seek", f.name)
}
return seeker.Seek(offset, whence)
}
func (f fromIOFSFile) Write(p []byte) (n int, err error) {
return -1, notImplemented("write", f.name)
}
func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) {
return -1, notImplemented("writeat", f.name)
}
func (f fromIOFSFile) Name() string { return f.name }
func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) {
rdfile, ok := f.File.(fs.ReadDirFile)
if !ok {
return nil, notImplemented("readdir", f.name)
}
entries, err := rdfile.ReadDir(count)
if err != nil {
return nil, err
}
ret := make([]os.FileInfo, len(entries))
for i := range entries {
ret[i], err = entries[i].Info()
if err != nil {
return nil, err
}
}
return ret, nil
}
func (f fromIOFSFile) Readdirnames(n int) ([]string, error) {
rdfile, ok := f.File.(fs.ReadDirFile)
if !ok {
return nil, notImplemented("readdir", f.name)
}
entries, err := rdfile.ReadDir(n)
if err != nil {
return nil, err
}
ret := make([]string, len(entries))
for i := range entries {
ret[i] = entries[i].Name()
}
return ret, nil
}
func (f fromIOFSFile) Sync() error { return nil }
func (f fromIOFSFile) Truncate(size int64) error {
return notImplemented("truncate", f.name)
}
func (f fromIOFSFile) WriteString(s string) (ret int, err error) {
return -1, notImplemented("writestring", f.name)
}
func notImplemented(op, path string) error {
return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission}
}

243
vendor/github.com/spf13/afero/ioutil.go generated vendored Normal file
View File

@ -0,0 +1,243 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// byName implements sort.Interface.
type byName []os.FileInfo
func (f byName) Len() int { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
return ReadDir(a.Fs, dirname)
}
func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, err
}
list, err := f.Readdir(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Sort(byName(list))
return list, nil
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func (a Afero) ReadFile(filename string) ([]byte, error) {
return ReadFile(a.Fs, filename)
}
func ReadFile(fs Fs, filename string) ([]byte, error) {
f, err := fs.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
// It's a good but not certain bet that FileInfo will tell us exactly how much to
// read, so let's try it but be prepared for the answer to be wrong.
var n int64
if fi, err := f.Stat(); err == nil {
// Don't preallocate a huge buffer, just in case.
if size := fi.Size(); size < 1e9 {
n = size
}
}
// As initial capacity for readAll, use n + a little extra in case Size is zero,
// and to avoid another allocation after Read has filled the buffer. The readAll
// call will read into its allocated internal buffer cheaply. If the size was
// wrong, we'll either waste some space off the end or reallocate as needed, but
// in the overwhelmingly common case we'll get it just right.
return readAll(f, n+bytes.MinRead)
}
// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
buf := bytes.NewBuffer(make([]byte, 0, capacity))
// If the buffer overflows, we will get bytes.ErrTooLarge.
// Return that as an error. Any other panic remains.
defer func() {
e := recover()
if e == nil {
return
}
if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
err = panicErr
} else {
panic(e)
}
}()
_, err = buf.ReadFrom(r)
return buf.Bytes(), err
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
return readAll(r, bytes.MinRead)
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
return WriteFile(a.Fs, filename, data, perm)
}
func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var (
randNum uint32
randmu sync.Mutex
)
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextRandom() string {
randmu.Lock()
r := randNum
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
randNum = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFile creates a new temporary file in the directory dir,
// opens the file for reading and writing, and returns the resulting *os.File.
// The filename is generated by taking pattern and adding a random
// string to the end. If pattern includes a "*", the random string
// replaces the last "*".
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func (a Afero) TempFile(dir, pattern string) (f File, err error) {
return TempFile(a.Fs, dir, pattern)
}
func TempFile(fs Fs, dir, pattern string) (f File, err error) {
if dir == "" {
dir = os.TempDir()
}
var prefix, suffix string
if pos := strings.LastIndex(pattern, "*"); pos != -1 {
prefix, suffix = pattern[:pos], pattern[pos+1:]
} else {
prefix = pattern
}
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextRandom()+suffix)
f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
randNum = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}
// TempDir creates a new temporary directory in the directory dir
// with a name beginning with prefix and returns the path of the
// new directory. If dir is the empty string, TempDir uses the
// default directory for temporary files (see os.TempDir).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
func (a Afero) TempDir(dir, prefix string) (name string, err error) {
return TempDir(a.Fs, dir, prefix)
}
func TempDir(fs Fs, dir, prefix string) (name string, err error) {
if dir == "" {
dir = os.TempDir()
}
nconflict := 0
for i := 0; i < 10000; i++ {
try := filepath.Join(dir, prefix+nextRandom())
err = fs.Mkdir(try, 0o700)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
randNum = reseed()
randmu.Unlock()
}
continue
}
if err == nil {
name = try
}
break
}
return
}

27
vendor/github.com/spf13/afero/lstater.go generated vendored Normal file
View File

@ -0,0 +1,27 @@
// Copyright © 2018 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
)
// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Else it will call Stat.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
type Lstater interface {
LstatIfPossible(name string) (os.FileInfo, bool, error)
}

110
vendor/github.com/spf13/afero/match.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"sort"
"strings"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
func Glob(fs Fs, pattern string) (matches []string, err error) {
if !hasMeta(pattern) {
// Lstat not supported by all filesystems.
if _, err = lstatIfPossible(fs, pattern); err != nil {
return nil, nil
}
return []string{pattern}, nil
}
dir, file := filepath.Split(pattern)
switch dir {
case "":
dir = "."
case string(filepath.Separator):
// nothing
default:
dir = dir[0 : len(dir)-1] // chop off trailing separator
}
if !hasMeta(dir) {
return glob(fs, dir, file, nil)
}
var m []string
m, err = Glob(fs, dir)
if err != nil {
return
}
for _, d := range m {
matches, err = glob(fs, d, file, matches)
if err != nil {
return
}
}
return
}
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
m = matches
fi, err := fs.Stat(dir)
if err != nil {
return
}
if !fi.IsDir() {
return
}
d, err := fs.Open(dir)
if err != nil {
return
}
defer d.Close()
names, _ := d.Readdirnames(-1)
sort.Strings(names)
for _, n := range names {
matched, err := filepath.Match(pattern, n)
if err != nil {
return m, err
}
if matched {
m = append(m, filepath.Join(dir, n))
}
}
return
}
// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
return strings.ContainsAny(path, "*?[")
}

37
vendor/github.com/spf13/afero/mem/dir.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
type Dir interface {
Len() int
Names() []string
Files() []*FileData
Add(*FileData)
Remove(*FileData)
}
func RemoveFromMemDir(dir *FileData, f *FileData) {
dir.memDir.Remove(f)
}
func AddToMemDir(dir *FileData, f *FileData) {
dir.memDir.Add(f)
}
func InitializeDir(d *FileData) {
if d.memDir == nil {
d.dir = true
d.memDir = &DirMap{}
}
}

43
vendor/github.com/spf13/afero/mem/dirmap.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import "sort"
type DirMap map[string]*FileData
func (m DirMap) Len() int { return len(m) }
func (m DirMap) Add(f *FileData) { m[f.name] = f }
func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
func (m DirMap) Files() (files []*FileData) {
for _, f := range m {
files = append(files, f)
}
sort.Sort(filesSorter(files))
return files
}
// implement sort.Interface for []*FileData
type filesSorter []*FileData
func (s filesSorter) Len() int { return len(s) }
func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
func (m DirMap) Names() (names []string) {
for x := range m {
names = append(names, x)
}
return names
}

359
vendor/github.com/spf13/afero/mem/file.go generated vendored Normal file
View File

@ -0,0 +1,359 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import (
"bytes"
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/spf13/afero/internal/common"
)
const FilePathSeparator = string(filepath.Separator)
var _ fs.ReadDirFile = &File{}
type File struct {
// atomic requires 64-bit alignment for struct field access
at int64
readDirCount int64
closed bool
readOnly bool
fileData *FileData
}
func NewFileHandle(data *FileData) *File {
return &File{fileData: data}
}
func NewReadOnlyFileHandle(data *FileData) *File {
return &File{fileData: data, readOnly: true}
}
func (f File) Data() *FileData {
return f.fileData
}
type FileData struct {
sync.Mutex
name string
data []byte
memDir Dir
dir bool
mode os.FileMode
modtime time.Time
uid int
gid int
}
func (d *FileData) Name() string {
d.Lock()
defer d.Unlock()
return d.name
}
func CreateFile(name string) *FileData {
return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
}
func CreateDir(name string) *FileData {
return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()}
}
func ChangeFileName(f *FileData, newname string) {
f.Lock()
f.name = newname
f.Unlock()
}
func SetMode(f *FileData, mode os.FileMode) {
f.Lock()
f.mode = mode
f.Unlock()
}
func SetModTime(f *FileData, mtime time.Time) {
f.Lock()
setModTime(f, mtime)
f.Unlock()
}
func setModTime(f *FileData, mtime time.Time) {
f.modtime = mtime
}
func SetUID(f *FileData, uid int) {
f.Lock()
f.uid = uid
f.Unlock()
}
func SetGID(f *FileData, gid int) {
f.Lock()
f.gid = gid
f.Unlock()
}
func GetFileInfo(f *FileData) *FileInfo {
return &FileInfo{f}
}
func (f *File) Open() error {
atomic.StoreInt64(&f.at, 0)
atomic.StoreInt64(&f.readDirCount, 0)
f.fileData.Lock()
f.closed = false
f.fileData.Unlock()
return nil
}
func (f *File) Close() error {
f.fileData.Lock()
f.closed = true
if !f.readOnly {
setModTime(f.fileData, time.Now())
}
f.fileData.Unlock()
return nil
}
func (f *File) Name() string {
return f.fileData.Name()
}
func (f *File) Stat() (os.FileInfo, error) {
return &FileInfo{f.fileData}, nil
}
func (f *File) Sync() error {
return nil
}
func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
if !f.fileData.dir {
return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
}
var outLength int64
f.fileData.Lock()
files := f.fileData.memDir.Files()[f.readDirCount:]
if count > 0 {
if len(files) < count {
outLength = int64(len(files))
} else {
outLength = int64(count)
}
if len(files) == 0 {
err = io.EOF
}
} else {
outLength = int64(len(files))
}
f.readDirCount += outLength
f.fileData.Unlock()
res = make([]os.FileInfo, outLength)
for i := range res {
res[i] = &FileInfo{files[i]}
}
return res, err
}
func (f *File) Readdirnames(n int) (names []string, err error) {
fi, err := f.Readdir(n)
names = make([]string, len(fi))
for i, f := range fi {
_, names[i] = filepath.Split(f.Name())
}
return names, err
}
// Implements fs.ReadDirFile
func (f *File) ReadDir(n int) ([]fs.DirEntry, error) {
fi, err := f.Readdir(n)
if err != nil {
return nil, err
}
di := make([]fs.DirEntry, len(fi))
for i, f := range fi {
di[i] = common.FileInfoDirEntry{FileInfo: f}
}
return di, nil
}
func (f *File) Read(b []byte) (n int, err error) {
f.fileData.Lock()
defer f.fileData.Unlock()
if f.closed {
return 0, ErrFileClosed
}
if len(b) > 0 && int(f.at) == len(f.fileData.data) {
return 0, io.EOF
}
if int(f.at) > len(f.fileData.data) {
return 0, io.ErrUnexpectedEOF
}
if len(f.fileData.data)-int(f.at) >= len(b) {
n = len(b)
} else {
n = len(f.fileData.data) - int(f.at)
}
copy(b, f.fileData.data[f.at:f.at+int64(n)])
atomic.AddInt64(&f.at, int64(n))
return
}
func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
prev := atomic.LoadInt64(&f.at)
atomic.StoreInt64(&f.at, off)
n, err = f.Read(b)
atomic.StoreInt64(&f.at, prev)
return
}
func (f *File) Truncate(size int64) error {
if f.closed {
return ErrFileClosed
}
if f.readOnly {
return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
}
if size < 0 {
return ErrOutOfRange
}
f.fileData.Lock()
defer f.fileData.Unlock()
if size > int64(len(f.fileData.data)) {
diff := size - int64(len(f.fileData.data))
f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...)
} else {
f.fileData.data = f.fileData.data[0:size]
}
setModTime(f.fileData, time.Now())
return nil
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.closed {
return 0, ErrFileClosed
}
switch whence {
case io.SeekStart:
atomic.StoreInt64(&f.at, offset)
case io.SeekCurrent:
atomic.AddInt64(&f.at, offset)
case io.SeekEnd:
atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
}
return f.at, nil
}
func (f *File) Write(b []byte) (n int, err error) {
if f.closed {
return 0, ErrFileClosed
}
if f.readOnly {
return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
}
n = len(b)
cur := atomic.LoadInt64(&f.at)
f.fileData.Lock()
defer f.fileData.Unlock()
diff := cur - int64(len(f.fileData.data))
var tail []byte
if n+int(cur) < len(f.fileData.data) {
tail = f.fileData.data[n+int(cur):]
}
if diff > 0 {
f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...)
f.fileData.data = append(f.fileData.data, tail...)
} else {
f.fileData.data = append(f.fileData.data[:cur], b...)
f.fileData.data = append(f.fileData.data, tail...)
}
setModTime(f.fileData, time.Now())
atomic.AddInt64(&f.at, int64(n))
return
}
func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
atomic.StoreInt64(&f.at, off)
return f.Write(b)
}
func (f *File) WriteString(s string) (ret int, err error) {
return f.Write([]byte(s))
}
func (f *File) Info() *FileInfo {
return &FileInfo{f.fileData}
}
type FileInfo struct {
*FileData
}
// Implements os.FileInfo
func (s *FileInfo) Name() string {
s.Lock()
_, name := filepath.Split(s.name)
s.Unlock()
return name
}
func (s *FileInfo) Mode() os.FileMode {
s.Lock()
defer s.Unlock()
return s.mode
}
func (s *FileInfo) ModTime() time.Time {
s.Lock()
defer s.Unlock()
return s.modtime
}
func (s *FileInfo) IsDir() bool {
s.Lock()
defer s.Unlock()
return s.dir
}
func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
if s.IsDir() {
return int64(42)
}
s.Lock()
defer s.Unlock()
return int64(len(s.data))
}
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("out of range")
ErrTooLarge = errors.New("too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)
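A minimal sketch of the mem package's file model (illustrative only, not part of the vendored file): FileData holds the bytes, while each File handle keeps its own offset over that shared data. The path and contents are hypothetical.

package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	// FileData holds the contents; File is a handle with its own read/write offset.
	data := mem.CreateFile("/notes.txt")

	w := mem.NewFileHandle(data)
	w.WriteString("hello afero") // writes into the shared FileData buffer
	w.Close()

	// A read-only handle over the same FileData sees the written bytes.
	r := mem.NewReadOnlyFileHandle(data)
	buf := make([]byte, 5)
	n, _ := r.Read(buf)
	fmt.Println(string(buf[:n])) // "hello"
}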

422
vendor/github.com/spf13/afero/memmap.go generated vendored Normal file
View File

@ -0,0 +1,422 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/spf13/afero/mem"
)
const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod()
type MemMapFs struct {
mu sync.RWMutex
data map[string]*mem.FileData
init sync.Once
}
func NewMemMapFs() Fs {
return &MemMapFs{}
}
func (m *MemMapFs) getData() map[string]*mem.FileData {
m.init.Do(func() {
m.data = make(map[string]*mem.FileData)
// Root should always exist, right?
// TODO: what about windows?
root := mem.CreateDir(FilePathSeparator)
mem.SetMode(root, os.ModeDir|0o755)
m.data[FilePathSeparator] = root
})
return m.data
}
func (*MemMapFs) Name() string { return "MemMapFS" }
func (m *MemMapFs) Create(name string) (File, error) {
name = normalizePath(name)
m.mu.Lock()
file := mem.CreateFile(name)
m.getData()[name] = file
m.registerWithParent(file, 0)
m.mu.Unlock()
return mem.NewFileHandle(file), nil
}
func (m *MemMapFs) unRegisterWithParent(fileName string) error {
f, err := m.lockfreeOpen(fileName)
if err != nil {
return err
}
parent := m.findParent(f)
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
parent.Lock()
mem.RemoveFromMemDir(parent, f)
parent.Unlock()
return nil
}
func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
pdir, _ := filepath.Split(f.Name())
pdir = filepath.Clean(pdir)
pfile, err := m.lockfreeOpen(pdir)
if err != nil {
return nil
}
return pfile
}
func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
if f == nil {
return
}
parent := m.findParent(f)
if parent == nil {
pdir := filepath.Dir(filepath.Clean(f.Name()))
err := m.lockfreeMkdir(pdir, perm)
if err != nil {
// log.Println("Mkdir error:", err)
return
}
parent, err = m.lockfreeOpen(pdir)
if err != nil {
// log.Println("Open after Mkdir error:", err)
return
}
}
parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
name = normalizePath(name)
x, ok := m.getData()[name]
if ok {
// Only return ErrFileExists if it's a file, not a directory.
i := mem.FileInfo{FileData: x}
if !i.IsDir() {
return ErrFileExists
}
} else {
item := mem.CreateDir(name)
mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
m.registerWithParent(item, perm)
}
return nil
}
func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
perm &= chmodBits
name = normalizePath(name)
m.mu.RLock()
_, ok := m.getData()[name]
m.mu.RUnlock()
if ok {
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
m.mu.Lock()
// Double check that it doesn't exist.

if _, ok := m.getData()[name]; ok {
m.mu.Unlock()
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
item := mem.CreateDir(name)
mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
m.registerWithParent(item, perm)
m.mu.Unlock()
return m.setFileMode(name, perm|os.ModeDir)
}
func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
err := m.Mkdir(path, perm)
if err != nil {
if err.(*os.PathError).Err == ErrFileExists {
return nil
}
return err
}
return nil
}
// Handle some relative paths
func normalizePath(path string) string {
path = filepath.Clean(path)
switch path {
case ".":
return FilePathSeparator
case "..":
return FilePathSeparator
default:
return path
}
}
func (m *MemMapFs) Open(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewReadOnlyFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) openWrite(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) open(name string) (*mem.FileData, error) {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
}
return f, nil
}
func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
name = normalizePath(name)
f, ok := m.getData()[name]
if ok {
return f, nil
} else {
return nil, ErrFileNotFound
}
}
func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
perm &= chmodBits
chmod := false
file, err := m.openWrite(name)
if err == nil && (flag&os.O_EXCL > 0) {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists}
}
if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
file, err = m.Create(name)
chmod = true
}
if err != nil {
return nil, err
}
if flag == os.O_RDONLY {
file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
}
if flag&os.O_APPEND > 0 {
_, err = file.Seek(0, io.SeekEnd)
if err != nil {
file.Close()
return nil, err
}
}
if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
err = file.Truncate(0)
if err != nil {
file.Close()
return nil, err
}
}
if chmod {
return file, m.setFileMode(name, perm)
}
return file, nil
}
func (m *MemMapFs) Remove(name string) error {
name = normalizePath(name)
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.getData()[name]; ok {
err := m.unRegisterWithParent(name)
if err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
delete(m.getData(), name)
} else {
return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
}
return nil
}
func (m *MemMapFs) RemoveAll(path string) error {
path = normalizePath(path)
m.mu.Lock()
m.unRegisterWithParent(path)
m.mu.Unlock()
m.mu.RLock()
defer m.mu.RUnlock()
for p := range m.getData() {
if p == path || strings.HasPrefix(p, path+FilePathSeparator) {
m.mu.RUnlock()
m.mu.Lock()
delete(m.getData(), p)
m.mu.Unlock()
m.mu.RLock()
}
}
return nil
}
func (m *MemMapFs) Rename(oldname, newname string) error {
oldname = normalizePath(oldname)
newname = normalizePath(newname)
if oldname == newname {
return nil
}
m.mu.RLock()
defer m.mu.RUnlock()
if _, ok := m.getData()[oldname]; ok {
m.mu.RUnlock()
m.mu.Lock()
m.unRegisterWithParent(oldname)
fileData := m.getData()[oldname]
delete(m.getData(), oldname)
mem.ChangeFileName(fileData, newname)
m.getData()[newname] = fileData
m.registerWithParent(fileData, 0)
m.mu.Unlock()
m.mu.RLock()
} else {
return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
}
for p, fileData := range m.getData() {
if strings.HasPrefix(p, oldname+FilePathSeparator) {
m.mu.RUnlock()
m.mu.Lock()
delete(m.getData(), p)
p := strings.Replace(p, oldname, newname, 1)
m.getData()[p] = fileData
m.mu.Unlock()
m.mu.RLock()
}
}
return nil
}
func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fileInfo, err := m.Stat(name)
return fileInfo, false, err
}
func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
f, err := m.Open(name)
if err != nil {
return nil, err
}
fi := mem.GetFileInfo(f.(*mem.File).Data())
return fi, nil
}
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
mode &= chmodBits
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
}
prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits
mode = prevOtherBits | mode
return m.setFileMode(name, mode)
}
func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetMode(f, mode)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) Chown(name string, uid, gid int) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound}
}
mem.SetUID(f, uid)
mem.SetGID(f, gid)
return nil
}
func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetModTime(f, mtime)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) List() {
for _, x := range m.data {
y := mem.FileInfo{FileData: x}
fmt.Println(x.Name(), y.Size())
}
}
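A brief usage sketch of MemMapFs as described above (illustrative, not part of the vendored file): everything lives in the in-memory map guarded by the RWMutex, Create/MkdirAll register entries with their parent directories, and Open hands back a read-only handle. The paths and contents are hypothetical.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// MemMapFs stores all files in a map; nothing touches the real disk.
	fs := afero.NewMemMapFs()

	// MkdirAll and Create register each entry with its parent directory.
	fs.MkdirAll("/repo/docs", 0o755)
	f, _ := fs.Create("/repo/docs/readme.md")
	f.WriteString("in-memory only")
	f.Close()

	// Open returns a read-only handle; writes go through Create/OpenFile.
	g, _ := fs.Open("/repo/docs/readme.md")
	fi, _ := g.Stat()
	fmt.Println(fi.Name(), fi.Size()) // readme.md 14
	g.Close()
}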

113
vendor/github.com/spf13/afero/os.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"time"
)
var _ Lstater = (*OsFs)(nil)
// OsFs is a Fs implementation that uses functions provided by the os package.
//
// For details in any method, check the documentation of the os package
// (http://golang.org/pkg/os/).
type OsFs struct{}
func NewOsFs() Fs {
return &OsFs{}
}
func (OsFs) Name() string { return "OsFs" }
func (OsFs) Create(name string) (File, error) {
f, e := os.Create(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Mkdir(name string, perm os.FileMode) error {
return os.Mkdir(name, perm)
}
func (OsFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (OsFs) Open(name string) (File, error) {
f, e := os.Open(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, e := os.OpenFile(name, flag, perm)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Remove(name string) error {
return os.Remove(name)
}
func (OsFs) RemoveAll(path string) error {
return os.RemoveAll(path)
}
func (OsFs) Rename(oldname, newname string) error {
return os.Rename(oldname, newname)
}
func (OsFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(name)
}
func (OsFs) Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
func (OsFs) Chown(name string, uid, gid int) error {
return os.Chown(name, uid, gid)
}
func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(name, atime, mtime)
}
func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fi, err := os.Lstat(name)
return fi, true, err
}
func (OsFs) SymlinkIfPossible(oldname, newname string) error {
return os.Symlink(oldname, newname)
}
func (OsFs) ReadlinkIfPossible(name string) (string, error) {
return os.Readlink(name)
}
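A short sketch of OsFs in use (illustrative only): every method is a thin pass-through to the corresponding os function, so it behaves exactly like direct os calls. The temp-file name is hypothetical.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/spf13/afero"
)

func main() {
	// OsFs is a thin wrapper: every call goes straight to the os package.
	fs := afero.NewOsFs()

	path := filepath.Join(os.TempDir(), "afero-osfs-demo.txt")
	f, err := fs.Create(path) // equivalent to os.Create(path)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	f.WriteString("written through OsFs")
	f.Close()

	fi, _ := fs.Stat(path) // equivalent to os.Stat(path)
	fmt.Println(fi.Size())
	fs.Remove(path) // clean up, equivalent to os.Remove
}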

106
vendor/github.com/spf13/afero/path.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"sort"
)
// readDirNames reads the directory named by dirname and returns
// a sorted list of directory entries.
// adapted from https://golang.org/src/path/filepath/path.go
func readDirNames(fs Fs, dirname string) ([]string, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, err
}
names, err := f.Readdirnames(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
err := walkFn(path, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() {
return nil
}
names, err := readDirNames(fs, path)
if err != nil {
return walkFn(path, info, err)
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := lstatIfPossible(fs, filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// if the filesystem supports it, use Lstat, else use fs.Stat
func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
if lfs, ok := fs.(Lstater); ok {
fi, _, err := lfs.LstatIfPossible(path)
return fi, err
}
return fs.Stat(path)
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn. The files are walked in lexical
// order, which makes the output deterministic but means that for very
// large directories Walk can be inefficient.
// Walk does not follow symbolic links.
func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
return Walk(a.Fs, root, walkFn)
}
func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
info, err := lstatIfPossible(fs, root)
if err != nil {
return walkFn(root, nil, err)
}
return walk(fs, root, info, walkFn)
}
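An illustrative sketch of Walk over an in-memory filesystem (not part of the vendored file). It assumes afero's WriteFile helper from its ioutil file; the paths are hypothetical. As documented above, entries are visited in lexical order.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "/data/b.txt", []byte("b"), 0o644)
	afero.WriteFile(fs, "/data/a.txt", []byte("a"), 0o644)

	// Walk visits the root first, then children in sorted (lexical) order:
	// /data, /data/a.txt, /data/b.txt
	afero.Walk(fs, "/data", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path)
		return nil
	})
}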

96
vendor/github.com/spf13/afero/readonlyfs.go generated vendored Normal file
View File

@ -0,0 +1,96 @@
package afero
import (
"os"
"syscall"
"time"
)
var _ Lstater = (*ReadOnlyFs)(nil)
type ReadOnlyFs struct {
source Fs
}
func NewReadOnlyFs(source Fs) Fs {
return &ReadOnlyFs{source: source}
}
func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
return ReadDir(r.source, name)
}
func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Chown(n string, uid, gid int) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Name() string {
return "ReadOnlyFilter"
}
func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
return r.source.Stat(name)
}
func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
if lsf, ok := r.source.(Lstater); ok {
return lsf.LstatIfPossible(name)
}
fi, err := r.Stat(name)
return fi, false, err
}
func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error {
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}
func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) {
if srdr, ok := r.source.(LinkReader); ok {
return srdr.ReadlinkIfPossible(name)
}
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}
func (r *ReadOnlyFs) Rename(o, n string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) RemoveAll(p string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Remove(n string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
return nil, syscall.EPERM
}
return r.source.OpenFile(name, flag, perm)
}
func (r *ReadOnlyFs) Open(n string) (File, error) {
return r.source.Open(n)
}
func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Create(n string) (File, error) {
return nil, syscall.EPERM
}
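A sketch of wrapping a filesystem in ReadOnlyFs (illustrative only; it assumes afero's WriteFile/ReadFile helpers and hypothetical paths): mutating calls fail with EPERM while reads pass straight through to the source.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/config.yml", []byte("debug: true"), 0o644)

	// All writes through the wrapper return syscall.EPERM; reads hit the source.
	ro := afero.NewReadOnlyFs(base)

	if _, err := ro.Create("/other.yml"); err != nil {
		fmt.Println("create blocked:", err) // "operation not permitted"
	}

	data, _ := afero.ReadFile(ro, "/config.yml")
	fmt.Println(string(data)) // "debug: true"
}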

223
vendor/github.com/spf13/afero/regexpfs.go generated vendored Normal file
View File

@ -0,0 +1,223 @@
package afero
import (
"os"
"regexp"
"syscall"
"time"
)
// The RegexpFs filters files (not directories) by regular expression. Only
// files matching the given regexp will be allowed; all others get an ENOENT error
// ("No such file or directory").
type RegexpFs struct {
re *regexp.Regexp
source Fs
}
func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
return &RegexpFs{source: source, re: re}
}
type RegexpFile struct {
f File
re *regexp.Regexp
}
func (r *RegexpFs) matchesName(name string) error {
if r.re == nil {
return nil
}
if r.re.MatchString(name) {
return nil
}
return syscall.ENOENT
}
func (r *RegexpFs) dirOrMatches(name string) error {
dir, err := IsDir(r.source, name)
if err != nil {
return err
}
if dir {
return nil
}
return r.matchesName(name)
}
func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chtimes(name, a, m)
}
func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chmod(name, mode)
}
func (r *RegexpFs) Chown(name string, uid, gid int) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chown(name, uid, gid)
}
func (r *RegexpFs) Name() string {
return "RegexpFs"
}
func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.Stat(name)
}
func (r *RegexpFs) Rename(oldname, newname string) error {
dir, err := IsDir(r.source, oldname)
if err != nil {
return err
}
if dir {
return nil
}
if err := r.matchesName(oldname); err != nil {
return err
}
if err := r.matchesName(newname); err != nil {
return err
}
return r.source.Rename(oldname, newname)
}
func (r *RegexpFs) RemoveAll(p string) error {
dir, err := IsDir(r.source, p)
if err != nil {
return err
}
if !dir {
if err := r.matchesName(p); err != nil {
return err
}
}
return r.source.RemoveAll(p)
}
func (r *RegexpFs) Remove(name string) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Remove(name)
}
func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.OpenFile(name, flag, perm)
}
func (r *RegexpFs) Open(name string) (File, error) {
dir, err := IsDir(r.source, name)
if err != nil {
return nil, err
}
if !dir {
if err := r.matchesName(name); err != nil {
return nil, err
}
}
f, err := r.source.Open(name)
if err != nil {
return nil, err
}
return &RegexpFile{f: f, re: r.re}, nil
}
func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
return r.source.Mkdir(n, p)
}
func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
return r.source.MkdirAll(n, p)
}
func (r *RegexpFs) Create(name string) (File, error) {
if err := r.matchesName(name); err != nil {
return nil, err
}
return r.source.Create(name)
}
func (f *RegexpFile) Close() error {
return f.f.Close()
}
func (f *RegexpFile) Read(s []byte) (int, error) {
return f.f.Read(s)
}
func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
return f.f.ReadAt(s, o)
}
func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
return f.f.Seek(o, w)
}
func (f *RegexpFile) Write(s []byte) (int, error) {
return f.f.Write(s)
}
func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
return f.f.WriteAt(s, o)
}
func (f *RegexpFile) Name() string {
return f.f.Name()
}
func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
var rfi []os.FileInfo
rfi, err = f.f.Readdir(c)
if err != nil {
return nil, err
}
for _, i := range rfi {
if i.IsDir() || f.re.MatchString(i.Name()) {
fi = append(fi, i)
}
}
return fi, nil
}
func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
fi, err := f.Readdir(c)
if err != nil {
return nil, err
}
for _, s := range fi {
n = append(n, s.Name())
}
return n, nil
}
func (f *RegexpFile) Stat() (os.FileInfo, error) {
return f.f.Stat()
}
func (f *RegexpFile) Sync() error {
return f.f.Sync()
}
func (f *RegexpFile) Truncate(s int64) error {
return f.f.Truncate(s)
}
func (f *RegexpFile) WriteString(s string) (int, error) {
return f.f.WriteString(s)
}
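An illustrative sketch of RegexpFs (not part of the vendored file; it assumes afero's WriteFile helper and hypothetical paths): only file names matching the regexp are visible, directories always pass, and everything else reports ENOENT.

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	afero.WriteFile(base, "/logs/app.log", []byte("ok"), 0o644)
	afero.WriteFile(base, "/logs/app.tmp", []byte("junk"), 0o644)

	// Only *.log files are visible through the filter; directories always pass.
	filtered := afero.NewRegexpFs(base, regexp.MustCompile(`\.log$`))

	if _, err := filtered.Stat("/logs/app.log"); err == nil {
		fmt.Println("app.log visible")
	}
	if _, err := filtered.Stat("/logs/app.tmp"); err != nil {
		fmt.Println("app.tmp hidden:", err) // "no such file or directory"
	}
}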

55
vendor/github.com/spf13/afero/symlink.go generated vendored Normal file
View File

@ -0,0 +1,55 @@
// Copyright © 2018 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
)
// Symlinker is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It indicates support for 3 symlink related interfaces that implement the
// behaviors of the os methods:
// - Lstat
// - Symlink, and
// - Readlink
type Symlinker interface {
Lstater
Linker
LinkReader
}
// Linker is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Symlink if the filesystem itself is, or delegates to, the os filesystem,
// or if the filesystem otherwise supports symlinks.
type Linker interface {
SymlinkIfPossible(oldname, newname string) error
}
// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
// does not support symlinks either directly or through its delegated filesystem,
// as expressed by support for the Linker interface.
var ErrNoSymlink = errors.New("symlink not supported")
// LinkReader is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
type LinkReader interface {
ReadlinkIfPossible(name string) (string, error)
}
// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system
// does not support the readlink operation either directly or through its delegated filesystem,
// as expressed by support for the LinkReader interface.
var ErrNoReadlink = errors.New("readlink not supported")
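A brief sketch of how these optional interfaces are meant to be probed (illustrative only): callers type-assert the Fs to Linker (or Lstater/LinkReader) and fall back when the assertion fails. The helper name and paths are hypothetical; MemMapFs does not implement Linker, so the probe reports no support.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// symlinkIfSupported creates a symlink only when the Fs implements Linker.
func symlinkIfSupported(fs afero.Fs, oldname, newname string) error {
	if linker, ok := fs.(afero.Linker); ok {
		return linker.SymlinkIfPossible(oldname, newname)
	}
	return afero.ErrNoSymlink
}

func main() {
	err := symlinkIfSupported(afero.NewMemMapFs(), "/a", "/b")
	fmt.Println(err) // "symlink not supported"
}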

330
vendor/github.com/spf13/afero/unionFile.go generated vendored Normal file
View File

@ -0,0 +1,330 @@
package afero
import (
"io"
"os"
"path/filepath"
"syscall"
)
// The UnionFile implements the afero.File interface and will be returned
// when reading a directory present at least in the overlay or opening a file
// for writing.
//
// The calls to
// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
// base and the overlay - for files present in both layers, only those
// from the overlay will be used.
//
// When opening files for writing (Create() / OpenFile() with the right flags)
// the operations will be done in both layers, starting with the overlay. A
// successful read in the overlay will move the cursor position in the base layer
// by the number of bytes read.
type UnionFile struct {
Base File
Layer File
Merger DirsMerger
off int
files []os.FileInfo
}
func (f *UnionFile) Close() error {
// first close base, so we have a newer timestamp in the overlay. If we'd close
// the overlay first, we'd get a cacheStale the next time we access this file
// -> cache would be useless ;-)
if f.Base != nil {
f.Base.Close()
}
if f.Layer != nil {
return f.Layer.Close()
}
return BADFD
}
func (f *UnionFile) Read(s []byte) (int, error) {
if f.Layer != nil {
n, err := f.Layer.Read(s)
if (err == nil || err == io.EOF) && f.Base != nil {
// advance the file position also in the base file, the next
// call may be a write at this position (or a seek with SEEK_CUR)
if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil {
// only overwrite err in case the seek fails: we need to
// report an eventual io.EOF to the caller
err = seekErr
}
}
return n, err
}
if f.Base != nil {
return f.Base.Read(s)
}
return 0, BADFD
}
func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
if f.Layer != nil {
n, err := f.Layer.ReadAt(s, o)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o+int64(n), io.SeekStart)
}
return n, err
}
if f.Base != nil {
return f.Base.ReadAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
if f.Layer != nil {
pos, err = f.Layer.Seek(o, w)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o, w)
}
return pos, err
}
if f.Base != nil {
return f.Base.Seek(o, w)
}
return 0, BADFD
}
func (f *UnionFile) Write(s []byte) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.Write(s)
if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
_, err = f.Base.Write(s)
}
return n, err
}
if f.Base != nil {
return f.Base.Write(s)
}
return 0, BADFD
}
func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteAt(s, o)
if err == nil && f.Base != nil {
_, err = f.Base.WriteAt(s, o)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Name() string {
if f.Layer != nil {
return f.Layer.Name()
}
return f.Base.Name()
}
// DirsMerger is how UnionFile weaves two directories together.
// It takes the FileInfo slices from the layer and the base and returns a
// single view.
type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
files := make(map[string]os.FileInfo)
for _, fi := range lofi {
files[fi.Name()] = fi
}
for _, fi := range bofi {
if _, exists := files[fi.Name()]; !exists {
files[fi.Name()] = fi
}
}
rfi := make([]os.FileInfo, len(files))
i := 0
for _, fi := range files {
rfi[i] = fi
i++
}
return rfi, nil
}
// Readdir will weave the two directories together and
// return a single view of the overlayed directories.
// At the end of the directory view, the error is io.EOF if c > 0.
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
var merge DirsMerger = f.Merger
if merge == nil {
merge = defaultUnionMergeDirsFn
}
if f.off == 0 {
var lfi []os.FileInfo
if f.Layer != nil {
lfi, err = f.Layer.Readdir(-1)
if err != nil {
return nil, err
}
}
var bfi []os.FileInfo
if f.Base != nil {
bfi, err = f.Base.Readdir(-1)
if err != nil {
return nil, err
}
}
merged, err := merge(lfi, bfi)
if err != nil {
return nil, err
}
f.files = append(f.files, merged...)
}
files := f.files[f.off:]
if c <= 0 {
return files, nil
}
if len(files) == 0 {
return nil, io.EOF
}
if c > len(files) {
c = len(files)
}
defer func() { f.off += c }()
return files[:c], nil
}
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
rfi, err := f.Readdir(c)
if err != nil {
return nil, err
}
var names []string
for _, fi := range rfi {
names = append(names, fi.Name())
}
return names, nil
}
func (f *UnionFile) Stat() (os.FileInfo, error) {
if f.Layer != nil {
return f.Layer.Stat()
}
if f.Base != nil {
return f.Base.Stat()
}
return nil, BADFD
}
func (f *UnionFile) Sync() (err error) {
if f.Layer != nil {
err = f.Layer.Sync()
if err == nil && f.Base != nil {
err = f.Base.Sync()
}
return err
}
if f.Base != nil {
return f.Base.Sync()
}
return BADFD
}
func (f *UnionFile) Truncate(s int64) (err error) {
if f.Layer != nil {
err = f.Layer.Truncate(s)
if err == nil && f.Base != nil {
err = f.Base.Truncate(s)
}
return err
}
if f.Base != nil {
return f.Base.Truncate(s)
}
return BADFD
}
func (f *UnionFile) WriteString(s string) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteString(s)
if err == nil && f.Base != nil {
_, err = f.Base.WriteString(s)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteString(s)
}
return 0, BADFD
}
func copyFile(base Fs, layer Fs, name string, bfh File) error {
// First make sure the directory exists
exists, err := Exists(layer, filepath.Dir(name))
if err != nil {
return err
}
if !exists {
err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME?
if err != nil {
return err
}
}
// Create the file on the overlay
lfh, err := layer.Create(name)
if err != nil {
return err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
layer.Remove(name)
lfh.Close()
return err
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
layer.Remove(name)
lfh.Close()
return syscall.EIO
}
err = lfh.Close()
if err != nil {
layer.Remove(name)
lfh.Close()
return err
}
return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
}
func copyToLayer(base Fs, layer Fs, name string) error {
bfh, err := base.Open(name)
if err != nil {
return err
}
defer bfh.Close()
return copyFile(base, layer, name, bfh)
}
func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error {
bfh, err := base.OpenFile(name, flag, perm)
if err != nil {
return err
}
defer bfh.Close()
return copyFile(base, layer, name, bfh)
}
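An illustrative sketch of the directory weaving described above (not part of the vendored file): a UnionFile built directly from two open directory handles merges their listings with defaultUnionMergeDirsFn when no Merger is set. It assumes afero's WriteFile helper; paths are hypothetical, and the merged order is unspecified because the default merger iterates a map.

package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()
	layer := afero.NewMemMapFs()
	afero.WriteFile(base, "/dir/base.txt", []byte("from base"), 0o644)
	afero.WriteFile(layer, "/dir/layer.txt", []byte("from layer"), 0o644)

	bf, _ := base.Open("/dir")
	lf, _ := layer.Open("/dir")

	// With no Merger set, Readdir falls back to defaultUnionMergeDirsFn,
	// which keeps overlay entries and adds base entries that aren't shadowed.
	uf := &afero.UnionFile{Base: bf, Layer: lf}
	names, _ := uf.Readdirnames(-1)
	fmt.Println(names) // layer.txt and base.txt, in unspecified order
}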

329
vendor/github.com/spf13/afero/util.go generated vendored Normal file
View File

@ -0,0 +1,329 @@
// Copyright ©2015 Steve Francia <spf@spf13.com>
// Portions Copyright ©2015 The Hugo Authors
// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// Filepath separator defined by filepath.Separator.
const FilePathSeparator = string(filepath.Separator)
// Takes a reader and a path and writes the content
func (a Afero) WriteReader(path string, r io.Reader) (err error) {
return WriteReader(a.Fs, path, r)
}
func WriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r
if err != nil {
if err != os.ErrExist {
return err
}
}
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
// Same as WriteReader but checks to see if file/directory already exists.
func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
return SafeWriteReader(a.Fs, path, r)
}
func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r
if err != nil {
return
}
}
exists, err := Exists(fs, path)
if err != nil {
return
}
if exists {
return fmt.Errorf("%v already exists", path)
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
func (a Afero) GetTempDir(subPath string) string {
return GetTempDir(a.Fs, subPath)
}
// GetTempDir returns the default temp directory with a trailing slash.
// If subPath is not empty, it will be created recursively with mode 777 (rwx rwx rwx).
func GetTempDir(fs Fs, subPath string) string {
addSlash := func(p string) string {
if FilePathSeparator != p[len(p)-1:] {
p = p + FilePathSeparator
}
return p
}
dir := addSlash(os.TempDir())
if subPath != "" {
// preserve windows backslash :-(
if FilePathSeparator == "\\" {
subPath = strings.Replace(subPath, "\\", "____", -1)
}
dir = dir + UnicodeSanitize((subPath))
if FilePathSeparator == "\\" {
dir = strings.Replace(dir, "____", "\\", -1)
}
if exists, _ := Exists(fs, dir); exists {
return addSlash(dir)
}
err := fs.MkdirAll(dir, 0o777)
if err != nil {
panic(err)
}
dir = addSlash(dir)
}
return dir
}
// Rewrite string to remove non-standard path characters
func UnicodeSanitize(s string) string {
source := []rune(s)
target := make([]rune, 0, len(source))
for _, r := range source {
if unicode.IsLetter(r) ||
unicode.IsDigit(r) ||
unicode.IsMark(r) ||
r == '.' ||
r == '/' ||
r == '\\' ||
r == '_' ||
r == '-' ||
r == '%' ||
r == ' ' ||
r == '#' {
target = append(target, r)
}
}
return string(target)
}
// Transform characters with accents into plain forms.
func NeuterAccents(s string) string {
t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
result, _, _ := transform.String(t, string(s))
return result
}
func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
return FileContainsBytes(a.Fs, filename, subslice)
}
// Check if a file contains a specified byte slice.
func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslice), nil
}
func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
return FileContainsAnyBytes(a.Fs, filename, subslices)
}
// Check if a file contains any of the specified byte slices.
func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslices...), nil
}
// readerContainsAny reports whether any of the subslices is within r.
func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
if r == nil || len(subslices) == 0 {
return false
}
largestSlice := 0
for _, sl := range subslices {
if len(sl) > largestSlice {
largestSlice = len(sl)
}
}
if largestSlice == 0 {
return false
}
bufflen := largestSlice * 4
halflen := bufflen / 2
buff := make([]byte, bufflen)
var err error
var n, i int
for {
i++
if i == 1 {
n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
} else {
if i != 2 {
// shift left to catch overlapping matches
copy(buff[:], buff[halflen:])
}
n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
}
if n > 0 {
for _, sl := range subslices {
if bytes.Contains(buff, sl) {
return true
}
}
}
if err != nil {
break
}
}
return false
}
func (a Afero) DirExists(path string) (bool, error) {
return DirExists(a.Fs, path)
}
// DirExists checks if a path exists and is a directory.
func DirExists(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err == nil && fi.IsDir() {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func (a Afero) IsDir(path string) (bool, error) {
return IsDir(a.Fs, path)
}
// IsDir checks if a given path is a directory.
func IsDir(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
return fi.IsDir(), nil
}
func (a Afero) IsEmpty(path string) (bool, error) {
return IsEmpty(a.Fs, path)
}
// IsEmpty checks if a given file or directory is empty.
func IsEmpty(fs Fs, path string) (bool, error) {
if b, _ := Exists(fs, path); !b {
return false, fmt.Errorf("%q path does not exist", path)
}
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
if fi.IsDir() {
f, err := fs.Open(path)
if err != nil {
return false, err
}
defer f.Close()
list, err := f.Readdir(-1)
if err != nil {
return false, err
}
return len(list) == 0, nil
}
return fi.Size() == 0, nil
}
func (a Afero) Exists(path string) (bool, error) {
return Exists(a.Fs, path)
}
// Check if a file or directory exists.
func Exists(fs Fs, path string) (bool, error) {
_, err := fs.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
combinedPath := filepath.Join(basePathFs.path, relativePath)
if parent, ok := basePathFs.source.(*BasePathFs); ok {
return FullBaseFsPath(parent, combinedPath)
}
return combinedPath
}
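A short sketch exercising a few of the helpers above against an in-memory filesystem (illustrative only; paths and contents are hypothetical): WriteReader creates parent directories as needed, and Exists, IsEmpty and FileContainsBytes query the result.

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteReader creates parent directories as needed and copies the reader in.
	afero.WriteReader(fs, "/site/content/post.md", strings.NewReader("# hello"))

	exists, _ := afero.Exists(fs, "/site/content/post.md")
	empty, _ := afero.IsEmpty(fs, "/site/content")
	found, _ := afero.FileContainsBytes(fs, "/site/content/post.md", []byte("hello"))
	fmt.Println(exists, empty, found) // true false true
}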

3
vendor/golang.org/x/crypto/AUTHORS generated vendored
View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.

View File

@ -15,6 +15,7 @@ const bufSize = 256
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
// be called when the vector facility is available. Implementation in asm_s390x.s.
//
//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)

View File

@ -9,7 +9,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
"fmt"
"errors"
"strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
@ -124,10 +125,10 @@ func X25519(scalar, point []byte) ([]byte, error) {
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32)
return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32)
return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
@ -138,7 +139,7 @@ func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
copy(base[:], point)
ScalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
return nil, fmt.Errorf("bad input point: low order point")
return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil

View File

@ -1,13 +1,16 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
//go:build amd64 && gc && !purego
// +build amd64,gc,!purego
package field
// feMul sets out = a * b. It works like feMulGeneric.
//
//go:noescape
func feMul(out *Element, a *Element, b *Element)
// feSquare sets out = a * a. It works like feSquareGeneric.
//
//go:noescape
func feSquare(out *Element, a *Element)

View File

@ -1,13 +1,7 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// In Go 1.13, the ed25519 package was promoted to the standard library as
// crypto/ed25519, and this package became a wrapper for the standard library one.
//
//go:build !go1.13
// +build !go1.13
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
@ -16,21 +10,15 @@
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
import (
"bytes"
"crypto"
cryptorand "crypto/rand"
"crypto/sha512"
"errors"
"crypto/ed25519"
"io"
"strconv"
"golang.org/x/crypto/ed25519/internal/edwards25519"
)
const (
@ -45,57 +33,21 @@ const (
)
// PublicKey is the type of Ed25519 public keys.
type PublicKey []byte
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
type PrivateKey []byte
// Public returns the PublicKey corresponding to priv.
func (priv PrivateKey) Public() crypto.PublicKey {
publicKey := make([]byte, PublicKeySize)
copy(publicKey, priv[32:])
return PublicKey(publicKey)
}
// Seed returns the private key seed corresponding to priv. It is provided for
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
// in this package.
func (priv PrivateKey) Seed() []byte {
seed := make([]byte, SeedSize)
copy(seed, priv[:32])
return seed
}
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
// indicate the message hasn't been hashed. This can be achieved by passing
// crypto.Hash(0) as the value for opts.
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
if opts.HashFunc() != crypto.Hash(0) {
return nil, errors.New("ed25519: cannot sign hashed message")
}
return Sign(priv, message), nil
}
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
if rand == nil {
rand = cryptorand.Reader
}
seed := make([]byte, SeedSize)
if _, err := io.ReadFull(rand, seed); err != nil {
return nil, nil, err
}
privateKey := NewKeyFromSeed(seed)
publicKey := make([]byte, PublicKeySize)
copy(publicKey, privateKey[32:])
return publicKey, privateKey, nil
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
@ -103,121 +55,17 @@ func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
if l := len(seed); l != SeedSize {
panic("ed25519: bad seed length: " + strconv.Itoa(l))
}
digest := sha512.Sum512(seed)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
var A edwards25519.ExtendedGroupElement
var hBytes [32]byte
copy(hBytes[:], digest[:])
edwards25519.GeScalarMultBase(&A, &hBytes)
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
privateKey := make([]byte, PrivateKeySize)
copy(privateKey, seed)
copy(privateKey[32:], publicKeyBytes[:])
return privateKey
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
if l := len(privateKey); l != PrivateKeySize {
panic("ed25519: bad private key length: " + strconv.Itoa(l))
}
h := sha512.New()
h.Write(privateKey[:32])
var digest1, messageDigest, hramDigest [64]byte
var expandedSecretKey [32]byte
h.Sum(digest1[:0])
copy(expandedSecretKey[:], digest1[:])
expandedSecretKey[0] &= 248
expandedSecretKey[31] &= 63
expandedSecretKey[31] |= 64
h.Reset()
h.Write(digest1[32:])
h.Write(message)
h.Sum(messageDigest[:0])
var messageDigestReduced [32]byte
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
var R edwards25519.ExtendedGroupElement
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
var encodedR [32]byte
R.ToBytes(&encodedR)
h.Reset()
h.Write(encodedR[:])
h.Write(privateKey[32:])
h.Write(message)
h.Sum(hramDigest[:0])
var hramDigestReduced [32]byte
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
var s [32]byte
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
signature := make([]byte, SignatureSize)
copy(signature[:], encodedR[:])
copy(signature[32:], s[:])
return signature
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
if l := len(publicKey); l != PublicKeySize {
panic("ed25519: bad public key length: " + strconv.Itoa(l))
}
if len(sig) != SignatureSize || sig[63]&224 != 0 {
return false
}
var A edwards25519.ExtendedGroupElement
var publicKeyBytes [32]byte
copy(publicKeyBytes[:], publicKey)
if !A.FromBytes(&publicKeyBytes) {
return false
}
edwards25519.FeNeg(&A.X, &A.X)
edwards25519.FeNeg(&A.T, &A.T)
h := sha512.New()
h.Write(sig[:32])
h.Write(publicKey[:])
h.Write(message)
var digest [64]byte
h.Sum(digest[:0])
var hReduced [32]byte
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
var s [32]byte
copy(s[:], sig[32:])
// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
// the range [0, order) in order to prevent signature malleability.
if !edwards25519.ScMinimal(&s) {
return false
}
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
var checkR [32]byte
R.ToBytes(&checkR)
return bytes.Equal(sig[:32], checkR[:])
return ed25519.Verify(publicKey, message, sig)
}
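A minimal sketch of the wrapper after this change (illustrative only, not part of the diff): GenerateKey, Sign and Verify now delegate to crypto/ed25519, and the key types are aliases for the standard library types, so a sign/verify round trip goes entirely through the standard library. The message text is hypothetical.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// GenerateKey, Sign and Verify delegate to crypto/ed25519;
	// PublicKey and PrivateKey are type aliases for the standard library types.
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)
	msg := []byte("vendored wrapper demo")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}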

View File

@ -1,74 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.13
// +build go1.13
// Package ed25519 implements the Ed25519 signature algorithm. See
// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
// Beginning with Go 1.13, the functionality of this package was moved to the
// standard library as crypto/ed25519. This package only acts as a compatibility
// wrapper.
package ed25519
import (
"crypto/ed25519"
"io"
)
const (
// PublicKeySize is the size, in bytes, of public keys as used in this package.
PublicKeySize = 32
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
//
// This type is an alias for crypto/ed25519's PublicKey type.
// See the crypto/ed25519 package for the methods on this type.
type PublicKey = ed25519.PublicKey
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
//
// This type is an alias for crypto/ed25519's PrivateKey type.
// See the crypto/ed25519 package for the methods on this type.
type PrivateKey = ed25519.PrivateKey
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
return ed25519.GenerateKey(rand)
}
// NewKeyFromSeed calculates a private key from a seed. It will panic if
// len(seed) is not SeedSize. This function is provided for interoperability
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
// package.
func NewKeyFromSeed(seed []byte) PrivateKey {
return ed25519.NewKeyFromSeed(seed)
}
// Sign signs the message with privateKey and returns a signature. It will
// panic if len(privateKey) is not PrivateKeySize.
func Sign(privateKey PrivateKey, message []byte) []byte {
return ed25519.Sign(privateKey, message)
}
// Verify reports whether sig is a valid signature of message by publicKey. It
// will panic if len(publicKey) is not PublicKeySize.
func Verify(publicKey PublicKey, message, sig []byte) bool {
return ed25519.Verify(publicKey, message, sig)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -136,7 +136,7 @@ func shiftRightBy2(a uint128) uint128 {
// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
// 128 bits of message, it computes
//
// h₊ = (h + m) * r mod 2¹³⁰ - 5
// h₊ = (h + m) * r mod 2¹³⁰ - 5
//
// If the msg length is not a multiple of TagSize, it assumes the last
// incomplete chunk is the final one.
@ -278,8 +278,7 @@ const (
// finalize completes the modular reduction of h and computes
//
// out = h + s mod 2¹²⁸
//
// out = h + s mod 2¹²⁸
func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
h0, h1, h2 := h[0], h[1], h[2]

View File

@ -14,6 +14,7 @@ import (
// updateVX is an assembly implementation of Poly1305 that uses vector
// instructions. It must only be called if the vector facility (vx) is
// available.
//
//go:noescape
func updateVX(state *macState, msg []byte)

View File

@ -23,12 +23,14 @@ import (
// A Block represents an OpenPGP armored structure.
//
// The encoded form is:
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
// -----BEGIN Type-----
// Headers
//
// base64-encoded Bytes
// '=' base64 encoded checksum
// -----END Type-----
//
// where Headers is a possibly empty sequence of Key: Value lines.
//
// Since the armored data can be very large, this package presents a streaming

View File

@ -96,7 +96,8 @@ func (l *lineBreaker) Close() (err error) {
// trailer.
//
// It's built into a stack of io.Writers:
// encoding -> base64 encoder -> lineBreaker -> out
//
// encoding -> base64 encoder -> lineBreaker -> out
type encoding struct {
out io.Writer
breaker *lineBreaker

View File

@ -77,8 +77,8 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err
// returns the plaintext of the message. An error can result only if the
// ciphertext is invalid. Users should keep in mind that this is a padding
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
// be used to break the cryptosystem. See Chosen Ciphertext Attacks
// Against Protocols Based on the RSA Encryption Standard PKCS #1, Daniel
// Bleichenbacher, Advances in Cryptology (Crypto '98),
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
s := new(big.Int).Exp(c1, priv.X, priv.P)

View File

@ -8,7 +8,8 @@
// ssh-agent process using the sample server.
//
// References:
// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
//
// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
package agent // import "golang.org/x/crypto/ssh/agent"
import (
@ -25,7 +26,6 @@ import (
"math/big"
"sync"
"crypto"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh"
)
@ -771,19 +771,53 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature,
return s.agent.Sign(s.pub, data)
}
func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) {
var flags SignatureFlags
if opts != nil {
switch opts.HashFunc() {
case crypto.SHA256:
flags = SignatureFlagRsaSha256
case crypto.SHA512:
flags = SignatureFlagRsaSha512
}
func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) {
if algorithm == "" || algorithm == underlyingAlgo(s.pub.Type()) {
return s.Sign(rand, data)
}
var flags SignatureFlags
switch algorithm {
case ssh.KeyAlgoRSASHA256:
flags = SignatureFlagRsaSha256
case ssh.KeyAlgoRSASHA512:
flags = SignatureFlagRsaSha512
default:
return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm)
}
return s.agent.SignWithFlags(s.pub, data, flags)
}
var _ ssh.AlgorithmSigner = &agentKeyringSigner{}
// certKeyAlgoNames is a mapping from known certificate algorithm names to the
// corresponding public key signature algorithm.
//
// This map must be kept in sync with the one in certs.go.
var certKeyAlgoNames = map[string]string{
ssh.CertAlgoRSAv01: ssh.KeyAlgoRSA,
ssh.CertAlgoRSASHA256v01: ssh.KeyAlgoRSASHA256,
ssh.CertAlgoRSASHA512v01: ssh.KeyAlgoRSASHA512,
ssh.CertAlgoDSAv01: ssh.KeyAlgoDSA,
ssh.CertAlgoECDSA256v01: ssh.KeyAlgoECDSA256,
ssh.CertAlgoECDSA384v01: ssh.KeyAlgoECDSA384,
ssh.CertAlgoECDSA521v01: ssh.KeyAlgoECDSA521,
ssh.CertAlgoSKECDSA256v01: ssh.KeyAlgoSKECDSA256,
ssh.CertAlgoED25519v01: ssh.KeyAlgoED25519,
ssh.CertAlgoSKED25519v01: ssh.KeyAlgoSKED25519,
}
// underlyingAlgo returns the signature algorithm associated with algo (which is
// an advertised or negotiated public key or host key algorithm). These are
// usually the same, except for certificate algorithms.
func underlyingAlgo(algo string) string {
if a, ok := certKeyAlgoNames[algo]; ok {
return a
}
return algo
}
// Calls an extension method. It is up to the agent implementation as to whether or not
// any particular extension is supported and may always return an error. Because the
// type of the response is up to the implementation, this returns the bytes of the
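
In application code these agent signers are normally consumed through the public client API rather than called directly; a minimal sketch of authenticating via a running ssh-agent (user, address and the ignored host key check are placeholders):

package main

import (
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// Connect to the local agent over its UNIX socket.
	sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	ag := agent.NewClient(sock)

	// The signers returned by the agent implement ssh.AlgorithmSigner (see the
	// assertion above), so RSA keys can authenticate with rsa-sha2-256/512.
	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.PublicKeysCallback(ag.Signers)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}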


@ -113,7 +113,7 @@ func (r *keyring) Unlock(passphrase []byte) error {
// expireKeysLocked removes expired keys from the keyring. If a key was added
// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have
// ellapsed, it is removed. The caller *must* be holding the keyring mutex.
// elapsed, it is removed. The caller *must* be holding the keyring mutex.
func (r *keyring) expireKeysLocked() {
for _, k := range r.keys {
if k.expire != nil && time.Now().After(*k.expire) {
@ -205,9 +205,9 @@ func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureF
var algorithm string
switch flags {
case SignatureFlagRsaSha256:
algorithm = ssh.SigAlgoRSASHA2256
algorithm = ssh.KeyAlgoRSASHA256
case SignatureFlagRsaSha512:
algorithm = ssh.SigAlgoRSASHA2512
algorithm = ssh.KeyAlgoRSASHA512
default:
return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
}
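
From the caller's side the lifetime constraint mentioned above is set when the key is added; a small sketch using the in-memory keyring and a throwaway RSA key (the comment string and lifetime are arbitrary):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	keyring := agent.NewKeyring()
	// LifetimeSecs asks the keyring to drop the key after that many seconds;
	// expireKeysLocked (above) enforces it on subsequent operations.
	if err := keyring.Add(agent.AddedKey{
		PrivateKey:   priv,
		Comment:      "short-lived demo key",
		LifetimeSecs: 60,
	}); err != nil {
		log.Fatal(err)
	}

	keys, _ := keyring.List()
	fmt.Println("keys in agent:", len(keys))
}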


@ -14,8 +14,10 @@ import (
"time"
)
// These constants from [PROTOCOL.certkeys] represent the algorithm names
// for certificate types supported by this package.
// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear
// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms.
// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't
// appear in the Signature.Format field.
const (
CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
@ -25,6 +27,21 @@ const (
CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com"
CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com"
// CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a
// Certificate.Type (or PublicKey.Type), but only in
// ClientConfig.HostKeyAlgorithms.
CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com"
CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com"
)
const (
// Deprecated: use CertAlgoRSAv01.
CertSigAlgoRSAv01 = CertAlgoRSAv01
// Deprecated: use CertAlgoRSASHA256v01.
CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01
// Deprecated: use CertAlgoRSASHA512v01.
CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01
)
// Certificate types distinguish between host and user
@ -423,6 +440,16 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
}
c.SignatureKey = authority.PublicKey()
// Default to KeyAlgoRSASHA512 for ssh-rsa signers.
if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA {
sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512)
if err != nil {
return err
}
c.Signature = sig
return nil
}
sig, err := authority.Sign(rand, c.bytesForSigning())
if err != nil {
return err
@ -431,26 +458,42 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
return nil
}
var certAlgoNames = map[string]string{
KeyAlgoRSA: CertAlgoRSAv01,
KeyAlgoDSA: CertAlgoDSAv01,
KeyAlgoECDSA256: CertAlgoECDSA256v01,
KeyAlgoECDSA384: CertAlgoECDSA384v01,
KeyAlgoECDSA521: CertAlgoECDSA521v01,
KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01,
KeyAlgoED25519: CertAlgoED25519v01,
KeyAlgoSKED25519: CertAlgoSKED25519v01,
// certKeyAlgoNames is a mapping from known certificate algorithm names to the
// corresponding public key signature algorithm.
//
// This map must be kept in sync with the one in agent/client.go.
var certKeyAlgoNames = map[string]string{
CertAlgoRSAv01: KeyAlgoRSA,
CertAlgoRSASHA256v01: KeyAlgoRSASHA256,
CertAlgoRSASHA512v01: KeyAlgoRSASHA512,
CertAlgoDSAv01: KeyAlgoDSA,
CertAlgoECDSA256v01: KeyAlgoECDSA256,
CertAlgoECDSA384v01: KeyAlgoECDSA384,
CertAlgoECDSA521v01: KeyAlgoECDSA521,
CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256,
CertAlgoED25519v01: KeyAlgoED25519,
CertAlgoSKED25519v01: KeyAlgoSKED25519,
}
// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
// Panics if a non-certificate algorithm is passed.
func certToPrivAlgo(algo string) string {
for privAlgo, pubAlgo := range certAlgoNames {
if pubAlgo == algo {
return privAlgo
// underlyingAlgo returns the signature algorithm associated with algo (which is
// an advertised or negotiated public key or host key algorithm). These are
// usually the same, except for certificate algorithms.
func underlyingAlgo(algo string) string {
if a, ok := certKeyAlgoNames[algo]; ok {
return a
}
return algo
}
// certificateAlgo returns the certificate algorithms that uses the provided
// underlying signature algorithm.
func certificateAlgo(algo string) (certAlgo string, ok bool) {
for certName, algoName := range certKeyAlgoNames {
if algoName == algo {
return certName, true
}
}
panic("unknown cert algorithm")
return "", false
}
func (cert *Certificate) bytesForSigning() []byte {
@ -494,13 +537,13 @@ func (c *Certificate) Marshal() []byte {
return result
}
// Type returns the key name. It is part of the PublicKey interface.
// Type returns the certificate algorithm name. It is part of the PublicKey interface.
func (c *Certificate) Type() string {
algo, ok := certAlgoNames[c.Key.Type()]
certName, ok := certificateAlgo(c.Key.Type())
if !ok {
panic("unknown cert key type " + c.Key.Type())
panic("unknown certificate type for key type " + c.Key.Type())
}
return algo
return certName
}
// Verify verifies a signature against the certificate's public
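
A sketch of minting a user certificate with an ssh-rsa CA, which after the SignCert change above defaults to an rsa-sha2-512 signature (keys are generated on the fly; key ID, principal and validity are placeholders):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	caSigner, err := ssh.NewSignerFromKey(caKey)
	if err != nil {
		log.Fatal(err)
	}

	userKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	userPub, err := ssh.NewPublicKey(&userKey.PublicKey)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             userPub,
		CertType:        ssh.UserCert,
		KeyId:           "demo",
		ValidPrincipals: []string{"demo-user"},
		ValidBefore:     ssh.CertTimeInfinity,
	}
	// The CA signer is an AlgorithmSigner whose key type is ssh-rsa, so
	// SignCert picks KeyAlgoRSASHA512 rather than SHA-1 (see above).
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		log.Fatal(err)
	}
	fmt.Println("cert type:", cert.Type(), "signature format:", cert.Signature.Format)
}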


@ -394,6 +394,10 @@ func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error)
}
c.incIV()
if len(plain) == 0 {
return nil, errors.New("ssh: empty packet")
}
padding := plain[0]
if padding < 4 {
// padding is a byte, so it automatically satisfies
@ -636,7 +640,7 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com"
// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com
// AEAD, which is described here:
//
// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00
// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00
//
// the methods here also implement padding, which RFC4253 Section 6
// also requires of stream ciphers.
@ -710,6 +714,10 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([
plain := c.buf[4:contentEnd]
s.XORKeyStream(plain, plain)
if len(plain) == 0 {
return nil, errors.New("ssh: empty packet")
}
padding := plain[0]
if padding < 4 {
// padding is a byte, so it automatically satisfies


@ -113,14 +113,18 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e
return c.clientAuthenticate(config)
}
// verifyHostKeySignature verifies the host key obtained in the key
// exchange.
func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
// verifyHostKeySignature verifies the host key obtained in the key exchange.
// algo is the negotiated algorithm, and may be a certificate type.
func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error {
sig, rest, ok := parseSignatureBody(result.Signature)
if len(rest) > 0 || !ok {
return errors.New("ssh: signature parse error")
}
if a := underlyingAlgo(algo); sig.Format != a {
return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a)
}
return hostKey.Verify(result.H, sig)
}
@ -224,11 +228,11 @@ type ClientConfig struct {
// be used for the connection. If empty, a reasonable default is used.
ClientVersion string
// HostKeyAlgorithms lists the key types that the client will
// accept from the server as host key, in order of
// HostKeyAlgorithms lists the public key algorithms that the client will
// accept from the server for host key authentication, in order of
// preference. If empty, a reasonable default is used. Any
// string returned from PublicKey.Type method may be used, or
// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
// string returned from a PublicKey.Type method may be used, or
// any of the CertAlgo and KeyAlgo constants.
HostKeyAlgorithms []string
// Timeout is the maximum amount of time for the TCP connection to establish.
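
A sketch of a client that pins the new SHA-2 RSA host key algorithm names via HostKeyAlgorithms (host, user and password are placeholders, and a real program should verify the host key instead of ignoring it):

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("not-a-real-password")},
		// Accept only SHA-2 RSA host keys and their certificate forms, using
		// the constants introduced in this update.
		HostKeyAlgorithms: []string{
			ssh.KeyAlgoRSASHA512,
			ssh.KeyAlgoRSASHA256,
			ssh.CertAlgoRSASHA512v01,
			ssh.CertAlgoRSASHA256v01,
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}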


@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
"strings"
)
type authResult int
@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
if err != nil {
return err
}
// The server may choose to send a SSH_MSG_EXT_INFO at this point (if we
// advertised willingness to receive one, which we always do) or not. See
// RFC 8308, Section 2.4.
extensions := make(map[string][]byte)
if len(packet) > 0 && packet[0] == msgExtInfo {
var extInfo extInfoMsg
if err := Unmarshal(packet, &extInfo); err != nil {
return err
}
payload := extInfo.Payload
for i := uint32(0); i < extInfo.NumExtensions; i++ {
name, rest, ok := parseString(payload)
if !ok {
return parseError(msgExtInfo)
}
value, rest, ok := parseString(rest)
if !ok {
return parseError(msgExtInfo)
}
extensions[string(name)] = value
payload = rest
}
packet, err = c.transport.readPacket()
if err != nil {
return err
}
}
var serviceAccept serviceAcceptMsg
if err := Unmarshal(packet, &serviceAccept); err != nil {
return err
@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
sessionID := c.transport.getSessionID()
for auth := AuthMethod(new(noneAuth)); auth != nil; {
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions)
if err != nil {
return err
}
@ -93,7 +121,7 @@ type AuthMethod interface {
// If authentication is not successful, a []string of alternative
// method names is returned. If the slice is nil, it will be ignored
// and the previous set of possible methods will be reused.
auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error)
auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error)
// method returns the RFC 4252 method name.
method() string
@ -102,7 +130,7 @@ type AuthMethod interface {
// "none" authentication, RFC 4252 section 5.2.
type noneAuth int
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
if err := c.writePacket(Marshal(&userAuthRequestMsg{
User: user,
Service: serviceSSH,
@ -122,7 +150,7 @@ func (n *noneAuth) method() string {
// a function call, e.g. by prompting the user.
type passwordCallback func() (password string, err error)
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
type passwordAuthMsg struct {
User string `sshtype:"50"`
Service string
@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string {
return "publickey"
}
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) {
keyFormat := signer.PublicKey().Type()
// Like in sendKexInit, if the public key implements AlgorithmSigner we
// assume it supports all algorithms, otherwise only the key format one.
as, ok := signer.(AlgorithmSigner)
if !ok {
return algorithmSignerWrapper{signer}, keyFormat
}
extPayload, ok := extensions["server-sig-algs"]
if !ok {
// If there is no "server-sig-algs" extension, fall back to the key
// format algorithm.
return as, keyFormat
}
// The server-sig-algs extension only carries underlying signature
// algorithm, but we are trying to select a protocol-level public key
// algorithm, which might be a certificate type. Extend the list of server
// supported algorithms to include the corresponding certificate algorithms.
serverAlgos := strings.Split(string(extPayload), ",")
for _, algo := range serverAlgos {
if certAlgo, ok := certificateAlgo(algo); ok {
serverAlgos = append(serverAlgos, certAlgo)
}
}
keyAlgos := algorithmsForKeyFormat(keyFormat)
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
if err != nil {
// If there is no overlap, try the key anyway with the key format
// algorithm, to support servers that fail to list all supported
// algorithms.
return as, keyFormat
}
return as, algo
}
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) {
// Authentication is performed by sending an enquiry to test if a key is
// acceptable to the remote. If the key is acceptable, the client will
// attempt to authenticate with the valid key. If not the client will repeat
@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
}
var methods []string
for _, signer := range signers {
ok, err := validateKey(signer.PublicKey(), user, c)
pub := signer.PublicKey()
as, algo := pickSignatureAlgorithm(signer, extensions)
ok, err := validateKey(pub, algo, user, c)
if err != nil {
return authFailure, nil, err
}
@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
continue
}
pub := signer.PublicKey()
pubKey := pub.Marshal()
sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
data := buildDataSignedForAuth(session, userAuthRequestMsg{
User: user,
Service: serviceSSH,
Method: cb.method(),
}, []byte(pub.Type()), pubKey))
}, algo, pubKey)
sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo))
if err != nil {
return authFailure, nil, err
}
@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
Service: serviceSSH,
Method: cb.method(),
HasSig: true,
Algoname: pub.Type(),
Algoname: algo,
PubKey: pubKey,
Sig: sig,
}
@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool {
}
// validateKey validates the key provided is acceptable to the server.
func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) {
pubKey := key.Marshal()
msg := publickeyAuthMsg{
User: user,
Service: serviceSSH,
Method: "publickey",
HasSig: false,
Algoname: key.Type(),
Algoname: algo,
PubKey: pubKey,
}
if err := c.writePacket(Marshal(&msg)); err != nil {
return false, err
}
return confirmKeyAck(key, c)
return confirmKeyAck(key, algo, c)
}
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
pubKey := key.Marshal()
algoname := key.Type()
for {
packet, err := c.readPacket()
@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
if err := Unmarshal(packet, &msg); err != nil {
return false, err
}
if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) {
return false, nil
}
return true, nil
case msgUserAuthFailure:
return false, nil
default:
return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0])
}
}
}
@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet
// along with a list of remaining authentication methods to try next and
// an error if an unexpected response was received.
func handleAuthResponse(c packetConn) (authResult, []string, error) {
gotMsgExtInfo := false
for {
packet, err := c.readPacket()
if err != nil {
@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) {
if err := handleBannerResponse(c, packet); err != nil {
return authFailure, nil, err
}
case msgExtInfo:
// Ignore post-authentication RFC 8308 extensions, once.
if gotMsgExtInfo {
return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
}
gotMsgExtInfo = true
case msgUserAuthFailure:
var msg userAuthFailureMsg
if err := Unmarshal(packet, &msg); err != nil {
@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error {
// disabling echoing (e.g. for passwords), and return all the answers.
// Challenge may be called multiple times in a single session. After
// successful authentication, the server may send a challenge with no
// questions, for which the user and instruction messages should be
// questions, for which the name and instruction messages should be
// printed. RFC 4256 section 3.3 details how the UI should behave for
// both CLI and GUI environments.
type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error)
// KeyboardInteractive returns an AuthMethod using a prompt/response
// sequence controlled by the server.
@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string {
return "keyboard-interactive"
}
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
type initiateMsg struct {
User string `sshtype:"50"`
Service string
@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
return authFailure, nil, err
}
gotMsgExtInfo := false
for {
packet, err := c.readPacket()
if err != nil {
@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
return authFailure, nil, err
}
continue
case msgExtInfo:
// Ignore post-authentication RFC 8308 extensions, once.
if gotMsgExtInfo {
return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
}
gotMsgExtInfo = true
continue
case msgUserAuthInfoRequest:
// OK
case msgUserAuthFailure:
@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
}
answers, err := cb(msg.User, msg.Instruction, prompts, echos)
answers, err := cb(msg.Name, msg.Instruction, prompts, echos)
if err != nil {
return authFailure, nil, err
}
@ -497,9 +581,9 @@ type retryableAuthMethod struct {
maxTries int
}
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) {
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) {
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
ok, methods, err = r.authMethod.auth(session, user, c, rand)
ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions)
if ok != authFailure || err != nil { // either success, partial success or error terminate
return ok, methods, err
}
@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct {
target string
}
func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
m := &userAuthRequestMsg{
User: user,
Service: serviceSSH,
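
The renamed name/instruction parameters flow straight through to the public callback type; a minimal sketch of keyboard-interactive authentication that echoes prompts and reads answers from stdin (connection details are placeholders):

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"

	"golang.org/x/crypto/ssh"
)

func main() {
	stdin := bufio.NewReader(os.Stdin)

	challenge := func(name, instruction string, questions []string, echos []bool) ([]string, error) {
		// After authentication the server may send an empty challenge whose
		// name and instruction should still be shown to the user.
		if name != "" {
			fmt.Println(name)
		}
		if instruction != "" {
			fmt.Println(instruction)
		}
		answers := make([]string, len(questions))
		for i, q := range questions {
			fmt.Print(q)
			line, err := stdin.ReadString('\n')
			if err != nil {
				return nil, err
			}
			answers[i] = strings.TrimSuffix(line, "\n")
		}
		return answers, nil
	}

	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.KeyboardInteractive(challenge)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	if _, err := ssh.Dial("tcp", "example.com:22", config); err != nil {
		log.Fatal(err)
	}
}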


@ -44,11 +44,11 @@ var preferredCiphers = []string{
// supportedKexAlgos specifies the supported key-exchange algorithms in
// preference order.
var supportedKexAlgos = []string{
kexAlgoCurve25519SHA256,
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
// P384 and P521 are not constant-time yet, but since we don't
// reuse ephemeral keys, using them for ECDH should be OK.
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA1, kexAlgoDH1SHA1,
kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1,
}
// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden
@ -61,18 +61,20 @@ var serverForbiddenKexAlgos = map[string]struct{}{
// preferredKexAlgos specifies the default preference for key-exchange algorithms
// in preference order.
var preferredKexAlgos = []string{
kexAlgoCurve25519SHA256,
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
kexAlgoDH14SHA1,
kexAlgoDH14SHA256, kexAlgoDH14SHA1,
}
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
// of authenticating servers) in preference order.
var supportedHostKeyAlgos = []string{
CertAlgoRSASHA512v01, CertAlgoRSASHA256v01,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSASHA512, KeyAlgoRSASHA256,
KeyAlgoRSA, KeyAlgoDSA,
KeyAlgoED25519,
@ -87,19 +89,33 @@ var supportedMACs = []string{
var supportedCompressions = []string{compressionNone}
// hashFuncs keeps the mapping of supported algorithms to their respective
// hashes needed for signature verification.
// hashFuncs keeps the mapping of supported signature algorithms to their
// respective hashes needed for signing and verification.
var hashFuncs = map[string]crypto.Hash{
KeyAlgoRSA: crypto.SHA1,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
CertAlgoRSAv01: crypto.SHA1,
CertAlgoDSAv01: crypto.SHA1,
CertAlgoECDSA256v01: crypto.SHA256,
CertAlgoECDSA384v01: crypto.SHA384,
CertAlgoECDSA521v01: crypto.SHA512,
KeyAlgoRSA: crypto.SHA1,
KeyAlgoRSASHA256: crypto.SHA256,
KeyAlgoRSASHA512: crypto.SHA512,
KeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
// KeyAlgoED25519 doesn't pre-hash.
KeyAlgoSKECDSA256: crypto.SHA256,
KeyAlgoSKED25519: crypto.SHA256,
}
// algorithmsForKeyFormat returns the supported signature algorithms for a given
// public key format (PublicKey.Type), in order of preference. See RFC 8332,
// Section 2. See also the note in sendKexInit on backwards compatibility.
func algorithmsForKeyFormat(keyFormat string) []string {
switch keyFormat {
case KeyAlgoRSA:
return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA}
case CertAlgoRSAv01:
return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01}
default:
return []string{keyFormat}
}
}
// unexpectedMessageError results when the SSH message that we received didn't
@ -146,6 +162,11 @@ func (a *directionAlgorithms) rekeyBytes() int64 {
return 1 << 30
}
var aeadCiphers = map[string]bool{
gcmCipherID: true,
chacha20Poly1305ID: true,
}
type algorithms struct {
kex string
hostKey string
@ -181,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs
return
}
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
if err != nil {
return
if !aeadCiphers[ctos.Cipher] {
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
if err != nil {
return
}
}
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
if err != nil {
return
if !aeadCiphers[stoc.Cipher] {
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
if err != nil {
return
}
}
ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
@ -272,8 +297,9 @@ func (c *Config) SetDefaults() {
}
// buildDataSignedForAuth returns the data that is signed in order to prove
// possession of a private key. See RFC 4252, section 7.
func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
// possession of a private key. See RFC 4252, section 7. algo is the advertised
// algorithm, and may be a certificate type.
func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte {
data := struct {
Session []byte
Type byte
@ -281,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK
Service string
Method string
Sign bool
Algo []byte
Algo string
PubKey []byte
}{
sessionID,
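
The expanded algorithm lists above are reachable from application code through the embedded ssh.Config; a sketch restricting a client to the newly preferred key exchanges (the strings match the identifiers registered in this file and in kex.go; connection details are placeholders):

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		Config: ssh.Config{
			// Offer only curve25519-sha256 (both spellings) and the SHA-256
			// variant of group14, mirroring the preference order above.
			KeyExchanges: []string{
				"curve25519-sha256",
				"curve25519-sha256@libssh.org",
				"diffie-hellman-group14-sha256",
			},
		},
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("not-a-real-password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	if _, err := ssh.Dial("tcp", "example.com:22", config); err != nil {
		log.Fatal(err)
	}
}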


@ -12,8 +12,9 @@ the multiplexed nature of SSH is exposed to users that wish to support
others.
References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.


@ -455,14 +455,38 @@ func (t *handshakeTransport) sendKexInit() error {
}
io.ReadFull(rand.Reader, msg.Cookie[:])
if len(t.hostKeys) > 0 {
isServer := len(t.hostKeys) > 0
if isServer {
for _, k := range t.hostKeys {
msg.ServerHostKeyAlgos = append(
msg.ServerHostKeyAlgos, k.PublicKey().Type())
// If k is an AlgorithmSigner, presume it supports all signature algorithms
// associated with the key format. (Ideally AlgorithmSigner would have a
// method to advertise supported algorithms, but it doesn't. This means that
// adding support for a new algorithm is a breaking change, as we will
// immediately negotiate it even if existing implementations don't support
// it. If that ever happens, we'll have to figure something out.)
// If k is not an AlgorithmSigner, we can only assume it only supports the
// algorithms that matches the key format. (This means that Sign can't pick
// a different default.)
keyFormat := k.PublicKey().Type()
if _, ok := k.(AlgorithmSigner); ok {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...)
} else {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat)
}
}
} else {
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
// As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what
// algorithms the server supports for public key authentication. See RFC
// 8308, Section 2.1.
if firstKeyExchange := t.sessionID == nil; firstKeyExchange {
msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1)
msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...)
msg.KexAlgos = append(msg.KexAlgos, "ext-info-c")
}
}
packet := Marshal(msg)
// writePacket destroys the contents, so save a copy.
@ -582,9 +606,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
var result *kexResult
if len(t.hostKeys) > 0 {
result, err = t.server(kex, t.algorithms, &magics)
result, err = t.server(kex, &magics)
} else {
result, err = t.client(kex, t.algorithms, &magics)
result, err = t.client(kex, &magics)
}
if err != nil {
@ -611,19 +635,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return nil
}
func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
var hostKey Signer
for _, k := range t.hostKeys {
if algs.hostKey == k.PublicKey().Type() {
hostKey = k
// algorithmSignerWrapper is an AlgorithmSigner that only supports the default
// key format algorithm.
//
// This is technically a violation of the AlgorithmSigner interface, but it
// should be unreachable given where we use this. Anyway, at least it returns an
// error instead of panicing or producing an incorrect signature.
type algorithmSignerWrapper struct {
Signer
}
func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
if algorithm != underlyingAlgo(a.PublicKey().Type()) {
return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm")
}
return a.Sign(rand, data)
}
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys {
if algo == k.PublicKey().Type() {
return algorithmSignerWrapper{k}
}
k, ok := k.(AlgorithmSigner)
if !ok {
continue
}
for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) {
if algo == a {
return k
}
}
}
return nil
}
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey)
if hostKey == nil {
return nil, errors.New("ssh: internal error: negotiated unsupported signature type")
}
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey)
return r, err
}
func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
result, err := kex.Client(t.conn, t.config.Rand, magics)
if err != nil {
return nil, err
@ -634,7 +691,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *
return nil, err
}
if err := verifyHostKeySignature(hostKey, result); err != nil {
if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil {
return nil, err
}
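
On the server side the new advertisement logic is driven entirely by the host keys passed to AddHostKey; a sketch of a server whose single ssh-rsa host key is therefore offered with rsa-sha2-256/512 as well (the listen address is arbitrary and password auth is stubbed out):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"errors"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	hostKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromKey(hostKey)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
			return nil, errors.New("password auth disabled in this sketch")
		},
	}
	// signer is an AlgorithmSigner for an ssh-rsa key, so sendKexInit (above)
	// advertises rsa-sha2-256 and rsa-sha2-512 in addition to ssh-rsa.
	config.AddHostKey(signer)

	ln, err := net.Listen("tcp", "127.0.0.1:2022")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	_, _, _, err = ssh.NewServerConn(conn, config)
	log.Println("handshake result:", err)
}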

vendor/golang.org/x/crypto/ssh/kex.go (generated, vendored)

@ -20,12 +20,14 @@ import (
)
const (
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
kexAlgoECDH256 = "ecdh-sha2-nistp256"
kexAlgoECDH384 = "ecdh-sha2-nistp384"
kexAlgoECDH521 = "ecdh-sha2-nistp521"
kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256"
kexAlgoECDH256 = "ecdh-sha2-nistp256"
kexAlgoECDH384 = "ecdh-sha2-nistp384"
kexAlgoECDH521 = "ecdh-sha2-nistp521"
kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org"
kexAlgoCurve25519SHA256 = "curve25519-sha256"
// For the following kex only the client half contains a production
// ready implementation. The server half only consists of a minimal
@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) {
// kexAlgorithm abstracts different key exchange algorithms.
type kexAlgorithm interface {
// Server runs server-side key agreement, signing the result
// with a hostkey.
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
// with a hostkey. algo is the negotiated algorithm, and may
// be a certificate type.
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error)
// Client runs the client-side key agreement. Caller is
// responsible for verifying the host key signature.
@ -86,6 +89,7 @@ type kexAlgorithm interface {
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
type dhGroup struct {
g, p, pMinus1 *big.Int
hashFunc crypto.Hash
}
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int,
}
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
hashFunc := crypto.SHA1
var x *big.Int
for {
var err error
@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
return nil, err
}
h := hashFunc.New()
h := group.hashFunc.New()
magics.write(h)
writeString(h, kexDHReply.HostKey)
writeInt(h, X)
@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
K: K,
HostKey: kexDHReply.HostKey,
Signature: kexDHReply.Signature,
Hash: crypto.SHA1,
Hash: group.hashFunc,
}, nil
}
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
hashFunc := crypto.SHA1
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
hostKeyBytes := priv.PublicKey().Marshal()
h := hashFunc.New()
h := group.hashFunc.New()
magics.write(h)
writeString(h, hostKeyBytes)
writeInt(h, kexDHInit.X)
@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, randSource, H)
sig, err := signAndMarshal(priv, randSource, H, algo)
if err != nil {
return nil, err
}
@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
K: K,
HostKey: hostKeyBytes,
Signature: sig,
Hash: crypto.SHA1,
Hash: group.hashFunc,
}, err
}
@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
return true
}
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return nil, err
@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, rand, H)
sig, err := signAndMarshal(priv, rand, H, algo)
if err != nil {
return nil, err
}
@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p
}, nil
}
// ecHash returns the hash to match the given elliptic curve, see RFC
// 5656, section 6.2.1
func ecHash(curve elliptic.Curve) crypto.Hash {
bitSize := curve.Params().BitSize
switch {
case bitSize <= 256:
return crypto.SHA256
case bitSize <= 384:
return crypto.SHA384
}
return crypto.SHA512
}
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
// This is the group called diffie-hellman-group1-sha1 in RFC
// 4253 and Oakley Group 2 in RFC 2409.
// This is the group called diffie-hellman-group1-sha1 in
// RFC 4253 and Oakley Group 2 in RFC 2409.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
hashFunc: crypto.SHA1,
}
// This are the groups called diffie-hellman-group14-sha1 and
// diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268,
// and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
group14 := &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
}
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
p: p,
pMinus1: new(big.Int).Sub(p, bigOne),
g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
hashFunc: crypto.SHA1,
}
kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{
g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
hashFunc: crypto.SHA256,
}
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{}
kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
}
// curve25519sha256 implements the curve25519-sha256@libssh.org key
// agreement protocol, as described in
// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
// curve25519sha256 implements the curve25519-sha256 (formerly known as
// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731.
type curve25519sha256 struct{}
type curve25519KeyPair struct {
@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
}, nil
}
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
packet, err := c.readPacket()
if err != nil {
return
@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
H := h.Sum(nil)
sig, err := signAndMarshal(priv, rand, H)
sig, err := signAndMarshal(priv, rand, H, algo)
if err != nil {
return nil, err
}
@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
// diffie-hellman-group-exchange-sha256 key agreement protocols,
// as described in RFC 4419
type dhGEXSHA struct {
g, p *big.Int
hashFunc crypto.Hash
}
@ -563,14 +586,7 @@ const (
dhGroupExchangeMaximumBits = 8192
)
func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 {
return nil, fmt.Errorf("ssh: DH parameter out of bounds")
}
return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil
}
func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
// Send GexRequest
kexDHGexRequest := kexDHGexRequestMsg{
MinBits: dhGroupExchangeMinimumBits,
@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
return nil, err
}
var kexDHGexGroup kexDHGexGroupMsg
if err = Unmarshal(packet, &kexDHGexGroup); err != nil {
var msg kexDHGexGroupMsg
if err = Unmarshal(packet, &msg); err != nil {
return nil, err
}
// reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits
if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits {
return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen())
if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits {
return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen())
}
gex.p = kexDHGexGroup.P
gex.g = kexDHGexGroup.G
// Check if g is safe by verifing that g > 1 and g < p - 1
one := big.NewInt(1)
var pMinusOne = &big.Int{}
pMinusOne.Sub(gex.p, one)
if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 {
// Check if g is safe by verifying that 1 < g < p-1
pMinusOne := new(big.Int).Sub(msg.P, bigOne)
if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 {
return nil, fmt.Errorf("ssh: server provided gex g is not safe")
}
// Send GexInit
var pHalf = &big.Int{}
pHalf.Rsh(gex.p, 1)
pHalf := new(big.Int).Rsh(msg.P, 1)
x, err := rand.Int(randSource, pHalf)
if err != nil {
return nil, err
}
X := new(big.Int).Exp(gex.g, x, gex.p)
X := new(big.Int).Exp(msg.G, x, msg.P)
kexDHGexInit := kexDHGexInitMsg{
X: X,
}
@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
return nil, err
}
kInt, err := gex.diffieHellman(kexDHGexReply.Y, x)
if err != nil {
return nil, err
if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 {
return nil, errors.New("ssh: DH parameter out of bounds")
}
kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P)
// Check if k is safe by verifing that k > 1 and k < p - 1
if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 {
// Check if k is safe by verifying that k > 1 and k < p - 1
if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 {
return nil, fmt.Errorf("ssh: derived k is not safe")
}
@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
writeInt(h, gex.p)
writeInt(h, gex.g)
writeInt(h, msg.P)
writeInt(h, msg.G)
writeInt(h, X)
writeInt(h, kexDHGexReply.Y)
K := make([]byte, intLength(kInt))
@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256.
//
// This is a minimal implementation to satisfy the automated tests.
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
// Receive GexRequest
packet, err := c.readPacket()
if err != nil {
@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
return
}
// smoosh the user's preferred size into our own limits
if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits {
kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits
}
if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits {
kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits
}
// fix min/max if they're inconsistent. technically, we could just pout
// and hang up, but there's no harm in giving them the benefit of the
// doubt and just picking a bitsize for them.
if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits {
kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits
}
if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits {
kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits
}
// Send GexGroup
// This is the group called diffie-hellman-group14-sha1 in RFC
// 4253 and Oakley Group 14 in RFC 3526.
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
gex.p = p
gex.g = big.NewInt(2)
g := big.NewInt(2)
kexDHGexGroup := kexDHGexGroupMsg{
P: gex.p,
G: gex.g,
msg := &kexDHGexGroupMsg{
P: p,
G: g,
}
if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil {
if err := c.writePacket(Marshal(msg)); err != nil {
return nil, err
}
@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
return
}
var pHalf = &big.Int{}
pHalf.Rsh(gex.p, 1)
pHalf := new(big.Int).Rsh(p, 1)
y, err := rand.Int(randSource, pHalf)
if err != nil {
return
}
Y := new(big.Int).Exp(g, y, p)
Y := new(big.Int).Exp(gex.g, y, gex.p)
kInt, err := gex.diffieHellman(kexDHGexInit.X, y)
if err != nil {
return nil, err
pMinusOne := new(big.Int).Sub(p, bigOne)
if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 {
return nil, errors.New("ssh: DH parameter out of bounds")
}
kInt := new(big.Int).Exp(kexDHGexInit.X, y, p)
hostKeyBytes := priv.PublicKey().Marshal()
@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
writeInt(h, gex.p)
writeInt(h, gex.g)
writeInt(h, p)
writeInt(h, g)
writeInt(h, kexDHGexInit.X)
writeInt(h, Y)
@ -758,7 +750,7 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
// H is already a hash, but the hostkey signing will apply its
// own key-specific hash algorithm.
sig, err := signAndMarshal(priv, randSource, H)
sig, err := signAndMarshal(priv, randSource, H, algo)
if err != nil {
return nil, err
}
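
The curve25519-sha256 exchange added above boils down to two X25519 operations per side; a self-contained sketch of that primitive using the curve25519 package (both key pairs are generated locally purely for illustration):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each side picks a random 32-byte scalar and publishes scalar * basepoint.
	var clientPriv, serverPriv [32]byte
	if _, err := rand.Read(clientPriv[:]); err != nil {
		log.Fatal(err)
	}
	if _, err := rand.Read(serverPriv[:]); err != nil {
		log.Fatal(err)
	}

	clientPub, err := curve25519.X25519(clientPriv[:], curve25519.Basepoint)
	if err != nil {
		log.Fatal(err)
	}
	serverPub, err := curve25519.X25519(serverPriv[:], curve25519.Basepoint)
	if err != nil {
		log.Fatal(err)
	}

	// Both sides derive the same shared secret; the kex feeds it into the
	// exchange hash together with the handshake transcript.
	k1, err := curve25519.X25519(clientPriv[:], serverPub)
	if err != nil {
		log.Fatal(err)
	}
	k2, err := curve25519.X25519(serverPriv[:], clientPub)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("shared secrets match:", bytes.Equal(k1, k2))
}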


@ -30,8 +30,9 @@ import (
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
)
// These constants represent the algorithm names for key types supported by this
// package.
// Public key algorithms names. These values can appear in PublicKey.Type,
// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner
// arguments.
const (
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
@ -41,16 +42,21 @@ const (
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com"
// KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not
// public key formats, so they can't appear as a PublicKey.Type. The
// corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2.
KeyAlgoRSASHA256 = "rsa-sha2-256"
KeyAlgoRSASHA512 = "rsa-sha2-512"
)
// These constants represent non-default signature algorithms that are supported
// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See
// [PROTOCOL.agent] section 4.5.1 and
// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10
const (
SigAlgoRSA = "ssh-rsa"
SigAlgoRSASHA2256 = "rsa-sha2-256"
SigAlgoRSASHA2512 = "rsa-sha2-512"
// Deprecated: use KeyAlgoRSA.
SigAlgoRSA = KeyAlgoRSA
// Deprecated: use KeyAlgoRSASHA256.
SigAlgoRSASHA2256 = KeyAlgoRSASHA256
// Deprecated: use KeyAlgoRSASHA512.
SigAlgoRSASHA2512 = KeyAlgoRSASHA512
)
// parsePubKey parses a public key of the given algorithm.
@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
case KeyAlgoSKED25519:
return parseSKEd25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
cert, err := parseCert(in, certToPrivAlgo(algo))
cert, err := parseCert(in, certKeyAlgoNames[algo])
if err != nil {
return nil, nil, err
}
@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte {
return b.Bytes()
}
// PublicKey is an abstraction of different types of public keys.
// PublicKey represents a public key using an unspecified algorithm.
//
// Some PublicKeys provided by this package also implement CryptoPublicKey.
type PublicKey interface {
// Type returns the key's type, e.g. "ssh-rsa".
// Type returns the key format name, e.g. "ssh-rsa".
Type() string
// Marshal returns the serialized key data in SSH wire format,
// with the name prefix. To unmarshal the returned data, use
// the ParsePublicKey function.
// Marshal returns the serialized key data in SSH wire format, with the name
// prefix. To unmarshal the returned data, use the ParsePublicKey function.
Marshal() []byte
// Verify that sig is a signature on the given data using this
// key. This function will hash the data appropriately first.
// Verify that sig is a signature on the given data using this key. This
// method will hash the data appropriately first. sig.Format is allowed to
// be any signature algorithm compatible with the key type, the caller
// should check if it has more stringent requirements.
Verify(data []byte, sig *Signature) error
}
@ -311,25 +320,32 @@ type CryptoPublicKey interface {
}
// A Signer can create signatures that verify against a public key.
//
// Some Signers provided by this package also implement AlgorithmSigner.
type Signer interface {
// PublicKey returns an associated PublicKey instance.
// PublicKey returns the associated PublicKey.
PublicKey() PublicKey
// Sign returns raw signature for the given data. This method
// will apply the hash specified for the keytype to the data.
// Sign returns a signature for the given data. This method will hash the
// data appropriately first. The signature algorithm is expected to match
// the key format returned by the PublicKey.Type method (and not to be any
// alternative algorithm supported by the key format).
Sign(rand io.Reader, data []byte) (*Signature, error)
}
// A AlgorithmSigner is a Signer that also supports specifying a specific
// algorithm to use for signing.
// An AlgorithmSigner is a Signer that also supports specifying an algorithm to
// use for signing.
//
// An AlgorithmSigner can't advertise the algorithms it supports, so it should
// be prepared to be invoked with every algorithm supported by the public key
// format.
type AlgorithmSigner interface {
Signer
// SignWithAlgorithm is like Signer.Sign, but allows specification of a
// non-default signing algorithm. See the SigAlgo* constants in this
// package for signature algorithms supported by this package. Callers may
// pass an empty string for the algorithm in which case the AlgorithmSigner
// will use its default algorithm.
// SignWithAlgorithm is like Signer.Sign, but allows specifying a desired
// signing algorithm. Callers may pass an empty string for the algorithm in
// which case the AlgorithmSigner will use a default algorithm. This default
// doesn't currently control any behavior in this package.
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
}
@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte {
}
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
var hash crypto.Hash
switch sig.Format {
case SigAlgoRSA:
hash = crypto.SHA1
case SigAlgoRSASHA2256:
hash = crypto.SHA256
case SigAlgoRSASHA2512:
hash = crypto.SHA512
default:
supportedAlgos := algorithmsForKeyFormat(r.Type())
if !contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
hash := hashFuncs[sig.Format]
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := crypto.SHA1.New()
h := hashFuncs[sig.Format].New()
h.Write(data)
digest := h.Sum(nil)
@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey {
}
func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
return k.SignWithAlgorithm(rand, data, "")
return k.SignWithAlgorithm(rand, data, k.PublicKey().Type())
}
func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
h := crypto.SHA1.New()
h := hashFuncs[k.PublicKey().Type()].New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool {
return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
}
// ecHash returns the hash to match the given elliptic curve, see RFC
// 5656, section 6.2.1
func ecHash(curve elliptic.Curve) crypto.Hash {
bitSize := curve.Params().BitSize
switch {
case bitSize <= 256:
return crypto.SHA256
case bitSize <= 384:
return crypto.SHA384
}
return crypto.SHA512
}
// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := ecHash(k.Curve).New()
h := hashFuncs[sig.Format].New()
h.Write(data)
digest := h.Sum(nil)
@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := ecHash(k.Curve).New()
h := hashFuncs[sig.Format].New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
h := sha256.New()
h := hashFuncs[sig.Format].New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@ -961,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey {
}
func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.SignWithAlgorithm(rand, data, "")
return s.SignWithAlgorithm(rand, data, s.pubKey.Type())
}
func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
var hashFunc crypto.Hash
if _, ok := s.pubKey.(*rsaPublicKey); ok {
// RSA keys support a few hash functions determined by the requested signature algorithm
switch algorithm {
case "", SigAlgoRSA:
algorithm = SigAlgoRSA
hashFunc = crypto.SHA1
case SigAlgoRSASHA2256:
hashFunc = crypto.SHA256
case SigAlgoRSASHA2512:
hashFunc = crypto.SHA512
default:
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
} else {
// The only supported algorithm for all other key types is the same as the type of the key
if algorithm == "" {
algorithm = s.pubKey.Type()
} else if algorithm != s.pubKey.Type() {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
switch key := s.pubKey.(type) {
case *dsaPublicKey:
hashFunc = crypto.SHA1
case *ecdsaPublicKey:
hashFunc = ecHash(key.Curve)
case ed25519PublicKey:
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
if algorithm == "" {
algorithm = s.pubKey.Type()
}
supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type())
if !contains(supportedAlgos, algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}
hashFunc := hashFuncs[algorithm]
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
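The rewrite above replaces the per-key-type hash selection (crypto.SHA1, ecHash, sha256.New) with a single hashFuncs table keyed by the signature algorithm name. A minimal sketch of that lookup pattern follows; the map, its string keys and digestFor are assumptions made up for illustration, not identifiers from this package.

package main

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256 with the crypto package
	_ "crypto/sha512" // register SHA-512
	"fmt"
)

// hashByAlgo maps a signature algorithm name to the hash applied before signing.
// A zero value means the algorithm signs the message directly (e.g. Ed25519).
var hashByAlgo = map[string]crypto.Hash{
	"rsa-sha2-256":        crypto.SHA256,
	"rsa-sha2-512":        crypto.SHA512,
	"ecdsa-sha2-nistp256": crypto.SHA256,
	"ssh-ed25519":         0,
}

func digestFor(algo string, data []byte) ([]byte, error) {
	h, ok := hashByAlgo[algo]
	if !ok {
		return nil, fmt.Errorf("unsupported signature algorithm %q", algo)
	}
	if h == 0 {
		return data, nil // no pre-hash; the signer consumes the raw message
	}
	hh := h.New()
	hh.Write(data)
	return hh.Sum(nil), nil
}

func main() {
	digest, err := digestFor("rsa-sha2-256", []byte("payload"))
	fmt.Println(len(digest), err) // 32 <nil>
}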


@ -141,6 +141,14 @@ type serviceAcceptMsg struct {
Service string `sshtype:"6"`
}
// See RFC 8308, section 2.3
const msgExtInfo = 7
type extInfoMsg struct {
NumExtensions uint32 `sshtype:"7"`
Payload []byte `ssh:"rest"`
}
// See RFC 4252, section 5.
const msgUserAuthRequest = 50
@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60
const msgUserAuthInfoResponse = 61
type userAuthInfoRequestMsg struct {
User string `sshtype:"60"`
Instruction string
DeprecatedLanguage string
NumPrompts uint32
Prompts []byte `ssh:"rest"`
Name string `sshtype:"60"`
Instruction string
Language string
NumPrompts uint32
Prompts []byte `ssh:"rest"`
}
// See RFC 4254, section 5.1.
@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) {
msg = new(serviceRequestMsg)
case msgServiceAccept:
msg = new(serviceAcceptMsg)
case msgExtInfo:
msg = new(extInfoMsg)
case msgKexInit:
msg = new(kexInitMsg)
case msgKexDHInit:
@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{
msgDisconnect: "disconnectMsg",
msgServiceRequest: "serviceRequestMsg",
msgServiceAccept: "serviceAcceptMsg",
msgExtInfo: "extInfoMsg",
msgKexInit: "kexInitMsg",
msgKexDHInit: "kexDHInitMsg",
msgKexDHReply: "kexDHReplyMsg",
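The new msgExtInfo case follows the same dispatch idea as the rest of decode: the first byte of a packet selects which message struct the payload is unmarshalled into. A standalone sketch of that dispatch, with made-up helper names and without the package's real Unmarshal step:

package main

import (
	"errors"
	"fmt"
)

// Message numbers as used above (RFC 4253 and RFC 8308).
const (
	msgServiceRequest = 5
	msgServiceAccept  = 6
	msgExtInfo        = 7 // RFC 8308, section 2.3
)

// messageName returns a readable name for the message number that prefixes an
// SSH packet; the real decode allocates the matching struct and unmarshals into it.
func messageName(packet []byte) (string, error) {
	if len(packet) == 0 {
		return "", errors.New("empty packet")
	}
	switch packet[0] {
	case msgServiceRequest:
		return "serviceRequestMsg", nil
	case msgServiceAccept:
		return "serviceAcceptMsg", nil
	case msgExtInfo:
		return "extInfoMsg", nil
	}
	return "", fmt.Errorf("unexpected message type %d", packet[0])
}

func main() {
	name, _ := messageName([]byte{msgExtInfo, 0, 0, 0, 1})
	fmt.Println(name) // extInfoMsg
}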


@ -120,7 +120,7 @@ type ServerConfig struct {
}
// AddHostKey adds a private key as a host key. If an existing host
// key exists with the same algorithm, it is overwritten. Each server
// key exists with the same public key format, it is replaced. Each server
// config must have at least one host key.
func (s *ServerConfig) AddHostKey(key Signer) {
for i, k := range s.hostKeys {
@ -212,9 +212,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
}
// signAndMarshal signs the data with the appropriate algorithm,
// and serializes the result in SSH wire format.
func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
sig, err := k.Sign(rand, data)
// and serializes the result in SSH wire format. algo is the negotiated
// algorithm and may be a certificate type.
func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) {
sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo))
if err != nil {
return nil, err
}
@ -284,7 +285,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
return true
}
@ -553,6 +554,7 @@ userAuthLoop:
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}
// Ensure the public key algo and signature algo
// are supported. Compare the private key
// algorithm name that corresponds to algo with
@ -562,7 +564,12 @@ userAuthLoop:
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
if underlyingAlgo(algo) != sig.Format {
authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo)
break
}
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData)
if err := pubKey.Verify(signedData, sig); err != nil {
return nil, err
@ -633,6 +640,30 @@ userAuthLoop:
}
authFailures++
if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
// If we have hit the max attempts, don't bother sending the
// final SSH_MSG_USERAUTH_FAILURE message, since there are
// no more authentication methods which can be attempted,
// and this message may cause the client to re-attempt
// authentication while we send the disconnect message.
// Continue, and trigger the disconnect at the start of
// the loop.
//
// The SSH specification is somewhat confusing about this:
// RFC 4252 Section 5.1 requires each authentication failure
// be responded to with a respective SSH_MSG_USERAUTH_FAILURE
// message, but Section 4 says the server should disconnect
// after some number of attempts, but it isn't explicit which
// message should take precedence (i.e. should there be a failure
// message then a disconnect message, or if we are going to
// disconnect, should we only send that message.)
//
// Either way, OpenSSH disconnects immediately after the last
// failed authentication attempt, and given they are typically
// considered the golden implementation it seems reasonable
// to match that behavior.
continue
}
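For context, the disconnect behaviour described in the comment above is controlled by ServerConfig.MaxAuthTries. A minimal configuration sketch, assuming a host key Signer is obtained elsewhere; the always-rejecting callback is only a placeholder:

package example

import (
	"errors"

	"golang.org/x/crypto/ssh"
)

func newServerConfig(hostKey ssh.Signer) *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		// After three failed attempts the server stops replying with
		// SSH_MSG_USERAUTH_FAILURE and disconnects, matching OpenSSH.
		MaxAuthTries: 3,
		PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
			return nil, errors.New("password authentication rejected in this sketch")
		},
	}
	cfg.AddHostKey(hostKey)
	return cfg
}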
var failureMsg userAuthFailureMsg
if config.PasswordCallback != nil {
@ -670,7 +701,7 @@ type sshClientKeyboardInteractive struct {
*connection
}
func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) {
if len(questions) != len(echos) {
return nil, errors.New("ssh: echos and questions must have equal length")
}
@ -682,6 +713,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest
}
if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
Name: name,
Instruction: instruction,
NumPrompts: uint32(len(questions)),
Prompts: prompts,


@ -85,6 +85,7 @@ const (
IXANY = 39
IXOFF = 40
IMAXBEL = 41
IUTF8 = 42 // RFC 8160
ISIG = 50
ICANON = 51
XCASE = 52
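The added IUTF8 mode (RFC 8160) lets a client declare that its terminal input is UTF-8 when requesting a PTY. A small client-side sketch, assuming an established *ssh.Session and the package's usual TerminalModes/RequestPty API:

package example

import "golang.org/x/crypto/ssh"

// requestUTF8PTY asks for a 40x80 PTY with echo enabled and IUTF8 switched on.
func requestUTF8PTY(sess *ssh.Session) error {
	modes := ssh.TerminalModes{
		ssh.ECHO:  1,
		ssh.IUTF8: 1, // RFC 8160: terminal input is UTF-8
	}
	return sess.RequestPty("xterm-256color", 40, 80, modes)
}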


@ -238,15 +238,19 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
macMode := macModes[algs.MAC]
iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)
macKey := make([]byte, macMode.keySize)
generateKeyMaterial(iv, d.ivTag, kex)
generateKeyMaterial(key, d.keyTag, kex)
generateKeyMaterial(macKey, d.macKeyTag, kex)
var macKey []byte
if !aeadCiphers[algs.Cipher] {
macMode := macModes[algs.MAC]
macKey = make([]byte, macMode.keySize)
generateKeyMaterial(macKey, d.macKeyTag, kex)
}
return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
}
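The change above skips MAC key derivation when the negotiated cipher is an AEAD mode, since such ciphers authenticate packets themselves. A trivial standalone restatement of that decision; the cipher names and the helpers are illustrative assumptions:

package example

// aeadCipher reports whether a cipher provides its own integrity protection,
// in which case no separate MAC key material needs to be derived.
func aeadCipher(name string) bool {
	switch name {
	case "aes128-gcm@openssh.com", "aes256-gcm@openssh.com", "chacha20-poly1305@openssh.com":
		return true
	}
	return false
}

// macKeyLen returns how many MAC key bytes to derive for a cipher; zero means
// the MAC key generation step is skipped entirely, as in newPacketCipher above.
func macKeyLen(cipher string, macKeySize int) int {
	if aeadCipher(cipher) {
		return 0
	}
	return macKeySize
}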

187
vendor/golang.org/x/text/runes/cond.go generated vendored Normal file

@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runes
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
// This is done for various reasons:
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
// one would expect it to be unchanged.
// - It would be very expensive to pass a converted RuneError to a transformer:
// a transformer might need more source bytes after RuneError, meaning that
// the only way to pass it safely is to create a new buffer and manage the
// intermingling of RuneErrors and normal input.
// - Many transformers leave ill-formed UTF-8 as is, so this is not
// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
// logical consequence of the operation (as for Map) or if it otherwise would
// pose security concerns (as for Remove).
// - An alternative would be to return an error on ill-formed UTF-8, but this
// would be inconsistent with other operations.
// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A Nop transformer will
// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
if tIn == nil && tNotIn == nil {
return Transformer{transform.Nop}
}
if tIn == nil {
tIn = transform.Nop
}
if tNotIn == nil {
tNotIn = transform.Nop
}
sIn, ok := tIn.(transform.SpanningTransformer)
if !ok {
sIn = dummySpan{tIn}
}
sNotIn, ok := tNotIn.(transform.SpanningTransformer)
if !ok {
sNotIn = dummySpan{tNotIn}
}
a := &cond{
tIn: sIn,
tNotIn: sNotIn,
f: s.Contains,
}
a.Reset()
return Transformer{a}
}
type dummySpan struct{ transform.Transformer }
func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
return 0, transform.ErrEndOfSpan
}
type cond struct {
tIn, tNotIn transform.SpanningTransformer
f func(rune) bool
check func(rune) bool // current check to perform
t transform.SpanningTransformer // current transformer to use
}
// Reset implements transform.Transformer.
func (t *cond) Reset() {
t.check = t.is
t.t = t.tIn
t.t.Reset() // notIn will be reset on first usage.
}
func (t *cond) is(r rune) bool {
if t.f(r) {
return true
}
t.check = t.isNot
t.t = t.tNotIn
t.tNotIn.Reset()
return false
}
func (t *cond) isNot(r rune) bool {
if !t.f(r) {
return true
}
t.check = t.is
t.t = t.tIn
t.tIn.Reset()
return false
}
// This implementation of Span doesn't help all too much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvement, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a bundle
// to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
p := 0
for n < len(src) && err == nil {
// Don't process too much at a time as the Spanner that will be
// called on this block may terminate early.
const maxChunk = 4096
max := len(src)
if v := n + maxChunk; v < max {
max = v
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
n += n2
if err2 != nil {
return n, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = n + size
}
return n, err
}
func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
p := 0
for nSrc < len(src) && err == nil {
// Don't process too much at a time, as the work might be wasted if the
// destination buffer isn't large enough to hold the result or a
// transform returns an error early.
const maxChunk = 4096
max := len(src)
if n := nSrc + maxChunk; n < len(src) {
max = n
}
atEnd := false
size := 0
current := t.t
for ; p < max; p += size {
r := rune(src[p])
if r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
if !atEOF && !utf8.FullRune(src[p:]) {
err = transform.ErrShortSrc
break
}
}
if !t.check(r) {
// The next rune will be the start of a new run.
atEnd = true
break
}
}
nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
nDst += nDst2
nSrc += nSrc2
if err2 != nil {
return nDst, nSrc, err2
}
// At this point either err != nil or t.check will pass for the rune at p.
p = nSrc + size
}
return nDst, nSrc, err
}
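A short usage sketch of the If combinator defined above: apply one transformer to runes inside a set and leave everything else untouched. Here only Latin-script runes are upper-cased; the input string is just an illustration.

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// nil for the "not in set" branch means transform.Nop, so digits and
	// Greek letters pass through unchanged.
	t := runes.If(runes.In(unicode.Latin), runes.Map(unicode.ToUpper), nil)
	fmt.Println(t.String("abc 123 αβγ")) // ABC 123 αβγ
}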

355
vendor/golang.org/x/text/runes/runes.go generated vendored Normal file

@ -0,0 +1,355 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package runes provide transforms for UTF-8 encoded text.
package runes // import "golang.org/x/text/runes"
import (
"unicode"
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Set is a collection of runes.
type Set interface {
// Contains returns true if r is contained in the set.
Contains(r rune) bool
}
type setFunc func(rune) bool
func (s setFunc) Contains(r rune) bool {
return s(r)
}
// Note: using funcs here instead of wrapping types results in cleaner
// documentation and a smaller API.
// In creates a Set with a Contains method that returns true for all runes in
// the given RangeTable.
func In(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
}
// NotIn creates a Set with a Contains method that returns true for all runes not
// in the given RangeTable.
func NotIn(rt *unicode.RangeTable) Set {
return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
}
// Predicate creates a Set with a Contains method that returns f(r).
func Predicate(f func(rune) bool) Set {
return setFunc(f)
}
// Transformer implements the transform.Transformer interface.
type Transformer struct {
t transform.SpanningTransformer
}
func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
return t.t.Transform(dst, src, atEOF)
}
func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
return t.t.Span(b, atEOF)
}
func (t Transformer) Reset() { t.t.Reset() }
// Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If.
func (t Transformer) Bytes(b []byte) []byte {
b, _, err := transform.Bytes(t, b)
if err != nil {
return nil
}
return b
}
// String returns a string with the result of converting s using t. It calls
// Reset on t. It returns the empty string if any error was found. This can only
// happen if an error-producing Transformer is passed to If.
func (t Transformer) String(s string) string {
s, _, err := transform.String(t, s)
if err != nil {
return ""
}
return s
}
// TODO:
// - Copy: copying strings and bytes in whole-rune units.
// - Validation (maybe)
// - Well-formed-ness (maybe)
const runeErrorString = string(utf8.RuneError)
// Remove returns a Transformer that removes runes r for which s.Contains(r).
// Illegal input bytes are replaced by RuneError before being passed to f.
func Remove(s Set) Transformer {
if f, ok := s.(setFunc); ok {
// This little trick cuts the running time of BenchmarkRemove for sets
// created by Predicate roughly in half.
// TODO: special-case RangeTables as well.
return Transformer{remove(f)}
}
return Transformer{remove(s.Contains)}
}
// TODO: remove transform.RemoveFunc.
type remove func(r rune) bool
func (remove) Reset() {}
// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) {
err = transform.ErrEndOfSpan
break
}
n += size
}
return
}
// Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We replace illegal bytes with RuneError. Not doing so might
// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
// The resulting byte sequence may subsequently contain runes
// for which t(r) is true that were passed unnoticed.
if !t(utf8.RuneError) {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
}
nSrc++
continue
}
if t(r) {
nSrc += size
continue
}
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
}
return
}
// Map returns a Transformer that maps the runes in the input using the given
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
// being passed to the mapping func.
func Map(mapping func(rune) rune) Transformer {
return Transformer{mapper(mapping)}
}
type mapper func(rune) rune
func (mapper) Reset() {}
// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
for r, size := rune(0), 0; n < len(src); n += size {
if r = rune(src[n]); r < utf8.RuneSelf {
size = 1
} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
} else {
err = transform.ErrEndOfSpan
}
break
}
if t(r) != r {
err = transform.ErrEndOfSpan
break
}
}
return n, err
}
// Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
var replacement rune
var b [utf8.UTFMax]byte
for r, size := rune(0), 0; nSrc < len(src); {
if r = rune(src[nSrc]); r < utf8.RuneSelf {
if replacement = t(r); replacement < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = byte(replacement)
nDst++
nSrc++
continue
}
size = 1
} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
// Invalid rune.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
continue
}
} else if replacement = t(r); replacement == r {
if nDst+size > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < size; i++ {
dst[nDst] = src[nSrc]
nDst++
nSrc++
}
continue
}
n := utf8.EncodeRune(b[:], replacement)
if nDst+n > len(dst) {
err = transform.ErrShortDst
break
}
for i := 0; i < n; i++ {
dst[nDst] = b[i]
nDst++
}
nSrc += size
}
return
}
// ReplaceIllFormed returns a transformer that replaces all input bytes that are
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
func ReplaceIllFormed() Transformer {
return Transformer{&replaceIllFormed{}}
}
type replaceIllFormed struct{ transform.NopResetter }
func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
for n < len(src) {
// ASCII fast path.
if src[n] < utf8.RuneSelf {
n++
continue
}
r, size := utf8.DecodeRune(src[n:])
// Look for a valid non-ASCII rune.
if r != utf8.RuneError || size != 1 {
n += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[n:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
err = transform.ErrEndOfSpan
break
}
return n, err
}
func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
for nSrc < len(src) {
// ASCII fast path.
if r := src[nSrc]; r < utf8.RuneSelf {
if nDst == len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst] = r
nDst++
nSrc++
continue
}
// Look for a valid non-ASCII rune.
if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
err = transform.ErrShortDst
break
}
nDst += size
nSrc += size
continue
}
// Look for short source data.
if !atEOF && !utf8.FullRune(src[nSrc:]) {
err = transform.ErrShortSrc
break
}
// We have an invalid rune.
if nDst+3 > len(dst) {
err = transform.ErrShortDst
break
}
dst[nDst+0] = runeErrorString[0]
dst[nDst+1] = runeErrorString[1]
dst[nDst+2] = runeErrorString[2]
nDst += 3
nSrc++
}
return nDst, nSrc, err
}
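These transformers are commonly chained with the norm package that is also vendored in this commit, for example to strip diacritics: decompose, drop the non-spacing marks, then recompose. A small usage sketch:

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// NFD splits precomposed characters, Remove deletes combining marks
	// (Unicode category Mn), and NFC recomposes whatever is left.
	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
	s, _, err := transform.String(t, "Crème brûlée")
	fmt.Println(s, err) // Creme brulee <nil>
}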

512
vendor/golang.org/x/text/unicode/norm/composition.go generated vendored Normal file

@ -0,0 +1,512 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "unicode/utf8"
const (
maxNonStarters = 30
// The maximum number of characters needed for a buffer is
// maxNonStarters + 1 for the starter + 1 for the CGJ
maxBufferSize = maxNonStarters + 2
maxNFCExpansion = 3 // NFC(0x1D160)
maxNFKCExpansion = 18 // NFKC(0xFDFA)
maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
)
// ssState is used for reporting the segment state after inserting a rune.
// It is returned by streamSafe.next.
type ssState int
const (
// Indicates a rune was successfully added to the segment.
ssSuccess ssState = iota
// Indicates a rune starts a new segment and should not be added.
ssStarter
// Indicates a rune caused a segment overflow and a CGJ should be inserted.
ssOverflow
)
// streamSafe implements the policy of when a CGJ should be inserted.
type streamSafe uint8
// first inserts the first rune of a segment. It is a faster version of next if
// it is known p represents the first rune in a segment.
func (ss *streamSafe) first(p Properties) {
*ss = streamSafe(p.nTrailingNonStarters())
}
// next returns an ssState value to indicate whether a rune represented by p
// can be inserted.
func (ss *streamSafe) next(p Properties) ssState {
if *ss > maxNonStarters {
panic("streamSafe was not reset")
}
n := p.nLeadingNonStarters()
if *ss += streamSafe(n); *ss > maxNonStarters {
*ss = 0
return ssOverflow
}
// The Stream-Safe Text Processing prescribes that the counting can stop
// as soon as a starter is encountered. However, there are some starters,
// like Jamo V and T, that can combine with other runes, leaving their
// successive non-starters appended to the previous, possibly causing an
// overflow. We will therefore consider any rune with a non-zero nLead to
// be a non-starter. Note that it always holds that if nLead > 0 then
// nLead == nTrail.
if n == 0 {
*ss = streamSafe(p.nTrailingNonStarters())
return ssStarter
}
return ssSuccess
}
// backwards is used for checking for overflow and segment starts
// when traversing a string backwards. Users do not need to call first
// for the first rune. The state of the streamSafe retains the count of
// the non-starters loaded.
func (ss *streamSafe) backwards(p Properties) ssState {
if *ss > maxNonStarters {
panic("streamSafe was not reset")
}
c := *ss + streamSafe(p.nTrailingNonStarters())
if c > maxNonStarters {
return ssOverflow
}
*ss = c
if p.nLeadingNonStarters() == 0 {
return ssStarter
}
return ssSuccess
}
func (ss streamSafe) isMax() bool {
return ss == maxNonStarters
}
// GraphemeJoiner is inserted after maxNonStarters non-starter runes.
const GraphemeJoiner = "\u034F"
// reorderBuffer is used to normalize a single segment. Characters inserted with
// insert are decomposed and reordered based on CCC. The compose method can
// be used to recombine characters. Note that the byte buffer does not hold
// the UTF-8 characters in order. Only the rune array is maintained in sorted
// order. flush writes the resulting segment to a byte array.
type reorderBuffer struct {
rune [maxBufferSize]Properties // Per character info.
byte [maxByteBufferSize]byte // UTF-8 buffer. Referenced by runeInfo.pos.
nbyte uint8 // Number of bytes.
ss streamSafe // For limiting length of non-starter sequence.
nrune int // Number of runeInfos.
f formInfo
src input
nsrc int
tmpBytes input
out []byte
flushF func(*reorderBuffer) bool
}
func (rb *reorderBuffer) init(f Form, src []byte) {
rb.f = *formTable[f]
rb.src.setBytes(src)
rb.nsrc = len(src)
rb.ss = 0
}
func (rb *reorderBuffer) initString(f Form, src string) {
rb.f = *formTable[f]
rb.src.setString(src)
rb.nsrc = len(src)
rb.ss = 0
}
func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
rb.out = out
rb.flushF = f
}
// reset discards all characters from the buffer.
func (rb *reorderBuffer) reset() {
rb.nrune = 0
rb.nbyte = 0
}
func (rb *reorderBuffer) doFlush() bool {
if rb.f.composing {
rb.compose()
}
res := rb.flushF(rb)
rb.reset()
return res
}
// appendFlush appends the normalized segment to rb.out.
func appendFlush(rb *reorderBuffer) bool {
for i := 0; i < rb.nrune; i++ {
start := rb.rune[i].pos
end := start + rb.rune[i].size
rb.out = append(rb.out, rb.byte[start:end]...)
}
return true
}
// flush appends the normalized segment to out and resets rb.
func (rb *reorderBuffer) flush(out []byte) []byte {
for i := 0; i < rb.nrune; i++ {
start := rb.rune[i].pos
end := start + rb.rune[i].size
out = append(out, rb.byte[start:end]...)
}
rb.reset()
return out
}
// flushCopy copies the normalized segment to buf and resets rb.
// It returns the number of bytes written to buf.
func (rb *reorderBuffer) flushCopy(buf []byte) int {
p := 0
for i := 0; i < rb.nrune; i++ {
runep := rb.rune[i]
p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size])
}
rb.reset()
return p
}
// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining Class.
// It returns false if the buffer is not large enough to hold the rune.
// It is used internally by insert and insertString only.
func (rb *reorderBuffer) insertOrdered(info Properties) {
n := rb.nrune
b := rb.rune[:]
cc := info.ccc
if cc > 0 {
// Find insertion position + move elements to make room.
for ; n > 0; n-- {
if b[n-1].ccc <= cc {
break
}
b[n] = b[n-1]
}
}
rb.nrune += 1
pos := uint8(rb.nbyte)
rb.nbyte += utf8.UTFMax
info.pos = pos
b[n] = info
}
// insertErr is an error code returned by insert. Using this type instead
// of error improves performance up to 20% for many of the benchmarks.
type insertErr int
const (
iSuccess insertErr = -iota
iShortDst
iShortSrc
)
// insertFlush inserts the given rune in the buffer ordered by CCC.
// If a decomposition with multiple segments is encountered, the leading
// ones are flushed.
// It returns a non-zero error code if the rune was not inserted.
func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
if rune := src.hangul(i); rune != 0 {
rb.decomposeHangul(rune)
return iSuccess
}
if info.hasDecomposition() {
return rb.insertDecomposed(info.Decomposition())
}
rb.insertSingle(src, i, info)
return iSuccess
}
// insertUnsafe inserts the given rune in the buffer ordered by CCC.
// It is assumed there is sufficient space to hold the runes. It is the
// responsibility of the caller to ensure this. This can be done by checking
// the state returned by the streamSafe type.
func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
if rune := src.hangul(i); rune != 0 {
rb.decomposeHangul(rune)
}
if info.hasDecomposition() {
// TODO: inline.
rb.insertDecomposed(info.Decomposition())
} else {
rb.insertSingle(src, i, info)
}
}
// insertDecomposed inserts an entry into the reorderBuffer for each rune
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
// It flushes the buffer on each new segment start.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
rb.tmpBytes.setBytes(dcomp)
// As the streamSafe accounting already handles the counting for modifiers,
// we don't have to call next. However, we do need to keep the accounting
// intact when flushing the buffer.
for i := 0; i < len(dcomp); {
info := rb.f.info(rb.tmpBytes, i)
if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
return iShortDst
}
i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
rb.insertOrdered(info)
}
return iSuccess
}
// insertSingle inserts an entry in the reorderBuffer for the rune at
// position i. info is the runeInfo for the rune at position i.
func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
rb.insertOrdered(info)
}
// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
func (rb *reorderBuffer) insertCGJ() {
rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
}
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
func (rb *reorderBuffer) appendRune(r rune) {
bn := rb.nbyte
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.nbyte += utf8.UTFMax
rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)}
rb.nrune++
}
// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
func (rb *reorderBuffer) assignRune(pos int, r rune) {
bn := rb.rune[pos].pos
sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
rb.rune[pos] = Properties{pos: bn, size: uint8(sz)}
}
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
func (rb *reorderBuffer) runeAt(n int) rune {
inf := rb.rune[n]
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
return r
}
// bytesAt returns the UTF-8 encoding of the rune at position n.
// It is used for Hangul and recomposition.
func (rb *reorderBuffer) bytesAt(n int) []byte {
inf := rb.rune[n]
return rb.byte[inf.pos : int(inf.pos)+int(inf.size)]
}
// For Hangul we combine algorithmically, instead of using tables.
const (
hangulBase = 0xAC00 // UTF-8(hangulBase) -> EA B0 80
hangulBase0 = 0xEA
hangulBase1 = 0xB0
hangulBase2 = 0x80
hangulEnd = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4
hangulEnd0 = 0xED
hangulEnd1 = 0x9E
hangulEnd2 = 0xA4
jamoLBase = 0x1100 // UTF-8(jamoLBase) -> E1 84 00
jamoLBase0 = 0xE1
jamoLBase1 = 0x84
jamoLEnd = 0x1113
jamoVBase = 0x1161
jamoVEnd = 0x1176
jamoTBase = 0x11A7
jamoTEnd = 0x11C3
jamoTCount = 28
jamoVCount = 21
jamoVTCount = 21 * 28
jamoLVTCount = 19 * 21 * 28
)
const hangulUTF8Size = 3
func isHangul(b []byte) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
}
b1 := b[1]
switch {
case b0 == hangulBase0:
return b1 >= hangulBase1
case b0 < hangulEnd0:
return true
case b0 > hangulEnd0:
return false
case b1 < hangulEnd1:
return true
}
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
func isHangulString(b string) bool {
if len(b) < hangulUTF8Size {
return false
}
b0 := b[0]
if b0 < hangulBase0 {
return false
}
b1 := b[1]
switch {
case b0 == hangulBase0:
return b1 >= hangulBase1
case b0 < hangulEnd0:
return true
case b0 > hangulEnd0:
return false
case b1 < hangulEnd1:
return true
}
return b1 == hangulEnd1 && b[2] < hangulEnd2
}
// Caller must ensure len(b) >= 2.
func isJamoVT(b []byte) bool {
// True if (rune & 0xff00) == jamoLBase
return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1
}
func isHangulWithoutJamoT(b []byte) bool {
c, _ := utf8.DecodeRune(b)
c -= hangulBase
return c < jamoLVTCount && c%jamoTCount == 0
}
// decomposeHangul writes the decomposed Hangul to buf and returns the number
// of bytes written. len(buf) should be at least 9.
func decomposeHangul(buf []byte, r rune) int {
const JamoUTF8Len = 3
r -= hangulBase
x := r % jamoTCount
r /= jamoTCount
utf8.EncodeRune(buf, jamoLBase+r/jamoVCount)
utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount)
if x != 0 {
utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x)
return 3 * JamoUTF8Len
}
return 2 * JamoUTF8Len
}
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components.
// See https://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
func (rb *reorderBuffer) decomposeHangul(r rune) {
r -= hangulBase
x := r % jamoTCount
r /= jamoTCount
rb.appendRune(jamoLBase + r/jamoVCount)
rb.appendRune(jamoVBase + r%jamoVCount)
if x != 0 {
rb.appendRune(jamoTBase + x)
}
}
// combineHangul algorithmically combines Jamo character components into Hangul.
// See https://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
func (rb *reorderBuffer) combineHangul(s, i, k int) {
b := rb.rune[:]
bn := rb.nrune
for ; i < bn; i++ {
cccB := b[k-1].ccc
cccC := b[i].ccc
if cccB == 0 {
s = k - 1
}
if s != k-1 && cccB >= cccC {
// b[i] is blocked by greater-equal cccX below it
b[k] = b[i]
k++
} else {
l := rb.runeAt(s) // also used to compare to hangulBase
v := rb.runeAt(i) // also used to compare to jamoT
switch {
case jamoLBase <= l && l < jamoLEnd &&
jamoVBase <= v && v < jamoVEnd:
// 11xx plus 116x to LV
rb.assignRune(s, hangulBase+
(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
case hangulBase <= l && l < hangulEnd &&
jamoTBase < v && v < jamoTEnd &&
((l-hangulBase)%jamoTCount) == 0:
// ACxx plus 11Ax to LVT
rb.assignRune(s, l+v-jamoTBase)
default:
b[k] = b[i]
k++
}
}
}
rb.nrune = k
}
// compose recombines the runes in the buffer.
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
// Lazily load the map used by the combine func below, but do
// it outside of the loop.
recompMapOnce.Do(buildRecompMap)
// UAX #15, section X5, including Corrigendum #5
// "In any character sequence beginning with starter S, a character C is
// blocked from S if and only if there is some character B between S
// and C, and either B is a starter or it has the same or higher
// combining class as C."
bn := rb.nrune
if bn == 0 {
return
}
k := 1
b := rb.rune[:]
for s, i := 0, 1; i < bn; i++ {
if isJamoVT(rb.bytesAt(i)) {
// Redo from start in Hangul mode. Necessary to support
// U+320E..U+321E in NFKC mode.
rb.combineHangul(s, i, k)
return
}
ii := b[i]
// We can only use combineForward as a filter if we later
// get the info for the combined character. This is more
// expensive than using the filter. Using combinesBackward()
// is safe.
if ii.combinesBackward() {
cccB := b[k-1].ccc
cccC := ii.ccc
blocked := false // b[i] blocked by starter or greater or equal CCC?
if cccB == 0 {
s = k - 1
} else {
blocked = s != k-1 && cccB >= cccC
}
if !blocked {
combined := combine(rb.runeAt(s), rb.runeAt(i))
if combined != 0 {
rb.assignRune(s, combined)
continue
}
}
}
b[k] = b[i]
k++
}
rb.nrune = k
}
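The Hangul arithmetic above is what lets norm.NFD split a precomposed syllable into its Jamo without table lookups. A tiny demonstration through the public API; the code points in the comment are the expected algorithmic decomposition.

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// U+D55C (한) decomposes into the Jamo U+1112, U+1161, U+11AB.
	for _, r := range norm.NFD.String("한") {
		fmt.Printf("U+%04X ", r)
	}
	fmt.Println()
}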

279
vendor/golang.org/x/text/unicode/norm/forminfo.go generated vendored Normal file

@ -0,0 +1,279 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "encoding/binary"
// This file contains Form-specific logic and wrappers for data in tables.go.
// Rune info is stored in a separate trie per composing form. A composing form
// and its corresponding decomposing form share the same trie. Each trie maps
// a rune to a uint16. The values take two forms. For v >= 0x8000:
// bits
// 15: 1 (inverse of NFD_QC bit of qcInfo)
// 13..7: qcInfo (see below). isYesD is always true (no decomposition).
// 6..0: ccc (compressed CCC value).
// For v < 0x8000, the respective rune has a decomposition and v is an index
// into a byte array of UTF-8 decomposition sequences and additional info and
// has the form:
// <header> <decomp_byte>* [<tccc> [<lccc>]]
// The header contains the number of bytes in the decomposition (excluding this
// length byte). The two most significant bits of this length byte correspond
// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
// The byte sequence is followed by a trailing and leading CCC if the values
// for these are not zero. The value of v determines which ccc are appended
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCCC
// there is an additional leading ccc. The value of tccc itself is the
// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
// are the number of trailing non-starters.
const (
qcInfoMask = 0x3F // to clear all but the relevant bits in a qcInfo
headerLenMask = 0x3F // extract the length value from the header byte
headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)
// Properties provides access to normalization properties of a rune.
type Properties struct {
pos uint8 // start position in reorderBuffer; used in composition.go
size uint8 // length of UTF-8 encoding of this rune
ccc uint8 // leading canonical combining class (ccc if not decomposition)
tccc uint8 // trailing canonical combining class (ccc if not decomposition)
nLead uint8 // number of leading non-starters.
flags qcInfo // quick check flags
index uint16
}
// functions dispatchable per form
type lookupFunc func(b input, i int) Properties
// formInfo holds Form-specific functions and tables.
type formInfo struct {
form Form
composing, compatibility bool // form type
info lookupFunc
nextMain iterFunc
}
var formTable = []*formInfo{{
form: NFC,
composing: true,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextComposed,
}, {
form: NFD,
composing: false,
compatibility: false,
info: lookupInfoNFC,
nextMain: nextDecomposed,
}, {
form: NFKC,
composing: true,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextComposed,
}, {
form: NFKD,
composing: false,
compatibility: true,
info: lookupInfoNFKC,
nextMain: nextDecomposed,
}}
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
// unexpected behavior for the user. For example, in NFD, there is a boundary
// after 'a'. However, 'a' might combine with modifiers, so from the application's
// perspective it is not a good boundary. We will therefore always use the
// boundaries for the combining variants.
// BoundaryBefore returns true if this rune starts a new segment and
// cannot combine with any rune on the left.
func (p Properties) BoundaryBefore() bool {
if p.ccc == 0 && !p.combinesBackward() {
return true
}
// We assume that the CCC of the first character in a decomposition
// is always non-zero if different from info.ccc and that we can return
// false at this point. This is verified by maketables.
return false
}
// BoundaryAfter returns true if runes cannot combine with or otherwise
// interact with this or previous runes.
func (p Properties) BoundaryAfter() bool {
// TODO: loosen these conditions.
return p.isInert()
}
// We pack quick check data in 4 bits:
//
// 5: Combines forward (0 == false, 1 == true)
// 4..3: NFC_QC Yes(00), No (10), or Maybe (11)
// 2: NFD_QC Yes (0) or No (1). No also means there is a decomposition.
// 1..0: Number of trailing non-starters.
//
// When all 4 bits are zero, the character is inert, meaning it is never
// influenced by normalization.
type qcInfo uint8
func (p Properties) isYesC() bool { return p.flags&0x10 == 0 }
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }
func (p Properties) combinesForward() bool { return p.flags&0x20 != 0 }
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD
func (p Properties) isInert() bool {
return p.flags&qcInfoMask == 0 && p.ccc == 0
}
func (p Properties) multiSegment() bool {
return p.index >= firstMulti && p.index < endMulti
}
func (p Properties) nLeadingNonStarters() uint8 {
return p.nLead
}
func (p Properties) nTrailingNonStarters() uint8 {
return uint8(p.flags & 0x03)
}
// Decomposition returns the decomposition for the underlying rune
// or nil if there is none.
func (p Properties) Decomposition() []byte {
// TODO: create the decomposition for Hangul?
if p.index == 0 {
return nil
}
i := p.index
n := decomps[i] & headerLenMask
i++
return decomps[i : i+uint16(n)]
}
// Size returns the length of UTF-8 encoding of the rune.
func (p Properties) Size() int {
return int(p.size)
}
// CCC returns the canonical combining class of the underlying rune.
func (p Properties) CCC() uint8 {
if p.index >= firstCCCZeroExcept {
return 0
}
return ccc[p.ccc]
}
// LeadCCC returns the CCC of the first rune in the decomposition.
// If there is no decomposition, LeadCCC equals CCC.
func (p Properties) LeadCCC() uint8 {
return ccc[p.ccc]
}
// TrailCCC returns the CCC of the last rune in the decomposition.
// If there is no decomposition, TrailCCC equals CCC.
func (p Properties) TrailCCC() uint8 {
return ccc[p.tccc]
}
func buildRecompMap() {
recompMap = make(map[uint32]rune, len(recompMapPacked)/8)
var buf [8]byte
for i := 0; i < len(recompMapPacked); i += 8 {
copy(buf[:], recompMapPacked[i:i+8])
key := binary.BigEndian.Uint32(buf[:4])
val := binary.BigEndian.Uint32(buf[4:])
recompMap[key] = rune(val)
}
}
// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
// result in a collision. In the unlikely event that changes to
// UnicodeData.txt introduce collisions, the compiler will catch it.
// Note that the recomposition map for NFC and NFKC are identical.
// combine returns the combined rune or 0 if it doesn't exist.
//
// The caller is responsible for calling
// recompMapOnce.Do(buildRecompMap) sometime before this is called.
func combine(a, b rune) rune {
key := uint32(uint16(a))<<16 + uint32(uint16(b))
if recompMap == nil {
panic("caller error") // see func comment
}
return recompMap[key]
}
func lookupInfoNFC(b input, i int) Properties {
v, sz := b.charinfoNFC(i)
return compInfo(v, sz)
}
func lookupInfoNFKC(b input, i int) Properties {
v, sz := b.charinfoNFKC(i)
return compInfo(v, sz)
}
// Properties returns properties for the first rune in s.
func (f Form) Properties(s []byte) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookup(s))
}
return compInfo(nfkcData.lookup(s))
}
// PropertiesString returns properties for the first rune in s.
func (f Form) PropertiesString(s string) Properties {
if f == NFC || f == NFD {
return compInfo(nfcData.lookupString(s))
}
return compInfo(nfkcData.lookupString(s))
}
// compInfo converts the information contained in v and sz
// to a Properties. See the comment at the top of the file
// for more information on the format.
func compInfo(v uint16, sz int) Properties {
if v == 0 {
return Properties{size: uint8(sz)}
} else if v >= 0x8000 {
p := Properties{
size: uint8(sz),
ccc: uint8(v),
tccc: uint8(v),
flags: qcInfo(v >> 8),
}
if p.ccc > 0 || p.combinesBackward() {
p.nLead = uint8(p.flags & 0x3)
}
return p
}
// has decomposition
h := decomps[v]
f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
p := Properties{size: uint8(sz), flags: f, index: v}
if v >= firstCCC {
v += uint16(h&headerLenMask) + 1
c := decomps[v]
p.tccc = c >> 2
p.flags |= qcInfo(c & 0x3)
if v >= firstLeadingCCC {
p.nLead = c & 0x3
if v >= firstStarterWithNLead {
// We were tricked. Remove the decomposition.
p.flags &= 0x03
p.index = 0
return p
}
p.ccc = decomps[v+1]
}
}
return p
}
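The Properties accessors above are exported, so the packed trie values described in the file header can be inspected directly. A small sketch:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// é (U+00E9) decomposes to 'e' followed by U+0301 COMBINING ACUTE ACCENT.
	p := norm.NFD.PropertiesString("\u00e9")
	fmt.Println(p.Size())                  // 2, the UTF-8 length of é
	fmt.Printf("% x\n", p.Decomposition()) // 65 cc 81
}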

109
vendor/golang.org/x/text/unicode/norm/input.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "unicode/utf8"
type input struct {
str string
bytes []byte
}
func inputBytes(str []byte) input {
return input{bytes: str}
}
func inputString(str string) input {
return input{str: str}
}
func (in *input) setBytes(str []byte) {
in.str = ""
in.bytes = str
}
func (in *input) setString(str string) {
in.str = str
in.bytes = nil
}
func (in *input) _byte(p int) byte {
if in.bytes == nil {
return in.str[p]
}
return in.bytes[p]
}
func (in *input) skipASCII(p, max int) int {
if in.bytes == nil {
for ; p < max && in.str[p] < utf8.RuneSelf; p++ {
}
} else {
for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ {
}
}
return p
}
func (in *input) skipContinuationBytes(p int) int {
if in.bytes == nil {
for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ {
}
} else {
for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ {
}
}
return p
}
func (in *input) appendSlice(buf []byte, b, e int) []byte {
if in.bytes != nil {
return append(buf, in.bytes[b:e]...)
}
for i := b; i < e; i++ {
buf = append(buf, in.str[i])
}
return buf
}
func (in *input) copySlice(buf []byte, b, e int) int {
if in.bytes == nil {
return copy(buf, in.str[b:e])
}
return copy(buf, in.bytes[b:e])
}
func (in *input) charinfoNFC(p int) (uint16, int) {
if in.bytes == nil {
return nfcData.lookupString(in.str[p:])
}
return nfcData.lookup(in.bytes[p:])
}
func (in *input) charinfoNFKC(p int) (uint16, int) {
if in.bytes == nil {
return nfkcData.lookupString(in.str[p:])
}
return nfkcData.lookup(in.bytes[p:])
}
func (in *input) hangul(p int) (r rune) {
var size int
if in.bytes == nil {
if !isHangulString(in.str[p:]) {
return 0
}
r, size = utf8.DecodeRuneInString(in.str[p:])
} else {
if !isHangul(in.bytes[p:]) {
return 0
}
r, size = utf8.DecodeRune(in.bytes[p:])
}
if size != hangulUTF8Size {
return 0
}
return r
}

458
vendor/golang.org/x/text/unicode/norm/iter.go generated vendored Normal file

@ -0,0 +1,458 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"fmt"
"unicode/utf8"
)
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
const MaxSegmentSize = maxByteBufferSize
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
rb reorderBuffer
buf [maxByteBufferSize]byte
info Properties // first character saved from previous iteration
next iterFunc // implementation of next depends on form
asciiF iterFunc
p int // current position in input source
multiSeg []byte // remainder of multi-segment decomposition
}
type iterFunc func(*Iter) []byte
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.init(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIBytes
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
i.p = 0
if len(src) == 0 {
i.setDone()
i.rb.nsrc = 0
return
}
i.multiSeg = nil
i.rb.initString(f, src)
i.next = i.rb.f.nextMain
i.asciiF = nextASCIIString
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
}
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a segment.
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(i.p) + offset
case 2:
abs = int64(i.rb.nsrc) + offset
default:
return 0, fmt.Errorf("norm: invalid whence")
}
if abs < 0 {
return 0, fmt.Errorf("norm: negative position")
}
if int(abs) >= i.rb.nsrc {
i.setDone()
return int64(i.p), nil
}
i.p = int(abs)
i.multiSeg = nil
i.next = i.rb.f.nextMain
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
return abs, nil
}
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
if i.rb.src.bytes == nil {
return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
}
return i.rb.src.bytes[a:b]
}
// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
return i.p
}
func (i *Iter) setDone() {
i.next = nextDone
i.p = i.rb.nsrc
}
// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
return i.p >= i.rb.nsrc
}
// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
return i.next(i)
}
func nextASCIIBytes(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
p0 := i.p
i.setDone()
return i.rb.src.bytes[p0:p]
}
if i.rb.src.bytes[p] < utf8.RuneSelf {
p0 := i.p
i.p = p
return i.rb.src.bytes[p0:p]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextASCIIString(i *Iter) []byte {
p := i.p + 1
if p >= i.rb.nsrc {
i.buf[0] = i.rb.src.str[i.p]
i.setDone()
return i.buf[:1]
}
if i.rb.src.str[p] < utf8.RuneSelf {
i.buf[0] = i.rb.src.str[i.p]
i.p = p
return i.buf[:1]
}
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
func nextHangul(i *Iter) []byte {
p := i.p
next := p + hangulUTF8Size
if next >= i.rb.nsrc {
i.setDone()
} else if i.rb.src.hangul(next) == 0 {
i.rb.ss.next(i.info)
i.info = i.rb.f.info(i.rb.src, i.p)
i.next = i.rb.f.nextMain
return i.next(i)
}
i.p = next
return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}
func nextDone(i *Iter) []byte {
return nil
}
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
j := 0
d := i.multiSeg
// skip first rune
for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
}
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.multiSeg = d[j:]
return d[:j]
}
j += int(info.size)
}
// treat last segment as normal decomposition
i.next = i.rb.f.nextMain
return i.next(i)
}
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms.
func nextMultiNorm(i *Iter) []byte {
j := 0
d := i.multiSeg
for j < len(d) {
info := i.rb.f.info(input{bytes: d}, j)
if info.BoundaryBefore() {
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
i.rb.insertUnsafe(input{bytes: d}, j, info)
i.multiSeg = d[j+int(info.size):]
return seg
}
i.rb.insertUnsafe(input{bytes: d}, j, info)
j += int(info.size)
}
i.multiSeg = nil
i.next = nextComposed
return doNormComposed(i)
}
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
func nextDecomposed(i *Iter) (next []byte) {
outp := 0
inCopyStart, outCopyStart := i.p, 0
for {
if sz := int(i.info.size); sz <= 1 {
i.rb.ss = 0
p := i.p
i.p++ // ASCII or illegal byte. Either way, advance by 1.
if i.p >= i.rb.nsrc {
i.setDone()
return i.returnSlice(p, i.p)
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.next = i.asciiF
return i.returnSlice(p, i.p)
}
outp++
} else if d := i.info.Decomposition(); d != nil {
// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
// Case 1: there is a leftover to copy. In this case the decomposition
// must begin with a modifier and should always be appended.
// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
p := outp + len(d)
if outp > 0 {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
// TODO: this condition should not be possible, but we leave it
// in for defensive purposes.
if p > len(i.buf) {
return i.buf[:outp]
}
} else if i.info.multiSegment() {
// outp must be 0 as multi-segment decompositions always
// start a new segment.
if i.multiSeg == nil {
i.multiSeg = d
i.next = nextMulti
return nextMulti(i)
}
// We are in the last segment. Treat as normal decomposition.
d = i.multiSeg
i.multiSeg = nil
p = len(d)
}
prevCC := i.info.tccc
if i.p += sz; i.p >= i.rb.nsrc {
i.setDone()
i.info = Properties{} // Force BoundaryBefore to succeed.
} else {
i.info = i.rb.f.info(i.rb.src, i.p)
}
switch i.rb.ss.next(i.info) {
case ssOverflow:
i.next = nextCGJDecompose
fallthrough
case ssStarter:
if outp > 0 {
copy(i.buf[outp:], d)
return i.buf[:p]
}
return d
}
copy(i.buf[outp:], d)
outp = p
inCopyStart, outCopyStart = i.p, outp
if i.info.ccc < prevCC {
goto doNorm
}
continue
} else if r := i.rb.src.hangul(i.p); r != 0 {
outp = decomposeHangul(i.buf[:], r)
i.p += hangulUTF8Size
inCopyStart, outCopyStart = i.p, outp
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src.hangul(i.p) != 0 {
i.next = nextHangul
return i.buf[:outp]
}
} else {
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
}
if i.p >= i.rb.nsrc {
i.setDone()
break
}
prevCC := i.info.tccc
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJDecompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
if outCopyStart == 0 {
return i.returnSlice(inCopyStart, i.p)
} else if inCopyStart < i.p {
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
}
return i.buf[:outp]
doNorm:
// Insert what we have decomposed so far in the reorderBuffer.
// As we will only reorder, there will always be enough room.
i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
i.rb.insertDecomposed(i.buf[0:outp])
return doNormDecomposed(i)
}
func doNormDecomposed(i *Iter) []byte {
for {
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if i.info.ccc == 0 {
break
}
if s := i.rb.ss.next(i.info); s == ssOverflow {
i.next = nextCGJDecompose
break
}
}
// new segment or too many combining characters: exit normalization
return i.buf[:i.rb.flushCopy(i.buf[:])]
}
func nextCGJDecompose(i *Iter) []byte {
i.rb.ss = 0
i.rb.insertCGJ()
i.next = nextDecomposed
i.rb.ss.first(i.info)
buf := doNormDecomposed(i)
return buf
}
// nextComposed is the implementation of Next for forms NFC and NFKC.
func nextComposed(i *Iter) []byte {
outp, startp := 0, i.p
var prevCC uint8
for {
if !i.info.isYesC() {
goto doNorm
}
prevCC = i.info.tccc
sz := int(i.info.size)
if sz == 0 {
sz = 1 // illegal rune: copy byte-by-byte
}
p := outp + sz
if p > len(i.buf) {
break
}
outp = p
i.p += sz
if i.p >= i.rb.nsrc {
i.setDone()
break
} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
i.rb.ss = 0
i.next = i.asciiF
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if v := i.rb.ss.next(i.info); v == ssStarter {
break
} else if v == ssOverflow {
i.next = nextCGJCompose
break
}
if i.info.ccc < prevCC {
goto doNorm
}
}
return i.returnSlice(startp, i.p)
doNorm:
// reset to start position
i.p = startp
i.info = i.rb.f.info(i.rb.src, i.p)
i.rb.ss.first(i.info)
if i.info.multiSegment() {
d := i.info.Decomposition()
info := i.rb.f.info(input{bytes: d}, 0)
i.rb.insertUnsafe(input{bytes: d}, 0, info)
i.multiSeg = d[int(info.size):]
i.next = nextMultiNorm
return nextMultiNorm(i)
}
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
func doNormComposed(i *Iter) []byte {
// First rune should already be inserted.
for {
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
i.setDone()
break
}
i.info = i.rb.f.info(i.rb.src, i.p)
if s := i.rb.ss.next(i.info); s == ssStarter {
break
} else if s == ssOverflow {
i.next = nextCGJCompose
break
}
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
}
i.rb.compose()
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
return seg
}
func nextCGJCompose(i *Iter) []byte {
i.rb.ss = 0 // instead of first
i.rb.insertCGJ()
i.next = nextComposed
// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
// even if they are not. This is particularly dubious for U+FF9E and U+FF9A.
// If we ever change that, insert a check here.
i.rb.ss.first(i.info)
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
return doNormComposed(i)
}
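The iterator above is normally driven with a Done/Next loop, producing one normalized segment at a time without building the whole output string. A brief usage sketch:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	var it norm.Iter
	it.InitString(norm.NFC, "e\u0301 + a\u0308") // decomposed é and ä
	for !it.Done() {
		seg := it.Next() // one normalized segment per call
		fmt.Printf("%q ", seg)
	}
	fmt.Println()
}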

610
vendor/golang.org/x/text/unicode/norm/normalize.go generated vendored Normal file

@ -0,0 +1,610 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Note: the file data_test.go that is generated should not be checked in.
//go:generate go run maketables.go triegen.go
//go:generate go test -tags test
// Package norm contains types and functions for normalizing Unicode strings.
package norm // import "golang.org/x/text/unicode/norm"
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// A Form denotes a canonical representation of Unicode code points.
// The Unicode-defined normalization and equivalence forms are:
//
// NFC Unicode Normalization Form C
// NFD Unicode Normalization Form D
// NFKC Unicode Normalization Form KC
// NFKD Unicode Normalization Form KD
//
// For a Form f, this documentation uses the notation f(x) to mean
// the bytes or string x converted to the given form.
// A position n in x is called a boundary if conversion to the form can
// proceed independently on both sides:
//
// f(x) == append(f(x[0:n]), f(x[n:])...)
//
// References: https://unicode.org/reports/tr15/ and
// https://unicode.org/notes/tn5/.
type Form int
const (
NFC Form = iota
NFD
NFKC
NFKD
)
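In everyday use the forms defined above are driven through String, Bytes and the IsNormal checks further down in this file; the boundary property from the doc comment is what makes the incremental variants possible. A short usage sketch:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	decomposed := "e\u0301" // 'e' followed by a combining acute accent
	fmt.Println(norm.NFC.IsNormalString(decomposed)) // false
	composed := norm.NFC.String(decomposed)
	fmt.Println(composed, norm.NFC.IsNormalString(composed)) // é true
}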
// Bytes returns f(b). May return b if f(b) = b.
func (f Form) Bytes(b []byte) []byte {
src := inputBytes(b)
ft := formTable[f]
n, ok := ft.quickSpan(src, 0, len(b), true)
if ok {
return b
}
out := make([]byte, n, len(b))
copy(out, b[0:n])
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b), out: out, flushF: appendFlush}
return doAppendInner(&rb, n)
}
// String returns f(s).
func (f Form) String(s string) string {
src := inputString(s)
ft := formTable[f]
n, ok := ft.quickSpan(src, 0, len(s), true)
if ok {
return s
}
out := make([]byte, n, len(s))
copy(out, s[0:n])
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s), out: out, flushF: appendFlush}
return string(doAppendInner(&rb, n))
}
// IsNormal returns true if b == f(b).
func (f Form) IsNormal(b []byte) bool {
src := inputBytes(b)
ft := formTable[f]
bp, ok := ft.quickSpan(src, 0, len(b), true)
if ok {
return true
}
rb := reorderBuffer{f: *ft, src: src, nsrc: len(b)}
rb.setFlusher(nil, cmpNormalBytes)
for bp < len(b) {
rb.out = b[bp:]
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
return false
}
bp, _ = rb.f.quickSpan(rb.src, bp, len(b), true)
}
return true
}
func cmpNormalBytes(rb *reorderBuffer) bool {
b := rb.out
for i := 0; i < rb.nrune; i++ {
info := rb.rune[i]
if int(info.size) > len(b) {
return false
}
p := info.pos
pe := p + info.size
for ; p < pe; p++ {
if b[0] != rb.byte[p] {
return false
}
b = b[1:]
}
}
return true
}
// IsNormalString returns true if s == f(s).
func (f Form) IsNormalString(s string) bool {
src := inputString(s)
ft := formTable[f]
bp, ok := ft.quickSpan(src, 0, len(s), true)
if ok {
return true
}
rb := reorderBuffer{f: *ft, src: src, nsrc: len(s)}
rb.setFlusher(nil, func(rb *reorderBuffer) bool {
for i := 0; i < rb.nrune; i++ {
info := rb.rune[i]
if bp+int(info.size) > len(s) {
return false
}
p := info.pos
pe := p + info.size
for ; p < pe; p++ {
if s[bp] != rb.byte[p] {
return false
}
bp++
}
}
return true
})
for bp < len(s) {
if bp = decomposeSegment(&rb, bp, true); bp < 0 {
return false
}
bp, _ = rb.f.quickSpan(rb.src, bp, len(s), true)
}
return true
}
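// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. IsNormal lets a caller skip the allocation in Bytes
// whenever the input already happens to be in the requested form.
func ensureNFC(b []byte) []byte {
	if NFC.IsNormal(b) {
		return b // already composed, nothing to do
	}
	return NFC.Bytes(b) // allocate and re-encode only when needed
}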
// patchTail fixes a case where a rune may be incorrectly normalized
// if it is followed by illegal continuation bytes. It patches the tail of
// rb.out in place and reports whether the decomposition is still in progress.
func patchTail(rb *reorderBuffer) bool {
info, p := lastRuneStart(&rb.f, rb.out)
if p == -1 || info.size == 0 {
return true
}
end := p + int(info.size)
extra := len(rb.out) - end
if extra > 0 {
// Potentially allocating memory. However, this only
// happens with ill-formed UTF-8.
x := make([]byte, 0)
x = append(x, rb.out[len(rb.out)-extra:]...)
rb.out = rb.out[:end]
decomposeToLastBoundary(rb)
rb.doFlush()
rb.out = append(rb.out, x...)
return false
}
buf := rb.out[p:]
rb.out = rb.out[:p]
decomposeToLastBoundary(rb)
if s := rb.ss.next(info); s == ssStarter {
rb.doFlush()
rb.ss.first(info)
} else if s == ssOverflow {
rb.doFlush()
rb.insertCGJ()
rb.ss = 0
}
rb.insertUnsafe(inputBytes(buf), 0, info)
return true
}
func appendQuick(rb *reorderBuffer, i int) int {
if rb.nsrc == i {
return i
}
end, _ := rb.f.quickSpan(rb.src, i, rb.nsrc, true)
rb.out = rb.src.appendSlice(rb.out, i, end)
return end
}
// Append returns f(append(out, b...)).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) Append(out []byte, src ...byte) []byte {
return f.doAppend(out, inputBytes(src), len(src))
}
func (f Form) doAppend(out []byte, src input, n int) []byte {
if n == 0 {
return out
}
ft := formTable[f]
// Attempt to do a quickSpan first so we can avoid initializing the reorderBuffer.
if len(out) == 0 {
p, _ := ft.quickSpan(src, 0, n, true)
out = src.appendSlice(out, 0, p)
if p == n {
return out
}
rb := reorderBuffer{f: *ft, src: src, nsrc: n, out: out, flushF: appendFlush}
return doAppendInner(&rb, p)
}
rb := reorderBuffer{f: *ft, src: src, nsrc: n}
return doAppend(&rb, out, 0)
}
func doAppend(rb *reorderBuffer, out []byte, p int) []byte {
rb.setFlusher(out, appendFlush)
src, n := rb.src, rb.nsrc
doMerge := len(out) > 0
if q := src.skipContinuationBytes(p); q > p {
// Move leading non-starters to destination.
rb.out = src.appendSlice(rb.out, p, q)
p = q
doMerge = patchTail(rb)
}
fd := &rb.f
if doMerge {
var info Properties
if p < n {
info = fd.info(src, p)
if !info.BoundaryBefore() || info.nLeadingNonStarters() > 0 {
if p == 0 {
decomposeToLastBoundary(rb)
}
p = decomposeSegment(rb, p, true)
}
}
if info.size == 0 {
rb.doFlush()
// Append incomplete UTF-8 encoding.
return src.appendSlice(rb.out, p, n)
}
if rb.nrune > 0 {
return doAppendInner(rb, p)
}
}
p = appendQuick(rb, p)
return doAppendInner(rb, p)
}
func doAppendInner(rb *reorderBuffer, p int) []byte {
for n := rb.nsrc; p < n; {
p = decomposeSegment(rb, p, true)
p = appendQuick(rb, p)
}
return rb.out
}
// AppendString returns f(append(out, []byte(s))).
// The buffer out must be nil, empty, or equal to f(out).
func (f Form) AppendString(out []byte, src string) []byte {
return f.doAppend(out, inputString(src), len(src))
}
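// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. It normalizes a sequence of chunks into one buffer,
// relying on the documented invariant that out must be nil, empty, or equal
// to f(out), which Append/AppendString preserve on every iteration.
func appendChunks(f Form, chunks []string) []byte {
	var out []byte
	for _, c := range chunks {
		out = f.AppendString(out, c)
	}
	return out
}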
// QuickSpan returns a boundary n such that b[0:n] == f(b[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) QuickSpan(b []byte) int {
n, _ := formTable[f].quickSpan(inputBytes(b), 0, len(b), true)
return n
}
// Span implements transform.SpanningTransformer. It returns a boundary n such
// that b[0:n] == f(b[0:n]). It is not guaranteed to return the largest such n.
func (f Form) Span(b []byte, atEOF bool) (n int, err error) {
n, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), atEOF)
if n < len(b) {
if !ok {
err = transform.ErrEndOfSpan
} else {
err = transform.ErrShortSrc
}
}
return n, err
}
// SpanString returns a boundary n such that s[0:n] == f(s[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) SpanString(s string, atEOF bool) (n int, err error) {
n, ok := formTable[f].quickSpan(inputString(s), 0, len(s), atEOF)
if n < len(s) {
if !ok {
err = transform.ErrEndOfSpan
} else {
err = transform.ErrShortSrc
}
}
return n, err
}
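// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. QuickSpan reports a prefix that is already in form
// f, so only the remaining tail has to be re-encoded; the full-slice
// expression b[:n:n] keeps Append from writing past the prefix in place.
func spanThenAppend(f Form, b []byte) []byte {
	n := f.QuickSpan(b)
	if n == len(b) {
		return b // the whole input is already normalized
	}
	return f.Append(b[:n:n], b[n:]...)
}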
// quickSpan returns a boundary n such that src[0:n] == f(src[0:n]) and
// whether any non-normalized parts were found. If atEOF is false, n will
// not point past the last segment if this segment might become
// non-normalized by appending other runes.
func (f *formInfo) quickSpan(src input, i, end int, atEOF bool) (n int, ok bool) {
var lastCC uint8
ss := streamSafe(0)
lastSegStart := i
for n = end; i < n; {
if j := src.skipASCII(i, n); i != j {
i = j
lastSegStart = i - 1
lastCC = 0
ss = 0
continue
}
info := f.info(src, i)
if info.size == 0 {
if atEOF {
// include incomplete runes
return n, true
}
return lastSegStart, true
}
// This block needs to be before the next, because it is possible to
// have an overflow for runes that are starters (e.g. with U+FF9E).
switch ss.next(info) {
case ssStarter:
lastSegStart = i
case ssOverflow:
return lastSegStart, false
case ssSuccess:
if lastCC > info.ccc {
return lastSegStart, false
}
}
if f.composing {
if !info.isYesC() {
break
}
} else {
if !info.isYesD() {
break
}
}
lastCC = info.ccc
i += int(info.size)
}
if i == n {
if !atEOF {
n = lastSegStart
}
return n, true
}
return lastSegStart, false
}
// QuickSpanString returns a boundary n such that s[0:n] == f(s[0:n]).
// It is not guaranteed to return the largest such n.
func (f Form) QuickSpanString(s string) int {
n, _ := formTable[f].quickSpan(inputString(s), 0, len(s), true)
return n
}
// FirstBoundary returns the position i of the first boundary in b
// or -1 if b contains no boundary.
func (f Form) FirstBoundary(b []byte) int {
return f.firstBoundary(inputBytes(b), len(b))
}
func (f Form) firstBoundary(src input, nsrc int) int {
i := src.skipContinuationBytes(0)
if i >= nsrc {
return -1
}
fd := formTable[f]
ss := streamSafe(0)
// We should call ss.first here, but we can't as the first rune is
// skipped already. This means FirstBoundary can't really determine
// CGJ insertion points correctly. Luckily it doesn't have to.
for {
info := fd.info(src, i)
if info.size == 0 {
return -1
}
if s := ss.next(info); s != ssSuccess {
return i
}
i += int(info.size)
if i >= nsrc {
if !info.BoundaryAfter() && !ss.isMax() {
return -1
}
return nsrc
}
}
}
// FirstBoundaryInString returns the position i of the first boundary in s
// or -1 if s contains no boundary.
func (f Form) FirstBoundaryInString(s string) int {
return f.firstBoundary(inputString(s), len(s))
}
// NextBoundary reports the index of the boundary between the first and next
// segment in b or -1 if atEOF is false and there are not enough bytes to
// determine this boundary.
func (f Form) NextBoundary(b []byte, atEOF bool) int {
return f.nextBoundary(inputBytes(b), len(b), atEOF)
}
// NextBoundaryInString reports the index of the boundary between the first and
// next segment in s or -1 if atEOF is false and there are not enough bytes to
// determine this boundary.
func (f Form) NextBoundaryInString(s string, atEOF bool) int {
return f.nextBoundary(inputString(s), len(s), atEOF)
}
func (f Form) nextBoundary(src input, nsrc int, atEOF bool) int {
if nsrc == 0 {
if atEOF {
return 0
}
return -1
}
fd := formTable[f]
info := fd.info(src, 0)
if info.size == 0 {
if atEOF {
return 1
}
return -1
}
ss := streamSafe(0)
ss.first(info)
for i := int(info.size); i < nsrc; i += int(info.size) {
info = fd.info(src, i)
if info.size == 0 {
if atEOF {
return i
}
return -1
}
// TODO: Using streamSafe to determine the boundary isn't the same as
// using BoundaryBefore. Determine which should be used.
if s := ss.next(info); s != ssSuccess {
return i
}
}
if !atEOF && !info.BoundaryAfter() && !ss.isMax() {
return -1
}
return nsrc
}
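// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. It cuts b into normalization segments using
// NextBoundary with atEOF set, since the whole input is available here.
func splitSegments(f Form, b []byte) [][]byte {
	var segs [][]byte
	for len(b) > 0 {
		n := f.NextBoundary(b, true)
		if n <= 0 {
			break // defensive; should not happen when atEOF is true
		}
		segs = append(segs, b[:n])
		b = b[n:]
	}
	return segs
}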
// LastBoundary returns the position i of the last boundary in b
// or -1 if b contains no boundary.
func (f Form) LastBoundary(b []byte) int {
return lastBoundary(formTable[f], b)
}
func lastBoundary(fd *formInfo, b []byte) int {
i := len(b)
info, p := lastRuneStart(fd, b)
if p == -1 {
return -1
}
if info.size == 0 { // ends with incomplete rune
if p == 0 { // starts with incomplete rune
return -1
}
i = p
info, p = lastRuneStart(fd, b[:i])
if p == -1 { // incomplete UTF-8 encoding or non-starter bytes without a starter
return i
}
}
if p+int(info.size) != i { // trailing non-starter bytes: illegal UTF-8
return i
}
if info.BoundaryAfter() {
return i
}
ss := streamSafe(0)
v := ss.backwards(info)
for i = p; i >= 0 && v != ssStarter; i = p {
info, p = lastRuneStart(fd, b[:i])
if v = ss.backwards(info); v == ssOverflow {
break
}
if p+int(info.size) != i {
if p == -1 { // no boundary found
return -1
}
return i // boundary after an illegal UTF-8 encoding
}
}
return i
}
// decomposeSegment scans the first segment in src starting at sp into rb. It
// inserts 0x034f (Grapheme Joiner) when it encounters a sequence of more than
// 30 non-starters and returns the position just past the segment, or a
// negative iShortDst or iShortSrc value.
func decomposeSegment(rb *reorderBuffer, sp int, atEOF bool) int {
// Force one character to be consumed.
info := rb.f.info(rb.src, sp)
if info.size == 0 {
return 0
}
if s := rb.ss.next(info); s == ssStarter {
// TODO: this could be removed if we don't support merging.
if rb.nrune > 0 {
goto end
}
} else if s == ssOverflow {
rb.insertCGJ()
goto end
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)
}
for {
sp += int(info.size)
if sp >= rb.nsrc {
if !atEOF && !info.BoundaryAfter() {
return int(iShortSrc)
}
break
}
info = rb.f.info(rb.src, sp)
if info.size == 0 {
if !atEOF {
return int(iShortSrc)
}
break
}
if s := rb.ss.next(info); s == ssStarter {
break
} else if s == ssOverflow {
rb.insertCGJ()
break
}
if err := rb.insertFlush(rb.src, sp, info); err != iSuccess {
return int(err)
}
}
end:
if !rb.doFlush() {
return int(iShortDst)
}
return sp
}
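// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. decomposeSegment above enforces the Stream-Safe
// rule, so a run of more than 30 non-starters should make a U+034F
// COMBINING GRAPHEME JOINER appear in the normalized output.
func cgjIsInserted() bool {
	s := "e"
	for i := 0; i < 40; i++ {
		s += "\u0301" // COMBINING ACUTE ACCENT, a non-starter
	}
	for _, r := range NFC.String(s) {
		if r == '\u034f' {
			return true
		}
	}
	return false
}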
// lastRuneStart returns the Properties and position of the last
// rune in buf, or the zero Properties and -1 if no rune was found.
func lastRuneStart(fd *formInfo, buf []byte) (Properties, int) {
p := len(buf) - 1
for ; p >= 0 && !utf8.RuneStart(buf[p]); p-- {
}
if p < 0 {
return Properties{}, -1
}
return fd.info(inputBytes(buf), p), p
}
// decomposeToLastBoundary finds an open segment at the end of the buffer
// and scans it into rb, truncating rb.out so that it no longer includes
// that segment.
func decomposeToLastBoundary(rb *reorderBuffer) {
fd := &rb.f
info, i := lastRuneStart(fd, rb.out)
if int(info.size) != len(rb.out)-i {
// illegal trailing continuation bytes
return
}
if info.BoundaryAfter() {
return
}
var add [maxNonStarters + 1]Properties // stores runeInfo in reverse order
padd := 0
ss := streamSafe(0)
p := len(rb.out)
for {
add[padd] = info
v := ss.backwards(info)
if v == ssOverflow {
// Note that if we have an overflow, the string we are appending to
// is not correctly normalized. In this case the behavior is undefined.
break
}
padd++
p -= int(info.size)
if v == ssStarter || p < 0 {
break
}
info, i = lastRuneStart(fd, rb.out[:p])
if int(info.size) != p-i {
break
}
}
rb.ss = ss
// Copy bytes for insertion as we may need to overwrite rb.out.
var buf [maxBufferSize * utf8.UTFMax]byte
cp := buf[:copy(buf[:], rb.out[p:])]
rb.out = rb.out[:p]
for padd--; padd >= 0; padd-- {
info = add[padd]
rb.insertUnsafe(inputBytes(cp), 0, info)
cp = cp[info.size:]
}
}

125
vendor/golang.org/x/text/unicode/norm/readwriter.go generated vendored Normal file

@ -0,0 +1,125 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import "io"
type normWriter struct {
rb reorderBuffer
w io.Writer
buf []byte
}
// Write implements the standard write interface. If the last characters are
// not at a normalization boundary, the bytes will be buffered for the next
// write. The remaining bytes will be written on close.
func (w *normWriter) Write(data []byte) (n int, err error) {
// Process data in pieces to keep w.buf size bounded.
const chunk = 4000
for len(data) > 0 {
// Normalize into w.buf.
m := len(data)
if m > chunk {
m = chunk
}
w.rb.src = inputBytes(data[:m])
w.rb.nsrc = m
w.buf = doAppend(&w.rb, w.buf, 0)
data = data[m:]
n += m
// Write out complete prefix, save remainder.
// Note that lastBoundary looks back at most 31 runes.
i := lastBoundary(&w.rb.f, w.buf)
if i == -1 {
i = 0
}
if i > 0 {
if _, err = w.w.Write(w.buf[:i]); err != nil {
break
}
bn := copy(w.buf, w.buf[i:])
w.buf = w.buf[:bn]
}
}
return n, err
}
// Close forces data that remains in the buffer to be written.
func (w *normWriter) Close() error {
if len(w.buf) > 0 {
_, err := w.w.Write(w.buf)
if err != nil {
return err
}
}
return nil
}
// Writer returns a new writer that implements Write(b)
// by writing f(b) to w. The returned writer may use an
// internal buffer to maintain state across Write calls.
// Calling its Close method writes any buffered data to w.
func (f Form) Writer(w io.Writer) io.WriteCloser {
wr := &normWriter{rb: reorderBuffer{}, w: w}
wr.rb.init(f, nil)
return wr
}
type normReader struct {
rb reorderBuffer
r io.Reader
inbuf []byte
outbuf []byte
bufStart int
lastBoundary int
err error
}
// Read implements the standard read interface.
func (r *normReader) Read(p []byte) (int, error) {
for {
if r.lastBoundary-r.bufStart > 0 {
n := copy(p, r.outbuf[r.bufStart:r.lastBoundary])
r.bufStart += n
if r.lastBoundary-r.bufStart > 0 {
return n, nil
}
return n, r.err
}
if r.err != nil {
return 0, r.err
}
outn := copy(r.outbuf, r.outbuf[r.lastBoundary:])
r.outbuf = r.outbuf[0:outn]
r.bufStart = 0
n, err := r.r.Read(r.inbuf)
r.rb.src = inputBytes(r.inbuf[0:n])
r.rb.nsrc, r.err = n, err
if n > 0 {
r.outbuf = doAppend(&r.rb, r.outbuf, 0)
}
if err == io.EOF {
r.lastBoundary = len(r.outbuf)
} else {
r.lastBoundary = lastBoundary(&r.rb.f, r.outbuf)
if r.lastBoundary == -1 {
r.lastBoundary = 0
}
}
}
}
// Reader returns a new reader that implements Read
// by reading data from r and returning f(data).
func (f Form) Reader(r io.Reader) io.Reader {
const chunk = 4000
buf := make([]byte, chunk)
rr := &normReader{rb: reorderBuffer{}, r: r, inbuf: buf}
rr.rb.init(f, buf)
return rr
}
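// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. It wires Form.Writer into io.Copy so an arbitrary
// stream is re-encoded into form f on the fly; Close flushes any bytes
// still waiting for a normalization boundary.
func normalizeCopy(f Form, dst io.Writer, src io.Reader) error {
	w := f.Writer(dst)
	if _, err := io.Copy(w, src); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}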

7658
vendor/golang.org/x/text/unicode/norm/tables10.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

7694
vendor/golang.org/x/text/unicode/norm/tables11.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

7711
vendor/golang.org/x/text/unicode/norm/tables12.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

7761
vendor/golang.org/x/text/unicode/norm/tables13.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

7908
vendor/golang.org/x/text/unicode/norm/tables15.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

7638
vendor/golang.org/x/text/unicode/norm/tables9.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

88
vendor/golang.org/x/text/unicode/norm/transform.go generated vendored Normal file

@ -0,0 +1,88 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
import (
"unicode/utf8"
"golang.org/x/text/transform"
)
// Reset implements the Reset method of the transform.Transformer interface.
func (Form) Reset() {}
// Transform implements the Transform method of the transform.Transformer
// interface. It may need to write segments of up to MaxSegmentSize at once.
// Users should either catch ErrShortDst and allow dst to grow or have dst be at
// least of size MaxTransformChunkSize to be guaranteed of progress.
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// Cap the maximum number of src bytes to check.
b := src
eof := atEOF
if ns := len(dst); ns < len(b) {
err = transform.ErrShortDst
eof = false
b = b[:ns]
}
i, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), eof)
n := copy(dst, b[:i])
if !ok {
nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF)
return nDst + n, nSrc + n, err
}
if err == nil && n < len(src) && !atEOF {
err = transform.ErrShortSrc
}
return n, n, err
}
func flushTransform(rb *reorderBuffer) bool {
// Write out (must fully fit in dst, or else it is an ErrShortDst).
if len(rb.out) < rb.nrune*utf8.UTFMax {
return false
}
rb.out = rb.out[rb.flushCopy(rb.out):]
return true
}
var errs = []error{nil, transform.ErrShortDst, transform.ErrShortSrc}
// transform implements the transform.Transformer interface. It is only called
// when quickSpan does not pass for a given string.
func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
// TODO: get rid of reorderBuffer. See CL 23460044.
rb := reorderBuffer{}
rb.init(f, src)
for {
// Load segment into reorder buffer.
rb.setFlusher(dst[nDst:], flushTransform)
end := decomposeSegment(&rb, nSrc, atEOF)
if end < 0 {
return nDst, nSrc, errs[-end]
}
nDst = len(dst) - len(rb.out)
nSrc = end
// Next quickSpan.
end = rb.nsrc
eof := atEOF
if n := nSrc + len(dst) - nDst; n < end {
err = transform.ErrShortDst
end = n
eof = false
}
end, ok := rb.f.quickSpan(rb.src, nSrc, end, eof)
n := copy(dst[nDst:], rb.src.bytes[nSrc:end])
nSrc += n
nDst += n
if ok {
if err == nil && n < rb.nsrc && !atEOF {
err = transform.ErrShortSrc
}
return nDst, nSrc, err
}
}
}
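// Illustrative sketch, not part of the upstream vendored file; the helper
// name is hypothetical. Because Form implements transform.Transformer via
// Reset and Transform above, it can be driven by the generic helpers in
// golang.org/x/text/transform, such as transform.String.
func nfcViaTransformer(s string) (string, error) {
	out, _, err := transform.String(NFC, s)
	return out, err
}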

54
vendor/golang.org/x/text/unicode/norm/trie.go generated vendored Normal file

@ -0,0 +1,54 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package norm
type valueRange struct {
value uint16 // header: value:stride
lo, hi byte // header: lo:n
}
type sparseBlocks struct {
values []valueRange
offset []uint16
}
var nfcSparse = sparseBlocks{
values: nfcSparseValues[:],
offset: nfcSparseOffset[:],
}
var nfkcSparse = sparseBlocks{
values: nfkcSparseValues[:],
offset: nfkcSparseOffset[:],
}
var (
nfcData = newNfcTrie(0)
nfkcData = newNfkcTrie(0)
)
// lookupValue determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is given by r.value + (b - r.lo) * stride.
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
offset := t.offset[n]
header := t.values[offset]
lo := offset + 1
hi := lo + uint16(header.lo)
for lo < hi {
m := lo + (hi-lo)/2
r := t.values[m]
if r.lo <= b && b <= r.hi {
return r.value + uint16(b-r.lo)*header.value
}
if b < r.lo {
hi = m
} else {
lo = m + 1
}
}
return 0
}

10
vendor/modules.txt vendored

@ -254,6 +254,11 @@ github.com/sergi/go-diff/diffmatchpatch
# github.com/sirupsen/logrus v1.4.2
## explicit
github.com/sirupsen/logrus
# github.com/spf13/afero v1.9.5
## explicit; go 1.16
github.com/spf13/afero
github.com/spf13/afero/internal/common
github.com/spf13/afero/mem
# github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad
## explicit
github.com/spkg/bom
@ -266,7 +271,7 @@ github.com/xanzy/ssh-agent
# github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778
## explicit; go 1.15
github.com/xo/terminfo
# golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
# golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
## explicit; go 1.17
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@ -274,7 +279,6 @@ golang.org/x/crypto/chacha20
golang.org/x/crypto/curve25519
golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/openpgp
@ -310,7 +314,9 @@ golang.org/x/term
## explicit; go 1.17
golang.org/x/text/encoding
golang.org/x/text/encoding/internal/identifier
golang.org/x/text/runes
golang.org/x/text/transform
golang.org/x/text/unicode/norm
# gopkg.in/ozeidan/fuzzy-patricia.v3 v3.0.0
## explicit
gopkg.in/ozeidan/fuzzy-patricia.v3/patricia