1
0
mirror of https://github.com/pocketbase/pocketbase.git synced 2025-03-19 14:17:48 +02:00

[poc] replaced aws-sdk-go-v2 and gocloud.dev/blob

This commit is contained in:
Gani Georgiev 2025-03-05 15:58:21 +02:00
parent 48a9b82024
commit 501c49012e
34 changed files with 9845 additions and 6187 deletions

View File

@ -1,5 +1,7 @@
## v0.26.0 (WIP)
- ⚠️ Replaced `aws-sdk-go-v2` and `gocloud.dev/blob` with custom lighter implementation (@todo docs and tests)
- ⚠️ Prioritized the user submitted non-empty `createData.email` (_it will be unverified_) when creating the PocketBase user during the first OAuth2 auth.
- Load the request info context during password/OAuth2/OTP authentication ([#6402](https://github.com/pocketbase/pocketbase/issues/6402)).

View File

@ -13,7 +13,7 @@ import (
"github.com/pocketbase/pocketbase/apis"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/tests"
"gocloud.dev/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
)
func TestBackupsList(t *testing.T) {
@ -490,7 +490,7 @@ func TestBackupsDownload(t *testing.T) {
t.Fatal(err)
}
},
ExpectedStatus: 400,
ExpectedStatus: 404,
ExpectedContent: []string{`"data":{}`},
ExpectedEvents: map[string]int{"*": 0},
},

41
go.mod
View File

@ -3,12 +3,6 @@ module github.com/pocketbase/pocketbase
go 1.23
require (
github.com/aws/aws-sdk-go-v2 v1.36.1
github.com/aws/aws-sdk-go-v2/config v1.28.10
github.com/aws/aws-sdk-go-v2/credentials v1.17.51
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2
github.com/aws/smithy-go v1.22.2
github.com/disintegration/imaging v1.6.2
github.com/domodwyer/mailyak/v3 v3.6.2
github.com/dop251/goja v0.0.0-20241009100908-5f46f2705ca3
@ -23,7 +17,6 @@ require (
github.com/pocketbase/tygoja v0.0.0-20250103200817-ca580d8c5119
github.com/spf13/cast v1.7.1
github.com/spf13/cobra v1.8.1
gocloud.dev v0.40.0
golang.org/x/crypto v0.33.0
golang.org/x/net v0.35.0
golang.org/x/oauth2 v0.26.0
@ -32,47 +25,25 @@ require (
)
require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 // indirect
github.com/dlclark/regexp2 v1.11.4 // indirect
github.com/dop251/base64dec v0.0.0-20231022112746-c6c9f9a96217 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/stretchr/testify v1.8.2 // indirect
go.opencensus.io v0.24.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 // indirect
golang.org/x/image v0.24.0 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/api v0.220.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect
google.golang.org/grpc v1.70.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
golang.org/x/tools v0.26.0 // indirect
modernc.org/libc v1.61.13 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.8.2 // indirect

252
go.sum
View File

@ -1,71 +1,10 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0=
cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4=
cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus=
cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs=
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E=
github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg=
github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk=
github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 h1:XnXVe2zRyPf0+fAW5L05esmngvBpC6DQZK7oZB/z/Co=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48/go.mod h1:S3wey90OrS4f7kYxH6PT175YyEcHTORY07++HurMaRM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 h1:kT2WeWcFySdYpPgyqJMSUE7781Qucjtn6wBvrgm9P+M=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q=
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 h1:a7aQ3RW+ug4IbhoQp29NZdc7vqrzKZZfWZSaQAXOZvQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2/go.mod h1:xMekrnhmJ5aqmyxtmALs7mlvXw5xRh+eYjOjvrIIFJ4=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo=
@ -80,14 +19,8 @@ github.com/dop251/goja_nodejs v0.0.0-20240728170619-29b559befffc h1:MKYt39yZJi0Z
github.com/dop251/goja_nodejs v0.0.0-20240728170619-29b559befffc/go.mod h1:VULptt4Q/fNzQUJlqY/GP3qHyU7ZH46mFkBZe0ZTokU=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@ -96,67 +29,30 @@ github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3G
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/ganigeorgiev/fexpr v0.4.1 h1:hpUgbUEEWIZhSDBtf4M9aUNfQQ0BZkGRaMePy7Gcx5k=
github.com/ganigeorgiev/fexpr v0.4.1/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es=
github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew=
github.com/go-sourcemap/sourcemap v2.1.4+incompatible h1:a+iTbH5auLKxaNwQFg0B+TCYl6lbukKPc7b5x0n1s6Q=
github.com/go-sourcemap/sourcemap v2.1.4+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
@ -167,7 +63,6 @@ github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU
github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
github.com/pocketbase/tygoja v0.0.0-20250103200817-ca580d8c5119 h1:TjQtEReJDTpvlNFTRjuHvPQpJHAeJdcQF130eCAAT/o=
github.com/pocketbase/tygoja v0.0.0-20250103200817-ca580d8c5119/go.mod h1:hKJWPGFqavk3cdTa47Qvs8g37lnfI57OYdVVbIqW5aE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
@ -177,160 +72,67 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
gocloud.dev v0.40.0 h1:f8LgP+4WDqOG/RXoUcyLpeIAGOcAbZrZbDQCUee10ng=
gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 h1:pVgRXcIictcr+lBQIFeiwuwtDIs4eL21OuM9nyAADmo=
golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns=
google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phrYMtzX11k+XkzMGfRAet42PmoTATM=
google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo=
modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw=
modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8=
modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI=
modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.35.0 h1:yQps4fegMnZFdphtzlfQTCNBWtS0CZv48pRpW3RFHRw=
modernc.org/sqlite v1.35.0/go.mod h1:9cr2sicr7jIaWTBKQmAxQLfBv9LL0su4ZTEV+utt3ic=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

File diff suppressed because it is too large Load Diff

View File

@ -1158,7 +1158,6 @@ func main() {
"pflag.*": "any",
"flag.*": "any",
"log.*": "any",
"aws.*": "any",
"http.Client": "any",
"mail.Address": "{ address: string; name?: string; }", // prevents the LSP to complain in case no name is provided
},

View File

@ -0,0 +1,716 @@
// Package blob defines a lightweight abstraction for interacting with
// various storage services (local filesystem, S3, etc.).
//
// NB!
// For compatibility with earlier PocketBase versions and to prevent
// unnecessary breaking changes, this package is based on and implemented
// as a minimal, stripped down version of the previously used gocloud.dev/blob.
// While there is no promise that it won't diverge in the future to accommodate
// better some PocketBase specific use cases, currently it copies and
// tries to follow as closely as possible the same implementations,
// conventions and rules for the key escaping/unescaping, blob read/write
// interfaces and struct options as gocloud.dev/blob, therefore the
// credit goes to the original Go Cloud Development Kit Authors.
package blob
import (
"bytes"
"context"
"crypto/md5"
"errors"
"fmt"
"io"
"log"
"mime"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
)
// Sentinel errors shared by all Bucket operations and driver implementations.
// They may be wrapped with extra context, so compare with errors.Is.
var (
	// ErrNotFound is returned when the requested blob/resource doesn't exist.
	ErrNotFound = errors.New("resource not found")

	// ErrClosed is returned when operating on an already closed bucket or blob.
	ErrClosed = errors.New("bucket or blob is closed")
)
// Bucket provides an easy and portable way to interact with blobs
// within a "bucket", including read, write, and list operations.
// To create a Bucket, use constructors found in driver subpackages.
type Bucket struct {
	// drv is the concrete storage driver all operations delegate to.
	drv Driver

	// mu protects the closed variable.
	// Read locks are kept to allow holding a read lock for long-running calls,
	// and thereby prevent closing until a call finishes.
	mu sync.RWMutex

	// closed is set once by Close; all subsequent operations return ErrClosed.
	closed bool
}
// NewBucket wraps the provided driver implementation into a new *Bucket.
func NewBucket(drv Driver) *Bucket {
	b := &Bucket{drv: drv}
	return b
}
// ListOptions sets options for listing blobs via Bucket.List.
type ListOptions struct {
	// Prefix indicates that only blobs with a key starting with this prefix
	// should be returned.
	Prefix string

	// Delimiter sets the delimiter used to define a hierarchical namespace,
	// like a filesystem with "directories". It is highly recommended that you
	// use "" or "/" as the Delimiter. Other values should work through this API,
	// but service UIs generally assume "/".
	//
	// An empty delimiter means that the bucket is treated as a single flat
	// namespace.
	//
	// A non-empty delimiter means that any result with the delimiter in its key
	// after Prefix is stripped will be returned with ListObject.IsDir = true,
	// ListObject.Key truncated after the delimiter, and zero values for other
	// ListObject fields. These results represent "directories". Multiple results
	// in a "directory" are returned as a single result.
	Delimiter string

	// PageSize sets the maximum number of objects to be returned.
	// 0 means no maximum; driver implementations should choose a reasonable
	// max. It is guaranteed to be >= 0.
	//
	// Note: Bucket.List copies only Prefix and Delimiter into the iterator's
	// options; PageSize is used by Bucket.ListPage and driver ListPaged calls.
	PageSize int

	// PageToken may be filled in with the NextPageToken from a previous
	// ListPaged call.
	PageToken []byte
}
// ListPage represents a page of results returned from ListPaged.
type ListPage struct {
	// Objects is the slice of objects found. If ListOptions.PageSize > 0,
	// it should have at most ListOptions.PageSize entries.
	//
	// Objects should be returned in lexicographical order of UTF-8 encoded keys,
	// including across pages. I.e., all objects returned from a ListPage request
	// made using a PageToken from a previous ListPage request's NextPageToken
	// should have Key >= the Key for all objects from the previous request.
	Objects []*ListObject

	// NextPageToken should be left empty unless there are more objects
	// to return. The value may be returned as ListOptions.PageToken on a
	// subsequent ListPaged call, to fetch the next page of results.
	// It can be an arbitrary []byte; it need not be a valid key.
	NextPageToken []byte
}
// ListIterator iterates over List results.
type ListIterator struct {
	// b is the bucket the iteration runs against.
	b *Bucket

	// opts holds the driver list options; PageToken is advanced between pages.
	opts *ListOptions

	// page is the currently buffered page of results (nil before the first fetch).
	page *ListPage

	// nextIdx is the index into page.Objects of the next object to return.
	nextIdx int
}
// Next returns a *ListObject for the next blob.
// It returns (nil, io.EOF) if there are no more.
func (i *ListIterator) Next(ctx context.Context) (*ListObject, error) {
	for {
		if i.page != nil {
			if i.nextIdx < len(i.page.Objects) {
				// Next object is in the buffered page; return a copy of it.
				dobj := i.page.Objects[i.nextIdx]
				i.nextIdx++
				return &ListObject{
					Key:     dobj.Key,
					ModTime: dobj.ModTime,
					Size:    dobj.Size,
					MD5:     dobj.MD5,
					IsDir:   dobj.IsDir,
				}, nil
			}
			if len(i.page.NextPageToken) == 0 {
				// Done with the current page, and there are no more.
				return nil, io.EOF
			}
			// Advance to the next page.
			i.opts.PageToken = i.page.NextPageToken
		}

		// Load a new page and continue the loop.
		//
		// Using a loop (instead of recursion) ensures that the bucket read
		// lock is held only for the duration of a single nextPage call:
		// a recursive Next while the outer RLock was still held could
		// deadlock with a concurrent Bucket.Close waiting for the write lock
		// (sync.RWMutex forbids recursive read locking in that scenario).
		p, err := i.nextPage(ctx)
		if err != nil {
			return nil, err
		}
		i.page = p
		i.nextIdx = 0
	}
}

// nextPage fetches the next results page from the underlying driver,
// holding the bucket read lock only for the duration of the call.
func (i *ListIterator) nextPage(ctx context.Context) (*ListPage, error) {
	i.b.mu.RLock()
	defer i.b.mu.RUnlock()

	if i.b.closed {
		return nil, ErrClosed
	}

	p, err := i.b.drv.ListPaged(ctx, i.opts)
	if err != nil {
		return nil, wrapError(i.b.drv, err, "")
	}

	return p, nil
}
// ListObject represents a single blob returned from List.
// It is returned by ListIterator.Next and Bucket.ListPage.
type ListObject struct {
	// Key is the key for this blob.
	Key string

	// ModTime is the time the blob was last modified.
	ModTime time.Time

	// Size is the size of the blob's content in bytes.
	Size int64

	// MD5 is an MD5 hash of the blob contents or nil if not available.
	MD5 []byte

	// IsDir indicates that this result represents a "directory" in the
	// hierarchical namespace, ending in ListOptions.Delimiter. Key can be
	// passed as ListOptions.Prefix to list items in the "directory".
	// Fields other than Key and IsDir will not be set if IsDir is true.
	IsDir bool
}
// List returns a ListIterator that can be used to iterate over blobs in a
// bucket, in lexicographical order of UTF-8 encoded keys. The underlying
// implementation fetches results in pages.
//
// A nil ListOptions is treated the same as the zero value.
//
// Only Prefix and Delimiter are copied from opts; the iterator manages
// its own paging state.
//
// List is not guaranteed to include all recently-written blobs;
// some services are only eventually consistent.
func (b *Bucket) List(opts *ListOptions) *ListIterator {
	var prefix, delimiter string
	if opts != nil {
		prefix = opts.Prefix
		delimiter = opts.Delimiter
	}

	return &ListIterator{
		b: b,
		opts: &ListOptions{
			Prefix:    prefix,
			Delimiter: delimiter,
		},
	}
}
// FirstPageToken is the pageToken to pass to ListPage to retrieve the first page of results.
// ListPage translates it to the nil token that drivers use for the first page.
var FirstPageToken = []byte("first page")
// ListPage returns a page of ListObject results for blobs in a bucket, in lexicographical
// order of UTF-8 encoded keys.
//
// To fetch the first page, pass FirstPageToken as the pageToken. For subsequent pages, pass
// the pageToken returned from a previous call to ListPage.
// It is not possible to "skip ahead" pages.
//
// Each call will return pageSize results, unless there are not enough blobs to fill the
// page, in which case it will return fewer results (possibly 0).
//
// If there are no more blobs available, ListPage will return an empty pageToken. Note that
// this may happen regardless of the number of returned results -- the last page might have
// 0 results (i.e., if the last item was deleted), pageSize results, or anything in between.
//
// Calling ListPage with an empty pageToken will immediately return io.EOF. When looping
// over pages, callers can either check for an empty pageToken, or they can make one more
// call and check for io.EOF.
//
// The underlying implementation fetches results in pages, but one call to ListPage may
// require multiple page fetches (and therefore, multiple calls to the BeforeList callback).
//
// A nil ListOptions is treated the same as the zero value.
//
// ListPage is not guaranteed to include all recently-written blobs;
// some services are only eventually consistent.
func (b *Bucket) ListPage(ctx context.Context, pageToken []byte, pageSize int, opts *ListOptions) (retval []*ListObject, nextPageToken []byte, err error) {
	if opts == nil {
		opts = &ListOptions{}
	}
	if pageSize <= 0 {
		return nil, nil, fmt.Errorf("pageSize must be > 0 (%d)", pageSize)
	}

	// Nil pageToken means no more results.
	if len(pageToken) == 0 {
		return nil, nil, io.EOF
	}

	// FirstPageToken fetches the first page. Drivers use nil.
	// The public API doesn't use nil for the first page because it would be too easy to
	// keep fetching forever (since the last page return nil for the next pageToken).
	if bytes.Equal(pageToken, FirstPageToken) {
		pageToken = nil
	}

	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.closed {
		return nil, nil, ErrClosed
	}

	dopts := &ListOptions{
		Prefix:    opts.Prefix,
		Delimiter: opts.Delimiter,
		PageToken: pageToken,
		PageSize:  pageSize,
	}

	// Preallocate for the full page to avoid append growth.
	retval = make([]*ListObject, 0, pageSize)

	// Keep fetching until the page is full or the driver runs out of results.
	for len(retval) < pageSize {
		p, err := b.drv.ListPaged(ctx, dopts)
		if err != nil {
			return nil, nil, wrapError(b.drv, err, "")
		}

		// Copy the driver objects into portable ListObject values.
		for _, dobj := range p.Objects {
			retval = append(retval, &ListObject{
				Key:     dobj.Key,
				ModTime: dobj.ModTime,
				Size:    dobj.Size,
				MD5:     dobj.MD5,
				IsDir:   dobj.IsDir,
			})
		}

		// ListPaged may return fewer results than pageSize. If there are more results
		// available, signalled by non-empty p.NextPageToken, try to fetch the remainder
		// of the page.
		// It does not work to ask for more results than we need, because then we'd have
		// a NextPageToken on a non-page boundary.
		dopts.PageSize = pageSize - len(retval)
		dopts.PageToken = p.NextPageToken
		if len(dopts.PageToken) == 0 {
			dopts.PageToken = nil
			break
		}
	}

	return retval, dopts.PageToken, nil
}
// Attributes contains attributes about a blob.
// It is returned by Bucket.Attributes.
type Attributes struct {
	// CacheControl specifies caching attributes that services may use
	// when serving the blob.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
	CacheControl string

	// ContentDisposition specifies whether the blob content is expected to be
	// displayed inline or as an attachment.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
	ContentDisposition string

	// ContentEncoding specifies the encoding used for the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
	ContentEncoding string

	// ContentLanguage specifies the language used in the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
	ContentLanguage string

	// ContentType is the MIME type of the blob. It will not be empty.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
	ContentType string

	// Metadata holds key/value pairs associated with the blob.
	// Keys are guaranteed to be in lowercase, even if the backend service
	// has case-sensitive keys (although note that Metadata written via
	// this package will always be lowercased). If there are duplicate
	// case-insensitive keys (e.g., "foo" and "FOO"), only one value
	// will be kept, and it is undefined which one.
	Metadata map[string]string

	// CreateTime is the time the blob was created, if available. If not available,
	// CreateTime will be the zero time.
	CreateTime time.Time

	// ModTime is the time the blob was last modified.
	ModTime time.Time

	// Size is the size of the blob's content in bytes.
	Size int64

	// MD5 is an MD5 hash of the blob contents or nil if not available.
	MD5 []byte

	// ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag.
	ETag string
}
// Attributes returns attributes for the blob stored at key.
//
// If the blob does not exist, Attributes returns an error for which
// errors.Is(err, ErrNotFound) reports true.
func (b *Bucket) Attributes(ctx context.Context, key string) (_ *Attributes, err error) {
	if !utf8.ValidString(key) {
		return nil, fmt.Errorf("Attributes key must be a valid UTF-8 string: %q", key)
	}

	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.closed {
		return nil, ErrClosed
	}

	a, err := b.drv.Attributes(ctx, key)
	if err != nil {
		return nil, wrapError(b.drv, err, key)
	}

	var md map[string]string
	if len(a.Metadata) > 0 {
		// Services are inconsistent, but at least some treat keys
		// as case-insensitive. To make the behavior consistent, we
		// force-lowercase them when writing and reading.
		md = make(map[string]string, len(a.Metadata))
		for k, v := range a.Metadata {
			md[strings.ToLower(k)] = v
		}
	}

	// Return a fresh copy of the driver-provided attributes with the
	// normalized (lowercased) metadata.
	return &Attributes{
		CacheControl:       a.CacheControl,
		ContentDisposition: a.ContentDisposition,
		ContentEncoding:    a.ContentEncoding,
		ContentLanguage:    a.ContentLanguage,
		ContentType:        a.ContentType,
		Metadata:           md,
		CreateTime:         a.CreateTime,
		ModTime:            a.ModTime,
		Size:               a.Size,
		MD5:                a.MD5,
		ETag:               a.ETag,
	}, nil
}
// Exists reports whether a blob exists at key.
//
// It is a shortcut for calling Attributes and checking whether the returned
// error matches ErrNotFound. Any other error is passed through to the caller.
func (b *Bucket) Exists(ctx context.Context, key string) (bool, error) {
	switch _, err := b.Attributes(ctx, key); {
	case err == nil:
		return true, nil
	case errors.Is(err, ErrNotFound):
		return false, nil
	default:
		return false, err
	}
}
// NewReader is a shortcut for NewRangeReader with offset=0 and length=-1,
// i.e. it reads the entire blob stored at key.
//
// The caller must call Close on the returned Reader when done reading.
func (b *Bucket) NewReader(ctx context.Context, key string) (*Reader, error) {
	return b.newRangeReader(ctx, key, 0, -1)
}
// NewRangeReader returns a Reader to read content from the blob stored at key.
// It reads at most length bytes starting at offset (>= 0).
// If length is negative, it will read till the end of the blob.
//
// For the purposes of Seek, the returned Reader will start at offset and
// end at the minimum of the actual end of the blob or (if length > 0) offset + length.
//
// Note that ctx is used for all reads performed during the lifetime of the reader.
//
// If the blob does not exist, NewRangeReader returns an error for which
// errors.Is(err, ErrNotFound) reports true. Exists is a lighter-weight way
// to check for existence.
//
// The caller must call Close on the returned Reader when done reading.
func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (_ *Reader, err error) {
	return b.newRangeReader(ctx, key, offset, length)
}
// newRangeReader is the shared implementation behind NewReader and
// NewRangeReader: it validates the arguments, opens a driver reader and
// wraps it into a portable *Reader.
func (b *Bucket) newRangeReader(ctx context.Context, key string, offset, length int64) (_ *Reader, err error) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.closed {
		return nil, ErrClosed
	}

	if offset < 0 {
		return nil, fmt.Errorf("NewRangeReader offset must be non-negative (%d)", offset)
	}
	if !utf8.ValidString(key) {
		return nil, fmt.Errorf("NewRangeReader key must be a valid UTF-8 string: %q", key)
	}

	var dr DriverReader
	dr, err = b.drv.NewRangeReader(ctx, key, offset, length)
	if err != nil {
		return nil, wrapError(b.drv, err, key)
	}

	r := &Reader{
		drv:         b.drv,
		r:           dr,
		key:         key,
		ctx:         ctx, // stored for use by subsequent reads (see NewRangeReader docs)
		baseOffset:  offset,
		baseLength:  length,
		savedOffset: -1,
	}

	// Skip 2 frames so the recorded location points at the caller of
	// NewReader/NewRangeReader rather than at this internal helper.
	_, file, lineno, ok := runtime.Caller(2)

	// Attach a finalizer that logs readers which were garbage collected
	// without ever being closed, to surface resource leaks.
	runtime.SetFinalizer(r, func(r *Reader) {
		if !r.closed {
			var caller string
			if ok {
				caller = fmt.Sprintf(" (%s:%d)", file, lineno)
			}
			log.Printf("A blob.Reader reading from %q was never closed%s", key, caller)
		}
	})

	return r, nil
}
// WriterOptions sets options for NewWriter.
// A nil *WriterOptions passed to NewWriter is treated as the zero value.
type WriterOptions struct {
	// BufferSize changes the default size in bytes of the chunks that
	// Writer will upload in a single request; larger blobs will be split into
	// multiple requests.
	//
	// This option may be ignored by some drivers.
	//
	// If 0, the driver will choose a reasonable default.
	//
	// If the Writer is used to do many small writes concurrently, using a
	// smaller BufferSize may reduce memory usage.
	BufferSize int

	// MaxConcurrency changes the default concurrency for parts of an upload.
	//
	// This option may be ignored by some drivers.
	//
	// If 0, the driver will choose a reasonable default.
	MaxConcurrency int

	// CacheControl specifies caching attributes that services may use
	// when serving the blob.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
	CacheControl string

	// ContentDisposition specifies whether the blob content is expected to be
	// displayed inline or as an attachment.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
	ContentDisposition string

	// ContentEncoding specifies the encoding used for the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
	ContentEncoding string

	// ContentLanguage specifies the language used in the blob's content, if any.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
	ContentLanguage string

	// ContentType specifies the MIME type of the blob being written. If not set,
	// it will be inferred from the content using the algorithm described at
	// http://mimesniff.spec.whatwg.org/.
	// Set DisableContentTypeDetection to true to disable the above and force
	// the ContentType to stay empty.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
	ContentType string

	// When true, if ContentType is the empty string, it will stay the empty
	// string rather than being inferred from the content.
	// Note that while the blob will be written with an empty string ContentType,
	// most providers will fill one in during reads, so don't expect an empty
	// ContentType if you read the blob back.
	DisableContentTypeDetection bool

	// ContentMD5 is used as a message integrity check.
	// If len(ContentMD5) > 0, the MD5 hash of the bytes written must match
	// ContentMD5, or Close will return an error without completing the write.
	// https://tools.ietf.org/html/rfc1864
	ContentMD5 []byte

	// Metadata holds key/value strings to be associated with the blob, or nil.
	// Keys may not be empty, and are lowercased before being written.
	// Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in
	// an error.
	Metadata map[string]string
}
// NewWriter returns a Writer that writes to the blob stored at key.
// A nil WriterOptions is treated the same as the zero value.
//
// If a blob with this key already exists, it will be replaced.
// The blob being written is not guaranteed to be readable until Close
// has been called; until then, any previous blob will still be readable.
// Even after Close is called, newly written blobs are not guaranteed to be
// returned from List; some services are only eventually consistent.
//
// The returned Writer will store ctx for later use in Write and/or Close.
// To abort a write, cancel ctx; otherwise, it must remain open until
// Close is called.
//
// The caller must call Close on the returned Writer, even if the write is
// aborted.
func (b *Bucket) NewWriter(ctx context.Context, key string, opts *WriterOptions) (_ *Writer, err error) {
	if !utf8.ValidString(key) {
		return nil, fmt.Errorf("NewWriter key must be a valid UTF-8 string: %q", key)
	}
	if opts == nil {
		opts = &WriterOptions{}
	}

	// Copy the caller options into the set passed down to the driver
	// (Metadata is validated and normalized separately below).
	dopts := &WriterOptions{
		CacheControl:                opts.CacheControl,
		ContentDisposition:          opts.ContentDisposition,
		ContentEncoding:             opts.ContentEncoding,
		ContentLanguage:             opts.ContentLanguage,
		ContentMD5:                  opts.ContentMD5,
		BufferSize:                  opts.BufferSize,
		MaxConcurrency:              opts.MaxConcurrency,
		DisableContentTypeDetection: opts.DisableContentTypeDetection,
	}

	if len(opts.Metadata) > 0 {
		// Services are inconsistent, but at least some treat keys
		// as case-insensitive. To make the behavior consistent, we
		// force-lowercase them when writing and reading.
		md := make(map[string]string, len(opts.Metadata))
		for k, v := range opts.Metadata {
			if k == "" {
				return nil, errors.New("WriterOptions.Metadata keys may not be empty strings")
			}
			if !utf8.ValidString(k) {
				return nil, fmt.Errorf("WriterOptions.Metadata keys must be valid UTF-8 strings: %q", k)
			}
			if !utf8.ValidString(v) {
				return nil, fmt.Errorf("WriterOptions.Metadata values must be valid UTF-8 strings: %q", v)
			}
			lowerK := strings.ToLower(k)
			if _, found := md[lowerK]; found {
				return nil, fmt.Errorf("WriterOptions.Metadata has a duplicate case-insensitive metadata key: %q", lowerK)
			}
			md[lowerK] = v
		}
		dopts.Metadata = md
	}

	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.closed {
		return nil, ErrClosed
	}

	// The derived cancel is stored on the Writer; it is invoked on the
	// error paths below so the context isn't leaked.
	ctx, cancel := context.WithCancel(ctx)

	w := &Writer{
		drv:    b.drv,
		cancel: cancel,
		key:    key,
		// contentMD5/md5hash support the WriterOptions.ContentMD5
		// integrity check (see its doc for the Close-time semantics).
		contentMD5: opts.ContentMD5,
		md5hash:    md5.New(),
	}

	if opts.ContentType != "" || opts.DisableContentTypeDetection {
		// The content type is known up-front (or detection is disabled),
		// so the driver writer can be created immediately.
		var ct string
		if opts.ContentType != "" {
			t, p, err := mime.ParseMediaType(opts.ContentType)
			if err != nil {
				cancel()
				return nil, err
			}
			ct = mime.FormatMediaType(t, p)
		}
		dw, err := b.drv.NewTypedWriter(ctx, key, ct, dopts)
		if err != nil {
			cancel()
			return nil, wrapError(b.drv, err, key)
		}
		w.w = dw
	} else {
		// Save the fields needed to called NewTypedWriter later, once we've gotten
		// sniffLen bytes; see the comment on Writer.
		w.ctx = ctx
		w.opts = dopts
		w.buf = bytes.NewBuffer([]byte{})
	}

	// Attach a finalizer that logs writers which were garbage collected
	// without ever being closed, to surface resource leaks.
	_, file, lineno, ok := runtime.Caller(1)
	runtime.SetFinalizer(w, func(w *Writer) {
		if !w.closed {
			var caller string
			if ok {
				caller = fmt.Sprintf(" (%s:%d)", file, lineno)
			}
			log.Printf("A blob.Writer writing to %q was never closed%s", key, caller)
		}
	})

	return w, nil
}
// Copy the blob stored at srcKey to dstKey.
//
// If the source blob does not exist, Copy returns an error for which
// errors.Is(err, ErrNotFound) reports true.
//
// If the destination blob already exists, it is overwritten.
func (b *Bucket) Copy(ctx context.Context, dstKey, srcKey string) (err error) {
	if !utf8.ValidString(srcKey) {
		return fmt.Errorf("Copy srcKey must be a valid UTF-8 string: %q", srcKey)
	}
	if !utf8.ValidString(dstKey) {
		return fmt.Errorf("Copy dstKey must be a valid UTF-8 string: %q", dstKey)
	}

	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.closed {
		return ErrClosed
	}

	// Both keys are included in the wrapped error for easier debugging.
	return wrapError(b.drv, b.drv.Copy(ctx, dstKey, srcKey), fmt.Sprintf("%s -> %s", srcKey, dstKey))
}
// Delete deletes the blob stored at key.
//
// If the blob does not exist, Delete returns an error for which
// errors.Is(err, ErrNotFound) reports true.
func (b *Bucket) Delete(ctx context.Context, key string) (err error) {
	if !utf8.ValidString(key) {
		return fmt.Errorf("Delete key must be a valid UTF-8 string: %q", key)
	}

	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.closed {
		return ErrClosed
	}

	return wrapError(b.drv, b.drv.Delete(ctx, key), key)
}
// Close releases any resources used for the bucket.
// Calling it more than once returns ErrClosed; only the first call
// reaches the underlying driver.
func (b *Bucket) Close() error {
	b.mu.Lock()
	alreadyClosed := b.closed
	b.closed = true
	b.mu.Unlock()

	if alreadyClosed {
		return ErrClosed
	}

	return wrapError(b.drv, b.drv.Close(), "")
}
// wrapError normalizes a driver error via NormalizeError and, when a
// non-empty key is provided, prefixes the error with it for context.
// A nil err is passed through unchanged.
func wrapError(drv Driver, err error, key string) error {
	if err == nil {
		return nil
	}

	normalized := drv.NormalizeError(err)
	if key == "" {
		return normalized
	}

	return fmt.Errorf("[key: %s] %w", key, normalized)
}

View File

@ -0,0 +1,108 @@
package blob
import (
"context"
"io"
"time"
)
// ReaderAttributes contains a subset of attributes about a blob that are
// accessible from Reader.
type ReaderAttributes struct {
	// ContentType is the MIME type of the blob object. It must not be empty.
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
	ContentType string

	// ModTime is the time the blob object was last modified.
	ModTime time.Time

	// Size is the size of the object in bytes.
	Size int64
}
// DriverReader reads an object from the blob.
// It is returned by Driver.NewRangeReader and wrapped by the portable Reader.
type DriverReader interface {
	io.ReadCloser

	// Attributes returns a subset of attributes about the blob.
	// The portable type will not modify the returned ReaderAttributes.
	Attributes() *ReaderAttributes
}
// DriverWriter writes an object to the blob.
// It is returned by Driver.NewTypedWriter and wrapped by the portable Writer.
type DriverWriter interface {
	io.WriteCloser
}
// Driver provides read, write and delete operations on objects within it on the
// blob service.
//
// Drivers are consumed through Bucket, which performs key validation,
// locking and error wrapping on top of the raw driver calls.
type Driver interface {
	// NormalizeError converts driver-specific errors into the portable
	// errors declared by this package (e.g. ErrNotFound), so that callers
	// can match them with errors.Is.
	NormalizeError(err error) error

	// Attributes returns attributes for the blob. If the specified object does
	// not exist, Attributes must return an error that NormalizeError
	// resolves to ErrNotFound.
	// The portable type will not modify the returned Attributes.
	Attributes(ctx context.Context, key string) (*Attributes, error)

	// ListPaged lists objects in the bucket, in lexicographical order by
	// UTF-8-encoded key, returning pages of objects at a time.
	// Services are only required to be eventually consistent with respect
	// to recently written or deleted objects. That is to say, there is no
	// guarantee that an object that's been written will immediately be returned
	// from ListPaged.
	// opts is guaranteed to be non-nil.
	ListPaged(ctx context.Context, opts *ListOptions) (*ListPage, error)

	// NewRangeReader returns a Reader that reads part of an object, reading at
	// most length bytes starting at the given offset. If length is negative, it
	// will read until the end of the object. If the specified object does not
	// exist, NewRangeReader must return an error that NormalizeError
	// resolves to ErrNotFound.
	//
	// The returned Reader *may* also implement Downloader if the underlying
	// implementation can take advantage of that. The Download call is guaranteed
	// to be the only call to the Reader. For such readers, offset will always
	// be 0 and length will always be -1.
	NewRangeReader(ctx context.Context, key string, offset, length int64) (DriverReader, error)

	// NewTypedWriter returns Writer that writes to an object associated with key.
	//
	// A new object will be created unless an object with this key already exists.
	// Otherwise any previous object with the same key will be replaced.
	// The object may not be available (and any previous object will remain)
	// until Close has been called.
	//
	// contentType sets the MIME type of the object to be written.
	// opts is guaranteed to be non-nil.
	//
	// The caller must call Close on the returned Writer when done writing.
	//
	// Implementations should abort an ongoing write if ctx is later canceled,
	// and do any necessary cleanup in Close. Close should then return ctx.Err().
	//
	// The returned Writer *may* also implement Uploader if the underlying
	// implementation can take advantage of that. The Upload call is guaranteed
	// to be the only non-Close call to the Writer.
	NewTypedWriter(ctx context.Context, key, contentType string, opts *WriterOptions) (DriverWriter, error)

	// Copy copies the object associated with srcKey to dstKey.
	//
	// If the source object does not exist, Copy must return an error that
	// NormalizeError resolves to ErrNotFound.
	//
	// If the destination object already exists, it should be overwritten.
	Copy(ctx context.Context, dstKey, srcKey string) error

	// Delete deletes the object associated with key. If the specified object
	// does not exist, Delete must return an error that NormalizeError
	// resolves to ErrNotFound.
	Delete(ctx context.Context, key string) error

	// Close cleans up any resources used by the Bucket. Once Close is called,
	// there will be no further method calls to the Driver.
	// There may be open readers or writers that will receive calls.
	// It is up to the driver as to how these will be handled.
	Close() error
}

View File

@ -0,0 +1,153 @@
package blob
// Copied from gocloud.dev/blob to avoid nuances around the specific
// HEX escaping/unescaping rules.
//
// -------------------------------------------------------------------
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// -------------------------------------------------------------------
import (
"fmt"
"strconv"
)
// HexEscape returns s, with all runes for which shouldEscape returns true
// escaped to "__0xXXX__", where XXX is the hex representation of the rune
// value. For example, " " would escape to "__0x20__".
//
// Non-UTF-8 strings will have their non-UTF-8 characters escaped to
// unicode.ReplacementChar; the original value is lost. Please file an
// issue if you need non-UTF8 support.
//
// Note: shouldEscape receives the whole string as a slice of runes plus an
// index, because a single byte or rune is not always enough context for the
// decision; for example, the caller might want to escape the second "/" in
// "//" but not the first one. Some decisions are made on a rune basis
// (e.g., encode all non-ASCII runes), hence runes rather than bytes.
func HexEscape(s string, shouldEscape func(s []rune, i int) bool) string {
	runes := []rune(s)

	// First pass: collect the indexes that need escaping (usually none).
	var toEscape []int
	for i := range runes {
		if shouldEscape(runes, i) {
			toEscape = append(toEscape, i)
		}
	}
	if len(toEscape) == 0 {
		return s
	}

	// Second pass: build the escaped result. An escaped rune expands to at
	// most 14 runes ("__0x7fffffff__"), i.e. at most 13 extra runes each,
	// so preallocate capacity accordingly.
	out := make([]rune, 0, len(runes)+13*len(toEscape))
	next := 0 // next index into toEscape
	for i, r := range runes {
		if next < len(toEscape) && i == toEscape[next] {
			// This rune was marked for escaping.
			out = append(out, []rune(fmt.Sprintf("__%#x__", r))...)
			next++
		} else {
			out = append(out, r)
		}
	}
	return string(out)
}
// unescape attempts to decode a "__0xXXX__" escape sequence beginning at r[i].
// It reports whether the decode succeeded and, on success, returns the decoded
// rune and the index of the last rune of r that was consumed.
func unescape(r []rune, i int) (bool, rune, int) {
	// Expect the "__0x" prefix.
	for _, want := range []rune{'_', '_', '0', 'x'} {
		if i >= len(r) || r[i] != want {
			return false, 0, 0
		}
		i++
	}

	// Collect the hex digits up to the next "_" (if any).
	start := i
	for i < len(r) && r[i] != '_' {
		i++
	}
	hexdigits := r[start:i]

	// Expect the closing "__".
	if i >= len(r) || r[i] != '_' {
		return false, 0, 0
	}
	i++
	if i >= len(r) || r[i] != '_' {
		return false, 0, 0
	}

	// Parse the hex digits into an int32 (an empty digit run fails here).
	v, err := strconv.ParseInt(string(hexdigits), 16, 32)
	if err != nil {
		return false, 0, 0
	}

	return true, rune(v), i
}

// HexUnescape reverses HexEscape.
func HexUnescape(s string) string {
	runes := []rune(s)
	var out []rune // lazily allocated on the first successful unescape
	for i := 0; i < len(runes); i++ {
		ok, decoded, last := unescape(runes, i)
		switch {
		case ok:
			if out == nil {
				// First escape sequence found: copy over everything
				// preceding it verbatim.
				out = append(make([]rune, 0, len(runes)), runes[:i]...)
			}
			out = append(out, decoded)
			i = last
		case out != nil:
			out = append(out, runes[i])
		}
	}
	if out == nil {
		// No escape sequences were found; return the input unchanged.
		return s
	}
	return string(out)
}

View File

@ -0,0 +1,178 @@
package blob
import (
"context"
"fmt"
"io"
"log"
"time"
)
var _ io.ReadSeekCloser = (*Reader)(nil)

// Reader reads bytes from a blob.
// It implements io.ReadSeekCloser, and must be closed after reads are finished.
type Reader struct {
	ctx            context.Context // Used to recreate r after Seeks
	r              DriverReader    // Underlying driver reader (may be recreated lazily in Read after a Seek).
	drv            Driver          // The driver that produced r; used to recreate readers and to wrap errors.
	key            string          // The blob key this reader was opened for.
	baseOffset     int64           // The base offset provided to NewRangeReader.
	baseLength     int64           // The length provided to NewRangeReader (may be negative).
	relativeOffset int64           // Current offset (relative to baseOffset).
	savedOffset    int64           // Last relativeOffset for r, saved after relativeOffset is changed in Seek, or -1 if no Seek.
	closed         bool            // Whether Close has been called.
}
// Read implements io.Reader (https://golang.org/pkg/io/#Reader).
//
// If a previous Seek moved the offset away from where the underlying driver
// reader currently points, the driver reader is recreated here (lazily) at
// the new absolute offset before reading.
func (r *Reader) Read(p []byte) (int, error) {
	if r.savedOffset != -1 {
		// We've done one or more Seeks since the last read. We may have
		// to recreate the Reader.
		//
		// Note that remembering the savedOffset and lazily resetting the
		// reader like this allows the caller to Seek, then Seek again back,
		// to the original offset, without having to recreate the reader.
		// We only have to recreate the reader if we actually read after a Seek.
		// This is an important optimization because it's common to Seek
		// to (SeekEnd, 0) and use the return value to determine the size
		// of the data, then Seek back to (SeekStart, 0).
		saved := r.savedOffset
		if r.relativeOffset == saved {
			// Nope! We're at the same place we left off.
			r.savedOffset = -1
		} else {
			// Yep! We've changed the offset. Recreate the reader.
			length := r.baseLength
			if length >= 0 {
				// Shrink the remaining length by how far we've moved forward.
				length -= r.relativeOffset
				if length < 0 {
					// Shouldn't happen based on checks in Seek.
					return 0, fmt.Errorf("invalid Seek (base length %d, relative offset %d)", r.baseLength, r.relativeOffset)
				}
			}
			newR, err := r.drv.NewRangeReader(r.ctx, r.key, r.baseOffset+r.relativeOffset, length)
			if err != nil {
				return 0, wrapError(r.drv, err, r.key)
			}
			// Close the stale reader; its close error is deliberately
			// discarded since the new reader is already in hand.
			_ = r.r.Close()
			r.savedOffset = -1
			r.r = newR
		}
	}
	n, err := r.r.Read(p)
	r.relativeOffset += int64(n)
	return n, wrapError(r.drv, err, r.key)
}
// Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker).
//
// It only adjusts the in-memory offset; the underlying driver reader is
// recreated lazily on the next Read (see the comment there).
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	if r.savedOffset == -1 {
		// Save the current offset for our reader. If the Seek changes the
		// offset, and then we try to read, we'll need to recreate the reader.
		// See comment above in Read for why we do it lazily.
		r.savedOffset = r.relativeOffset
	}

	// The maximum relative offset is the minimum of:
	// 1. The actual size of the blob, minus our initial baseOffset.
	// 2. The length provided to NewRangeReader (if it was non-negative).
	maxRelativeOffset := r.Size() - r.baseOffset
	if r.baseLength >= 0 && r.baseLength < maxRelativeOffset {
		maxRelativeOffset = r.baseLength
	}

	switch whence {
	case io.SeekStart:
		r.relativeOffset = offset
	case io.SeekCurrent:
		r.relativeOffset += offset
	case io.SeekEnd:
		r.relativeOffset = maxRelativeOffset + offset
	default:
		// io.Seeker requires an error for an invalid whence
		// (previously an unknown whence silently left the offset unchanged).
		return 0, fmt.Errorf("invalid whence %d", whence)
	}

	if r.relativeOffset < 0 {
		// "Seeking to an offset before the start of the file is an error."
		invalidOffset := r.relativeOffset
		r.relativeOffset = 0
		return 0, fmt.Errorf("Seek resulted in invalid offset %d, using 0", invalidOffset)
	}
	if r.relativeOffset > maxRelativeOffset {
		// "Seeking to any positive offset is legal, but the behavior of subsequent
		// I/O operations on the underlying object is implementation-dependent."
		// We'll choose to set the offset to the EOF.
		log.Printf("blob.Reader.Seek set an offset after EOF (base offset/length from NewRangeReader %d, %d; actual blob size %d; relative offset %d -> absolute offset %d).", r.baseOffset, r.baseLength, r.Size(), r.relativeOffset, r.baseOffset+r.relativeOffset)
		r.relativeOffset = maxRelativeOffset
	}
	return r.relativeOffset, nil
}
// Close implements io.Closer (https://golang.org/pkg/io/#Closer).
func (r *Reader) Close() error {
	r.closed = true
	return wrapError(r.drv, r.r.Close(), r.key)
}
// ContentType returns the MIME type of the blob,
// as reported by the underlying driver reader's attributes.
func (r *Reader) ContentType() string {
	return r.r.Attributes().ContentType
}

// ModTime returns the time the blob was last modified,
// as reported by the underlying driver reader's attributes.
func (r *Reader) ModTime() time.Time {
	return r.r.Attributes().ModTime
}

// Size returns the size of the blob content in bytes,
// as reported by the underlying driver reader's attributes.
func (r *Reader) Size() int64 {
	return r.r.Attributes().Size
}
// WriteTo reads from r and writes to w until there's no more data or
// an error occurs.
// The return value is the number of bytes written to w.
//
// It implements the io.WriterTo interface.
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
	// Delegate to w's ReaderFrom fast path when available (saves an
	// allocation and a copy), except for our own *Writer, whose ReadFrom
	// would bounce straight back here and recurse forever.
	if _, isBlobWriter := w.(*Writer); !isBlobWriter {
		if rf, ok := w.(io.ReaderFrom); ok {
			return rf.ReadFrom(r)
		}
	}

	_, written, err := readFromWriteTo(r, w)
	return written, err
}
// readFromWriteTo is a helper for ReadFrom and WriteTo.
// It reads data from r and writes to w, until EOF or a read/write error.
// It returns the number of bytes read from r and the number of bytes
// written to w.
func readFromWriteTo(r io.Reader, w io.Writer) (int64, int64, error) {
// Note: can't use io.Copy because it will try to use r.WriteTo
// or w.WriteTo, which is recursive in this context.
buf := make([]byte, 1024)
var totalRead, totalWritten int64
for {
numRead, rerr := r.Read(buf)
if numRead > 0 {
totalRead += int64(numRead)
numWritten, werr := w.Write(buf[0:numRead])
totalWritten += int64(numWritten)
if werr != nil {
return totalRead, totalWritten, werr
}
}
if rerr == io.EOF {
// Done!
return totalRead, totalWritten, nil
}
if rerr != nil {
return totalRead, totalWritten, rerr
}
}
}

View File

@ -0,0 +1,166 @@
package blob
import (
"bytes"
"context"
"fmt"
"hash"
"io"
"net/http"
)
var _ io.WriteCloser = (*Writer)(nil)

// Writer writes bytes to a blob.
//
// It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be
// closed after all writes are done.
type Writer struct {
	drv          Driver       // The driver used to create the underlying writer and to wrap errors.
	w            DriverWriter // Underlying driver writer; nil until created (see the lazy-init fields below).
	key          string       // The blob key being written.
	cancel       func()       // cancels the ctx provided to NewTypedWriter if contentMD5 verification fails
	contentMD5   []byte       // Expected MD5 from WriterOptions; empty disables verification in Close.
	md5hash      hash.Hash    // Accumulates the MD5 of written bytes when contentMD5 is set.
	bytesWritten int          // Total bytes forwarded to the underlying driver writer.
	closed       bool         // Whether Close has been called.

	// These fields are non-zero values only when w is nil (not yet created).
	//
	// A ctx is stored in the Writer since we need to pass it into NewTypedWriter
	// when we finish detecting the content type of the blob and create the
	// underlying driver.Writer. This step happens inside Write or Close and
	// neither of them take a context.Context as an argument.
	//
	// All 3 fields are only initialized when we create the Writer without
	// setting the w field, and are reset to zero values after w is created.
	ctx  context.Context
	opts *WriterOptions
	buf  *bytes.Buffer
}

// sniffLen is the byte size of Writer.buf used to detect content-type.
const sniffLen = 512
// Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer).
//
// Writes may happen asynchronously, so the returned error can be nil
// even if the actual write eventually fails. The write is only guaranteed to
// have succeeded if Close returns no error.
func (w *Writer) Write(p []byte) (int, error) {
	// Feed the MD5 hash first so Close can verify WriterOptions.ContentMD5
	// against everything that was passed to Write.
	if len(w.contentMD5) > 0 {
		if _, err := w.md5hash.Write(p); err != nil {
			return 0, err
		}
	}

	if w.w != nil {
		// The underlying driver writer already exists -> write through.
		return w.write(p)
	}

	// If w is not yet created due to no content-type being passed in, try to sniff
	// the MIME type based on at most 512 bytes of the blob content of p.

	// Detect the content-type directly if the first chunk is at least 512 bytes.
	if w.buf.Len() == 0 && len(p) >= sniffLen {
		return w.open(p)
	}

	// Store p in w.buf and detect the content-type when the size of content in
	// w.buf is at least 512 bytes.
	n, err := w.buf.Write(p)
	if err != nil {
		return 0, err
	}
	if w.buf.Len() >= sniffLen {
		// Note that w.open will return the full length of the buffer; we don't want
		// to return that as the length of this write since some of them were written in
		// previous writes. Instead, we return the n from this write, above.
		_, err := w.open(w.buf.Bytes())
		return n, err
	}
	return n, nil
}
// Close closes the blob writer. The write operation is not guaranteed
// to have succeeded until Close returns with no error.
//
// Close may return an error if the context provided to create the
// Writer is canceled or reaches its deadline.
func (w *Writer) Close() (err error) {
	w.closed = true

	// Verify the MD5 hash of what was written matches the ContentMD5 provided by the user.
	if len(w.contentMD5) > 0 {
		md5sum := w.md5hash.Sum(nil)
		if !bytes.Equal(md5sum, w.contentMD5) {
			// No match! Return an error, but first cancel the context and call the
			// driver's Close function to ensure the write is aborted.
			w.cancel()
			if w.w != nil {
				_ = w.w.Close()
			}
			return fmt.Errorf("the WriterOptions.ContentMD5 you specified (%X) did not match what was written (%X)", w.contentMD5, md5sum)
		}
	}

	// Cancel the stored context on all remaining return paths.
	defer w.cancel()

	if w.w != nil {
		return wrapError(w.drv, w.w.Close(), w.key)
	}

	// The driver writer was never created (fewer than sniffLen bytes were
	// buffered by Write) -> create it now from the buffered bytes so the
	// blob still gets written, then close it.
	if _, err := w.open(w.buf.Bytes()); err != nil {
		return err
	}
	return wrapError(w.drv, w.w.Close(), w.key)
}
// open lazily creates the underlying driver writer, using p to sniff the
// blob's MIME type. The error it returns is wrapped.
func (w *Writer) open(p []byte) (int, error) {
	contentType := http.DetectContentType(p)

	dw, err := w.drv.NewTypedWriter(w.ctx, w.key, contentType, w.opts)
	if err != nil {
		return 0, wrapError(w.drv, err, w.key)
	}
	w.w = dw

	// Reset the lazy-initialization fields now that the driver writer
	// exists (see the comment on the Writer struct).
	w.ctx = nil
	w.opts = nil
	w.buf = nil

	return w.write(p)
}

// write forwards p to the underlying driver writer and keeps track of the
// total number of bytes written.
func (w *Writer) write(p []byte) (int, error) {
	n, err := w.w.Write(p)
	w.bytesWritten += n
	return n, wrapError(w.drv, err, w.key)
}
// ReadFrom reads from r and writes to w until EOF or error.
// The return value is the number of bytes read from r.
//
// It implements the io.ReaderFrom interface.
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
	// Delegate to r's WriteTo fast path when available (saves an
	// allocation and a copy), except for our own *Reader, whose WriteTo
	// would bounce straight back here and recurse forever.
	if _, isBlobReader := r.(*Reader); !isBlobReader {
		if wt, ok := r.(io.WriterTo); ok {
			return wt.WriteTo(w)
		}
	}

	read, _, err := readFromWriteTo(r, w)
	return read, err
}

View File

@ -14,50 +14,23 @@ import (
"strconv"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/disintegration/imaging"
"github.com/gabriel-vasile/mimetype"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3lite"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/fileblob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
"github.com/pocketbase/pocketbase/tools/list"
"gocloud.dev/blob"
"gocloud.dev/blob/fileblob"
"gocloud.dev/gcerrors"
)
var gcpIgnoreHeaders = []string{"Accept-Encoding"}
var ErrNotFound = errors.New("blob not found")
// note: the same as blob.ErrNotFound for backward compatibility with earlier versions
var ErrNotFound = blob.ErrNotFound
type System struct {
ctx context.Context
bucket *blob.Bucket
}
// -------------------------------------------------------------------
// @todo delete after replacing the aws-sdk-go-v2 dependency
//
// enforce WHEN_REQUIRED by default in case the user has updated AWS SDK dependency
// https://github.com/aws/aws-sdk-go-v2/discussions/2960
// https://github.com/pocketbase/pocketbase/discussions/6440
// https://github.com/pocketbase/pocketbase/discussions/6313
func init() {
reqEnv := os.Getenv("AWS_REQUEST_CHECKSUM_CALCULATION")
if reqEnv == "" {
os.Setenv("AWS_REQUEST_CHECKSUM_CALCULATION", "WHEN_REQUIRED")
}
resEnv := os.Getenv("AWS_RESPONSE_CHECKSUM_VALIDATION")
if resEnv == "" {
os.Setenv("AWS_RESPONSE_CHECKSUM_VALIDATION", "WHEN_REQUIRED")
}
}
// -------------------------------------------------------------------
// NewS3 initializes an S3 filesystem instance.
//
// NB! Make sure to call `Close()` after you are done working with it.
@ -71,41 +44,21 @@ func NewS3(
) (*System, error) {
ctx := context.Background() // default context
cred := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")
client := &s3.S3{
Bucket: bucketName,
Region: region,
Endpoint: endpoint,
AccessKey: accessKey,
SecretKey: secretKey,
UsePathStyle: s3ForcePathStyle,
}
cfg, err := config.LoadDefaultConfig(
ctx,
config.WithCredentialsProvider(cred),
config.WithRegion(region),
)
drv, err := s3blob.New(client)
if err != nil {
return nil, err
}
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
// ensure that the endpoint has url scheme for
// backward compatibility with v1 of the aws sdk
if !strings.Contains(endpoint, "://") {
endpoint = "https://" + endpoint
}
o.BaseEndpoint = aws.String(endpoint)
o.UsePathStyle = s3ForcePathStyle
// Google Cloud Storage alters the Accept-Encoding header,
// which breaks the v2 request signature
// (https://github.com/aws/aws-sdk-go-v2/issues/1816)
if strings.Contains(endpoint, "storage.googleapis.com") {
ignoreSigningHeaders(o, gcpIgnoreHeaders)
}
})
bucket, err := s3lite.OpenBucketV2(ctx, client, bucketName, nil)
if err != nil {
return nil, err
}
return &System{ctx: ctx, bucket: bucket}, nil
return &System{ctx: ctx, bucket: blob.NewBucket(drv)}, nil
}
// NewLocal initializes a new local filesystem instance.
@ -119,14 +72,14 @@ func NewLocal(dirPath string) (*System, error) {
return nil, err
}
bucket, err := fileblob.OpenBucket(dirPath, &fileblob.Options{
drv, err := fileblob.New(dirPath, &fileblob.Options{
NoTempDir: true,
})
if err != nil {
return nil, err
}
return &System{ctx: ctx, bucket: bucket}, nil
return &System{ctx: ctx, bucket: blob.NewBucket(drv)}, nil
}
// SetContext assigns the specified context to the current filesystem.
@ -140,29 +93,15 @@ func (s *System) Close() error {
}
// Exists checks if file with fileKey path exists or not.
//
// If the file doesn't exist returns false and ErrNotFound.
func (s *System) Exists(fileKey string) (bool, error) {
exists, err := s.bucket.Exists(s.ctx, fileKey)
if gcerrors.Code(err) == gcerrors.NotFound {
err = ErrNotFound
}
return exists, err
return s.bucket.Exists(s.ctx, fileKey)
}
// Attributes returns the attributes for the file with fileKey path.
//
// If the file doesn't exist it returns ErrNotFound.
func (s *System) Attributes(fileKey string) (*blob.Attributes, error) {
attrs, err := s.bucket.Attributes(s.ctx, fileKey)
if gcerrors.Code(err) == gcerrors.NotFound {
err = ErrNotFound
}
return attrs, err
return s.bucket.Attributes(s.ctx, fileKey)
}
// GetFile returns a file content reader for the given fileKey.
@ -171,13 +110,7 @@ func (s *System) Attributes(fileKey string) (*blob.Attributes, error) {
//
// If the file doesn't exist returns ErrNotFound.
func (s *System) GetFile(fileKey string) (*blob.Reader, error) {
br, err := s.bucket.NewReader(s.ctx, fileKey, nil)
if gcerrors.Code(err) == gcerrors.NotFound {
err = ErrNotFound
}
return br, err
return s.bucket.NewReader(s.ctx, fileKey)
}
// Copy copies the file stored at srcKey to dstKey.
@ -186,13 +119,7 @@ func (s *System) GetFile(fileKey string) (*blob.Reader, error) {
//
// If dstKey file already exists, it is overwritten.
func (s *System) Copy(srcKey, dstKey string) error {
err := s.bucket.Copy(s.ctx, dstKey, srcKey, nil)
if gcerrors.Code(err) == gcerrors.NotFound {
err = ErrNotFound
}
return err
return s.bucket.Copy(s.ctx, dstKey, srcKey)
}
// List returns a flat list with info for all files under the specified prefix.
@ -206,7 +133,7 @@ func (s *System) List(prefix string) ([]*blob.ListObject, error) {
for {
obj, err := iter.Next(s.ctx)
if err != nil {
if err != io.EOF {
if !errors.Is(err, io.EOF) {
return nil, err
}
break
@ -323,13 +250,7 @@ func (s *System) UploadMultipart(fh *multipart.FileHeader, fileKey string) error
//
// If the file doesn't exist returns ErrNotFound.
func (s *System) Delete(fileKey string) error {
err := s.bucket.Delete(s.ctx, fileKey)
if gcerrors.Code(err) == gcerrors.NotFound {
return ErrNotFound
}
return err
return s.bucket.Delete(s.ctx, fileKey)
}
// DeletePrefix deletes everything starting with the specified prefix.
@ -361,7 +282,7 @@ func (s *System) DeletePrefix(prefix string) []error {
for {
obj, err := iter.Next(s.ctx)
if err != nil {
if err != io.EOF {
if !errors.Is(err, io.EOF) {
failed = append(failed, err)
}
break
@ -420,7 +341,7 @@ func (s *System) IsEmptyDir(dir string) bool {
_, err := iter.Next(s.ctx)
return err == io.EOF
return err != nil && errors.Is(err, io.EOF)
}
var inlineServeContentTypes = []string{

View File

@ -1,72 +0,0 @@
package filesystem
import (
"context"
"fmt"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// ignoreSigningHeaders excludes the listed headers
// from the request signing because some providers may alter them.
//
// See https://github.com/aws/aws-sdk-go-v2/issues/1816.
func ignoreSigningHeaders(o *s3.Options, headers []string) {
	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
		// Strip the headers right before the "Signing" finalize step...
		if err := stack.Finalize.Insert(ignoreHeaders(headers), "Signing", middleware.Before); err != nil {
			return err
		}
		// ...and restore them right after it, so the request is sent intact.
		if err := stack.Finalize.Insert(restoreIgnored(), "Signing", middleware.After); err != nil {
			return err
		}
		return nil
	})
}
// ignoredHeadersKey is the middleware stack context key under which the
// stripped header values are stashed between ignoreHeaders and restoreIgnored.
type ignoredHeadersKey struct{}

// ignoreHeaders returns a finalize middleware that removes the specified
// headers from the request before signing and stores their original values
// in the stack context for restoreIgnored to put back.
func ignoreHeaders(headers []string) middleware.FinalizeMiddleware {
	return middleware.FinalizeMiddlewareFunc(
		"IgnoreHeaders",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
				return out, metadata, &v4.SigningError{Err: fmt.Errorf("(ignoreHeaders) unexpected request middleware type %T", in.Request)}
			}
			// Remember the current values, then drop the headers so they
			// are excluded from the signature computation.
			ignored := make(map[string]string, len(headers))
			for _, h := range headers {
				ignored[h] = req.Header.Get(h)
				req.Header.Del(h)
			}
			ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
			return next.HandleFinalize(ctx, in)
		},
	)
}
// restoreIgnored returns a finalize middleware that re-adds the header values
// previously removed (and stashed in the stack context) by ignoreHeaders.
func restoreIgnored() middleware.FinalizeMiddleware {
	return middleware.FinalizeMiddlewareFunc(
		"RestoreIgnored",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
				return out, metadata, &v4.SigningError{Err: fmt.Errorf("(restoreIgnored) unexpected request middleware type %T", in.Request)}
			}
			// Put back whatever ignoreHeaders removed (no-op if nothing was stashed).
			ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
			for k, v := range ignored {
				req.Header.Set(k, v)
			}
			return next.HandleFinalize(ctx, in)
		},
	)
}

View File

@ -0,0 +1,79 @@
// Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileblob
import (
"encoding/json"
"fmt"
"os"
)
const attrsExt = ".attrs"
var errAttrsExt = fmt.Errorf("file extension %q is reserved", attrsExt)
// xattrs stores extended attributes for an object. The format is like
// filesystem extended attributes, see
// https://www.freedesktop.org/wiki/CommonExtendedAttributes.
type xattrs struct {
CacheControl string `json:"user.cache_control"`
ContentDisposition string `json:"user.content_disposition"`
ContentEncoding string `json:"user.content_encoding"`
ContentLanguage string `json:"user.content_language"`
ContentType string `json:"user.content_type"`
Metadata map[string]string `json:"user.metadata"`
MD5 []byte `json:"md5"`
}
// setAttrs creates a "path.attrs" file along with blob to store the attributes,
// it uses JSON format.
func setAttrs(path string, xa xattrs) error {
f, err := os.Create(path + attrsExt)
if err != nil {
return err
}
if err := json.NewEncoder(f).Encode(xa); err != nil {
f.Close()
os.Remove(f.Name())
return err
}
return f.Close()
}
// getAttrs looks at the "path.attrs" file to retrieve the attributes and
// decodes them into a xattrs struct. It doesn't return error when there is no
// such .attrs file.
func getAttrs(path string) (xattrs, error) {
f, err := os.Open(path + attrsExt)
if err != nil {
if os.IsNotExist(err) {
// Handle gracefully for non-existent .attr files.
return xattrs{
ContentType: "application/octet-stream",
}, nil
}
return xattrs{}, err
}
xa := new(xattrs)
if err := json.NewDecoder(f).Decode(xa); err != nil {
f.Close()
return xattrs{}, err
}
return *xa, f.Close()
}

View File

@ -0,0 +1,713 @@
// Package fileblob provides a blob.Bucket driver implementation.
//
// NB! To minimize breaking changes with older PocketBase releases,
// the driver is a stripped down and adapted version of the previously
// used gocloud.dev/blob/fileblob, hence many of the below doc comments,
// struct options and interface implementations are the same.
//
// To avoid partial writes, fileblob writes to a temporary file and then renames
// the temporary file to the final path on Close. By default, it creates these
// temporary files in `os.TempDir`. If `os.TempDir` is on a different mount than
// your base bucket path, the `os.Rename` will fail with `invalid cross-device link`.
// To avoid this, either configure the temp dir to use by setting the environment
// variable `TMPDIR`, or set `Options.NoTempDir` to `true` (fileblob will create
// the temporary files next to the actual files instead of in a temporary directory).
//
// By default fileblob stores blob metadata in "sidecar" files under the original
// filename with an additional ".attrs" suffix.
// This behaviour can be changed via `Options.Metadata`;
// writing of those metadata files can be suppressed by setting it to
// `MetadataDontWrite` or its equivalent "metadata=skip" in the URL for the opener.
// In either case, absent any stored metadata many `blob.Attributes` fields
// will be set to default values.
//
// The blob abstraction supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for fileblob:
// - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
// If os.PathSeparator != "/", it is also escaped.
// Additionally, the "/" in "../", the trailing "/" in "//", and a trailing
// "/" is key names are escaped in the same way.
// On Windows, the characters "<>:"|?*" are also escaped.
//
// Example:
//
// drv, _ := fileblob.New("/path/to/dir", nil)
// bucket := blob.NewBucket(drv)
package fileblob
import (
"context"
"crypto/md5"
"errors"
"fmt"
"hash"
"io"
"io/fs"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
)
// defaultPageSize is the ListPaged page size used when ListOptions.PageSize is 0.
const defaultPageSize = 1000

type metadataOption string // Not exported as subject to change.

// Settings for Options.Metadata.
const (
	// Metadata gets written to a separate file.
	MetadataInSidecar metadataOption = ""
	// Writes won't carry metadata, as per the package docstring.
	MetadataDontWrite metadataOption = "skip"
)
// Options sets options for constructing a *blob.Bucket backed by fileblob.
//
// The zero value is a valid configuration (sidecar metadata, 0777 directories,
// no directory auto-creation, temporary files in os.TempDir).
type Options struct {
	// Refers to the strategy for how to deal with metadata (such as blob.Attributes).
	// For supported values please see the Metadata* constants.
	// If left unchanged, 'MetadataInSidecar' will be used.
	Metadata metadataOption

	// The FileMode to use when creating directories for the top-level directory
	// backing the bucket (when CreateDir is true), and for subdirectories for keys.
	// Defaults to 0777.
	DirFileMode os.FileMode

	// If true, create the directory backing the Bucket if it does not exist
	// (using os.MkdirAll).
	CreateDir bool

	// If true, don't use os.TempDir for temporary files, but instead place them
	// next to the actual files. This may result in "stranded" temporary files
	// (e.g., if the application is killed before the file cleanup runs).
	//
	// If your bucket directory is on a different mount than os.TempDir, you will
	// need to set this to true, as os.Rename will fail across mount points.
	NoTempDir bool
}
// New creates a new instance of the fileblob driver backed by the
// filesystem and rooted at dir, which must exist (unless opts.CreateDir
// is true, in which case it is created on demand).
func New(dir string, opts *Options) (blob.Driver, error) {
	if opts == nil {
		opts = &Options{}
	} else {
		// Work on a private copy so that defaulting DirFileMode below
		// doesn't mutate the caller's Options value.
		optsCopy := *opts
		opts = &optsCopy
	}
	if opts.DirFileMode == 0 {
		opts.DirFileMode = os.FileMode(0o777)
	}

	absdir, err := filepath.Abs(dir)
	if err != nil {
		return nil, fmt.Errorf("failed to convert %s into an absolute path: %w", dir, err)
	}

	// Optionally, create the directory if it does not already exist.
	info, err := os.Stat(absdir)
	if err != nil && opts.CreateDir && os.IsNotExist(err) {
		err = os.MkdirAll(absdir, opts.DirFileMode)
		if err != nil {
			return nil, fmt.Errorf("tried to create directory but failed: %w", err)
		}
		info, err = os.Stat(absdir)
	}
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, fmt.Errorf("%s is not a directory", absdir)
	}

	return &driver{dir: absdir, opts: opts}, nil
}
// driver is the fileblob implementation of the blob.Driver interface.
type driver struct {
	opts *Options
	dir  string // absolute path to the bucket root directory
}

// Close implements [blob/Driver.Close].
//
// It is a no-op since the filesystem driver holds no resources to release.
func (drv *driver) Close() error {
	return nil
}

// NormalizeError implements [blob/Driver.NormalizeError].
//
// It joins "file does not exist" OS errors with blob.ErrNotFound so that
// callers can detect missing blobs via errors.Is(err, blob.ErrNotFound).
func (drv *driver) NormalizeError(err error) error {
	if os.IsNotExist(err) {
		return errors.Join(err, blob.ErrNotFound)
	}
	return err
}
// path returns the full filesystem path for a blob key.
//
// It returns errAttrsExt if the escaped key would collide with the
// reserved ".attrs" metadata sidecar extension.
func (drv *driver) path(key string) (string, error) {
	path := filepath.Join(drv.dir, escapeKey(key))
	if strings.HasSuffix(path, attrsExt) {
		return "", errAttrsExt
	}
	return path, nil
}
// forKey resolves key to its full filesystem path and returns that path
// together with the file's os.FileInfo and its stored extended attributes.
func (drv *driver) forKey(key string) (string, os.FileInfo, *xattrs, error) {
	path, err := drv.path(key)
	if err != nil {
		return "", nil, nil, err
	}

	info, err := os.Stat(path)
	switch {
	case err != nil:
		return "", nil, nil, err
	case info.IsDir():
		// Directories are not valid blobs; report them as missing.
		return "", nil, nil, os.ErrNotExist
	}

	xa, err := getAttrs(path)
	if err != nil {
		return "", nil, nil, err
	}

	return path, info, &xa, nil
}
// ListPaged implements [blob/Driver.ListPaged].
//
// It performs a recursive walk of the bucket directory, collecting up to
// opts.PageSize results (defaultPageSize when unset). Pagination is keyed
// on the last returned key (opts.PageToken), and "directories" are
// synthesized when opts.Delimiter is set.
func (drv *driver) ListPaged(ctx context.Context, opts *blob.ListOptions) (*blob.ListPage, error) {
	var pageToken string
	if len(opts.PageToken) > 0 {
		pageToken = string(opts.PageToken)
	}
	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}
	// If opts.Delimiter != "", lastPrefix contains the last "directory" key we
	// added. It is used to avoid adding it again; all files in this "directory"
	// are collapsed to the single directory entry.
	var lastPrefix string
	var lastKeyAdded string
	// If the Prefix contains a "/", we can set the root of the Walk
	// to the path specified by the Prefix as any files below the path will not
	// match the Prefix.
	// Note that we use "/" explicitly and not os.PathSeparator, as the opts.Prefix
	// is in the unescaped form.
	root := drv.dir
	if i := strings.LastIndex(opts.Prefix, "/"); i > -1 {
		root = filepath.Join(root, opts.Prefix[:i])
	}
	var result blob.ListPage
	// Do a full recursive scan of the root directory.
	err := filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error {
		if err != nil {
			// Couldn't read this file/directory for some reason; just skip it.
			return nil
		}
		// Skip the self-generated attribute files.
		if strings.HasSuffix(path, attrsExt) {
			return nil
		}
		// os.Walk returns the root directory; skip it.
		if path == drv.dir {
			return nil
		}
		// Strip the <drv.dir> prefix from path.
		prefixLen := len(drv.dir)
		// Include the separator for non-root.
		if drv.dir != "/" {
			prefixLen++
		}
		path = path[prefixLen:]
		// Unescape the path to get the key.
		key := unescapeKey(path)
		// Skip all directories. If opts.Delimiter is set, we'll create
		// pseudo-directories later.
		// Note that returning nil means that we'll still recurse into it;
		// we're just not adding a result for the directory itself.
		if info.IsDir() {
			key += "/"
			// Avoid recursing into subdirectories if the directory name already
			// doesn't match the prefix; any files in it are guaranteed not to match.
			if len(key) > len(opts.Prefix) && !strings.HasPrefix(key, opts.Prefix) {
				return filepath.SkipDir
			}
			// Similarly, avoid recursing into subdirectories if we're making
			// "directories" and all of the files in this subdirectory are guaranteed
			// to collapse to a "directory" that we've already added.
			if lastPrefix != "" && strings.HasPrefix(key, lastPrefix) {
				return filepath.SkipDir
			}
			return nil
		}
		// Skip files/directories that don't match the Prefix.
		if !strings.HasPrefix(key, opts.Prefix) {
			return nil
		}
		var md5 []byte
		// NOTE(review): path was stripped to be relative to drv.dir above, so
		// getAttrs only resolves when the process working dir equals drv.dir —
		// otherwise md5 silently stays nil. Verify whether the unstripped path
		// should be used here.
		if xa, err := getAttrs(path); err == nil {
			// Note: we only have the MD5 hash for blobs that we wrote.
			// For other blobs, md5 will remain nil.
			md5 = xa.MD5
		}
		fi, err := info.Info()
		if err != nil {
			return err
		}
		obj := &blob.ListObject{
			Key:     key,
			ModTime: fi.ModTime(),
			Size:    fi.Size(),
			MD5:     md5,
		}
		// If using Delimiter, collapse "directories".
		if opts.Delimiter != "" {
			// Strip the prefix, which may contain Delimiter.
			keyWithoutPrefix := key[len(opts.Prefix):]
			// See if the key still contains Delimiter.
			// If no, it's a file and we just include it.
			// If yes, it's a file in a "sub-directory" and we want to collapse
			// all files in that "sub-directory" into a single "directory" result.
			if idx := strings.Index(keyWithoutPrefix, opts.Delimiter); idx != -1 {
				prefix := opts.Prefix + keyWithoutPrefix[0:idx+len(opts.Delimiter)]
				// We've already included this "directory"; don't add it.
				if prefix == lastPrefix {
					return nil
				}
				// Update the object to be a "directory".
				obj = &blob.ListObject{
					Key:   prefix,
					IsDir: true,
				}
				lastPrefix = prefix
			}
		}
		// If there's a pageToken, skip anything before it.
		if pageToken != "" && obj.Key <= pageToken {
			return nil
		}
		// If we've already got a full page of results, set NextPageToken and stop.
		// Unless the current object is a directory, in which case there may
		// still be objects coming that are alphabetically before it (since
		// we appended the delimiter). In that case, keep going; we'll trim the
		// extra entries (if any) before returning.
		if len(result.Objects) == pageSize && !obj.IsDir {
			result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
			return io.EOF
		}
		result.Objects = append(result.Objects, obj)
		// Normally, objects are added in the correct order (by Key).
		// However, sometimes adding the file delimiter messes that up
		// (e.g., if the file delimiter is later in the alphabet than the last character of a key).
		// Detect if this happens and swap if needed.
		if len(result.Objects) > 1 && obj.Key < lastKeyAdded {
			i := len(result.Objects) - 1
			result.Objects[i-1], result.Objects[i] = result.Objects[i], result.Objects[i-1]
			lastKeyAdded = result.Objects[i].Key
		} else {
			lastKeyAdded = obj.Key
		}
		return nil
	})
	// io.EOF is the sentinel used above to stop the walk early on a full page.
	if err != nil && err != io.EOF {
		return nil, err
	}
	// Trim the extra directory entries collected past the page size (see above).
	if len(result.Objects) > pageSize {
		result.Objects = result.Objects[0:pageSize]
		result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
	}
	return &result, nil
}
// Attributes implements [blob/Driver.Attributes].
//
// The result combines filesystem info (size, mod time) with the metadata
// stored in the sidecar attributes file. The ETag is synthesized from the
// mod time and size since the filesystem provides none.
func (drv *driver) Attributes(ctx context.Context, key string) (*blob.Attributes, error) {
	_, info, xa, err := drv.forKey(key)
	if err != nil {
		return nil, err
	}
	return &blob.Attributes{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		ContentType:        xa.ContentType,
		Metadata:           xa.Metadata,
		// CreateTime left as the zero time.
		ModTime: info.ModTime(),
		Size:    info.Size(),
		MD5:     xa.MD5,
		ETag:    fmt.Sprintf("\"%x-%x\"", info.ModTime().UnixNano(), info.Size()),
	}, nil
}
// NewRangeReader implements [blob/Driver.NewRangeReader].
//
// offset is the byte position to start reading from and length the maximum
// number of bytes to read (a negative length means "until EOF").
func (drv *driver) NewRangeReader(ctx context.Context, key string, offset, length int64) (blob.DriverReader, error) {
	path, info, xa, err := drv.forKey(key)
	if err != nil {
		return nil, err
	}
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	if offset > 0 {
		if _, err := f.Seek(offset, io.SeekStart); err != nil {
			f.Close() // don't leak the file handle when the seek fails
			return nil, err
		}
	}
	r := io.Reader(f)
	if length >= 0 {
		r = io.LimitReader(r, length)
	}
	return &reader{
		r: r,
		c: f,
		attrs: &blob.ReaderAttributes{
			ContentType: xa.ContentType,
			ModTime:     info.ModTime(),
			Size:        info.Size(),
		},
	}, nil
}
// createTemp creates a temporary file that will later be renamed to path.
//
// When noTempDir is true the temp file is created next to path itself
// (same directory/filesystem); otherwise it is created in os.TempDir().
//
// Use a custom createTemp function rather than os.CreateTemp() as
// os.CreateTemp() sets the permissions of the tempfile to 0600, rather than
// 0666, making it inconsistent with the directories and attribute files.
func createTemp(path string, noTempDir bool) (*os.File, error) {
	try := 0
	for {
		// Append the current time with nanosecond precision and .tmp to the
		// base path. If the file already exists try again. Nanosecond changes enough
		// between each iteration to make a conflict unlikely. Using the full
		// time lowers the chance of a collision with a file using a similar
		// pattern, but has undefined behavior after the year 2262.
		var name string
		if noTempDir {
			name = path
		} else {
			name = filepath.Join(os.TempDir(), filepath.Base(path))
		}
		name += "." + strconv.FormatInt(time.Now().UnixNano(), 16) + ".tmp"
		f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666)
		if os.IsExist(err) {
			// Extremely unlikely, but bound the retries to avoid spinning forever.
			if try++; try < 10000 {
				continue
			}
			return nil, &os.PathError{Op: "createtemp", Path: path + ".*.tmp", Err: os.ErrExist}
		}
		return f, err
	}
}
// NewTypedWriter implements [blob/Driver.NewTypedWriter].
//
// The blob is written to a temporary file first and only renamed to its
// final path on a successful Close, so readers never observe partial
// writes. Depending on drv.opts.Metadata, attributes are either skipped
// entirely or persisted in a sidecar file next to the blob.
func (drv *driver) NewTypedWriter(ctx context.Context, key, contentType string, opts *blob.WriterOptions) (blob.DriverWriter, error) {
	path, err := drv.path(key)
	if err != nil {
		return nil, err
	}
	err = os.MkdirAll(filepath.Dir(path), drv.opts.DirFileMode)
	if err != nil {
		return nil, err
	}
	f, err := createTemp(path, drv.opts.NoTempDir)
	if err != nil {
		return nil, err
	}
	if drv.opts.Metadata == MetadataDontWrite {
		// No sidecar file; the plain writer only renames the temp file on Close.
		w := &writer{
			ctx:  ctx,
			File: f,
			path: path,
		}
		return w, nil
	}
	var metadata map[string]string
	if len(opts.Metadata) > 0 {
		metadata = opts.Metadata
	}
	return &writerWithSidecar{
		ctx:        ctx,
		f:          f,
		path:       path,
		contentMD5: opts.ContentMD5,
		md5hash:    md5.New(),
		attrs: xattrs{
			CacheControl:       opts.CacheControl,
			ContentDisposition: opts.ContentDisposition,
			ContentEncoding:    opts.ContentEncoding,
			ContentLanguage:    opts.ContentLanguage,
			ContentType:        contentType,
			Metadata:           metadata,
		},
	}, nil
}
// Copy implements [blob/Driver.Copy].
//
// It duplicates both the blob content and its sidecar attributes from
// srcKey to dstKey.
func (drv *driver) Copy(ctx context.Context, dstKey, srcKey string) error {
	// Note: we could use NewRangeReader here, but since we need to copy all of
	// the metadata (from xa), it's more efficient to do it directly.
	srcPath, _, xa, err := drv.forKey(srcKey)
	if err != nil {
		return err
	}
	f, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer f.Close()
	// We'll write the copy using Writer, to avoid re-implementing making of a
	// temp file, cleaning up after partial failures, etc.
	wopts := blob.WriterOptions{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		Metadata:           xa.Metadata,
	}
	// Create a cancelable context so we can cancel the write if there are problems.
	writeCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	w, err := drv.NewTypedWriter(writeCtx, dstKey, xa.ContentType, &wopts)
	if err != nil {
		return err
	}
	_, err = io.Copy(w, f)
	if err != nil {
		// The writer checks its context on Close; canceling first ensures the
		// partially-written destination is discarded instead of committed.
		cancel() // cancel before Close cancels the write
		w.Close()
		return err
	}
	return w.Close()
}
// Delete implements [blob/Driver.Delete].
//
// It removes both the blob file and its attributes sidecar file; a missing
// sidecar is not an error since the blob may have been written without one.
//
// Note: receiver renamed from "b" to "drv" for consistency with the other
// driver methods.
func (drv *driver) Delete(ctx context.Context, key string) error {
	path, err := drv.path(key)
	if err != nil {
		return err
	}
	err = os.Remove(path)
	if err != nil {
		return err
	}
	err = os.Remove(path + attrsExt)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
// -------------------------------------------------------------------
// reader streams a blob from the local filesystem.
// It implements [blob.DriverReader].
type reader struct {
	r     io.Reader // possibly wrapped in an io.LimitReader for ranged reads
	c     io.Closer // the underlying *os.File
	attrs *blob.ReaderAttributes
}

// Read implements [io.Reader].
func (r *reader) Read(p []byte) (int, error) {
	if r.r == nil {
		return 0, io.EOF
	}
	return r.r.Read(p)
}

// Close implements [io.Closer].
func (r *reader) Close() error {
	if r.c == nil {
		return nil
	}
	return r.c.Close()
}

// Attributes implements [blob/DriverReader.Attributes].
func (r *reader) Attributes() *blob.ReaderAttributes {
	return r.attrs
}
// -------------------------------------------------------------------
// writerWithSidecar implements the strategy of storing metadata in a distinct file.
//
// Content is written to a temp file and an MD5 of the written bytes is
// accumulated; on Close the attributes sidecar is written first and the
// temp file is then renamed into place.
type writerWithSidecar struct {
	ctx        context.Context
	md5hash    hash.Hash
	f          *os.File
	path       string
	attrs      xattrs
	// NOTE(review): contentMD5 is stored but never verified against the
	// computed hash on Close — confirm whether validation is intended.
	contentMD5 []byte
}

// Write implements [io.Writer], hashing the bytes as they are written.
func (w *writerWithSidecar) Write(p []byte) (n int, err error) {
	n, err = w.f.Write(p)
	if err != nil {
		// Don't hash the unwritten tail twice when writing is resumed.
		w.md5hash.Write(p[:n])
		return n, err
	}
	if _, err := w.md5hash.Write(p); err != nil {
		return n, err
	}
	return n, nil
}

// Close finalizes the write: it persists the attributes sidecar and renames
// the temp file to its final path. If the writer's context was canceled the
// write is discarded instead.
func (w *writerWithSidecar) Close() error {
	err := w.f.Close()
	if err != nil {
		return err
	}
	// Always delete the temp file. On success, it will have been
	// renamed so the Remove will fail.
	defer func() {
		_ = os.Remove(w.f.Name())
	}()
	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}
	md5sum := w.md5hash.Sum(nil)
	w.attrs.MD5 = md5sum
	// Write the attributes file.
	if err := setAttrs(w.path, w.attrs); err != nil {
		return err
	}
	// Rename the temp file to path.
	if err := os.Rename(w.f.Name(), w.path); err != nil {
		// Don't leave a stale sidecar behind when the rename failed.
		_ = os.Remove(w.path + attrsExt)
		return err
	}
	return nil
}
// writer is a file with a temporary name until closed.
//
// Embedding os.File allows the likes of io.Copy to use optimizations,
// which is why it is not folded into writerWithSidecar.
type writer struct {
	*os.File
	ctx  context.Context
	path string // final destination of the blob
}

// Close commits the write by renaming the temp file to its final path,
// unless the writer's context was canceled, in which case the temp file
// is discarded.
func (w *writer) Close() error {
	err := w.File.Close()
	if err != nil {
		return err
	}
	// Always delete the temp file. On success, it will have been renamed so
	// the Remove will fail.
	tempname := w.File.Name()
	defer os.Remove(tempname)
	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}
	// Rename the temp file to path.
	return os.Rename(tempname, w.path)
}
// -------------------------------------------------------------------
// escapeKey does all required escaping for UTF-8 strings to work the filesystem.
//
// Control characters, problematic slash sequences ("../", "//", trailing
// "/") and Windows-reserved characters are hex-escaped; "/" is then mapped
// to os.PathSeparator so keys form real subdirectories.
func escapeKey(s string) string {
	s = blob.HexEscape(s, func(r []rune, i int) bool {
		c := r[i]
		switch {
		case c < 32:
			return true
		// We're going to replace '/' with os.PathSeparator below. In order for this
		// to be reversible, we need to escape raw os.PathSeparators.
		case os.PathSeparator != '/' && c == os.PathSeparator:
			return true
		// For "../", escape the trailing slash.
		case i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.':
			return true
		// For "//", escape the trailing slash.
		case i > 0 && c == '/' && r[i-1] == '/':
			return true
		// Escape the trailing slash in a key.
		case c == '/' && i == len(r)-1:
			return true
		// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
		case os.PathSeparator == '\\' && (c == '>' || c == '<' || c == ':' || c == '"' || c == '|' || c == '?' || c == '*'):
			return true
		}
		return false
	})
	// Replace "/" with os.PathSeparator if needed, so that the local filesystem
	// can use subdirectories.
	if os.PathSeparator != '/' {
		s = strings.ReplaceAll(s, "/", string(os.PathSeparator))
	}
	return s
}

// unescapeKey reverses escapeKey.
func unescapeKey(s string) string {
	if os.PathSeparator != '/' {
		s = strings.ReplaceAll(s, string(os.PathSeparator), "/")
	}
	return blob.HexUnescape(s)
}

View File

@ -0,0 +1,482 @@
// Package s3blob provides a blob.Bucket S3 driver implementation.
//
// NB! To minimize breaking changes with older PocketBase releases,
// the driver is based on the previously used gocloud.dev/blob/s3blob,
// hence many of the below doc comments, struct options and interface
// implementations are the same.
//
// The blob abstraction supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for s3blob:
// - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
// Additionally, the "/" in "../" is escaped in the same way.
// - Metadata keys: Escaped using URL encoding, then additionally "@:=" are
// escaped using "__0x<hex>__". These characters were determined by
// experimentation.
// - Metadata values: Escaped using URL encoding.
//
// Example:
//
// drv, _ := s3blob.New(&s3.S3{
// Bucket: "bucketName",
// Region: "region",
// Endpoint: "endpoint",
// AccessKey: "accessKey",
// SecretKey: "secretKey",
// })
// bucket := blob.NewBucket(drv)
package s3blob
import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"github.com/pocketbase/pocketbase/tools/filesystem/blob"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
const defaultPageSize = 1000
// New creates a new instance of the S3 driver backed by the internal S3 client.
//
// It validates that the minimum required client configuration (bucket,
// endpoint and region) is present before constructing the driver.
func New(s3Client *s3.S3) (blob.Driver, error) {
	switch {
	case s3Client.Bucket == "":
		return nil, errors.New("s3blob.New: missing bucket name")
	case s3Client.Endpoint == "":
		return nil, errors.New("s3blob.New: missing endpoint")
	case s3Client.Region == "":
		return nil, errors.New("s3blob.New: missing region")
	}

	return &driver{s3: s3Client}, nil
}
// driver implements [blob.Driver] on top of the internal lightweight S3 client.
type driver struct {
	s3 *s3.S3
}

// Close implements [blob/Driver.Close].
//
// It is a no-op since the underlying HTTP client needs no teardown.
func (drv *driver) Close() error {
	return nil
}

// NormalizeError implements [blob/Driver.NormalizeError].
//
// S3 "missing bucket/key" response codes are mapped to [blob.ErrNotFound]
// so that the portable layer can detect missing keys uniformly.
func (drv *driver) NormalizeError(err error) error {
	var ae s3.ResponseError
	if errors.As(err, &ae) {
		switch ae.Code {
		case "NoSuchBucket", "NoSuchKey", "NotFound":
			return errors.Join(err, blob.ErrNotFound)
		}
	}
	return err
}
// ListPaged implements [blob/Driver.ListPaged].
//
// It issues a single ListObjectsV2-style request, translating the portable
// options (prefix, delimiter, page token/size) to their S3 equivalents and
// merging the returned blobs and common prefixes ("directories") into one
// sorted result page.
func (drv *driver) ListPaged(ctx context.Context, opts *blob.ListOptions) (*blob.ListPage, error) {
	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}
	in := s3.ListParams{
		MaxKeys: pageSize,
	}
	if len(opts.PageToken) > 0 {
		in.ContinuationToken = string(opts.PageToken)
	}
	if opts.Prefix != "" {
		in.Prefix = escapeKey(opts.Prefix)
	}
	if opts.Delimiter != "" {
		in.Delimiter = escapeKey(opts.Delimiter)
	}
	var reqOptions []func(*http.Request)
	resp, err := drv.s3.ListObjects(ctx, in, reqOptions...)
	if err != nil {
		return nil, err
	}
	page := blob.ListPage{}
	if resp.NextContinuationToken != "" {
		page.NextPageToken = []byte(resp.NextContinuationToken)
	}
	if n := len(resp.Contents) + len(resp.CommonPrefixes); n > 0 {
		page.Objects = make([]*blob.ListObject, n)
		for i, obj := range resp.Contents {
			page.Objects[i] = &blob.ListObject{
				Key:     unescapeKey(obj.Key),
				ModTime: obj.LastModified,
				Size:    obj.Size,
				MD5:     eTagToMD5(obj.ETag),
			}
		}
		for i, prefix := range resp.CommonPrefixes {
			page.Objects[i+len(resp.Contents)] = &blob.ListObject{
				Key:   unescapeKey(prefix.Prefix),
				IsDir: true,
			}
		}
		if len(resp.Contents) > 0 && len(resp.CommonPrefixes) > 0 {
			// S3 gives us blobs and "directories" in separate lists; sort them.
			sort.Slice(page.Objects, func(i, j int) bool {
				return page.Objects[i].Key < page.Objects[j].Key
			})
		}
	}
	return &page, nil
}
// Attributes implements [blob/Driver.Attributes].
//
// It performs a HeadObject request and unescapes the stored metadata keys
// and values back to their original form.
func (drv *driver) Attributes(ctx context.Context, key string) (*blob.Attributes, error) {
	key = escapeKey(key)
	resp, err := drv.s3.HeadObject(ctx, key)
	if err != nil {
		return nil, err
	}
	md := make(map[string]string, len(resp.Metadata))
	for k, v := range resp.Metadata {
		// See the package comments for more details on escaping of metadata
		// keys & values.
		md[blob.HexUnescape(urlUnescape(k))] = urlUnescape(v)
	}
	return &blob.Attributes{
		CacheControl:       resp.CacheControl,
		ContentDisposition: resp.ContentDisposition,
		ContentEncoding:    resp.ContentEncoding,
		ContentLanguage:    resp.ContentLanguage,
		ContentType:        resp.ContentType,
		Metadata:           md,
		// CreateTime not supported; left as the zero time.
		ModTime: resp.LastModified,
		Size:    resp.ContentLength,
		MD5:     eTagToMD5(resp.ETag),
		ETag:    resp.ETag,
	}, nil
}
// NewRangeReader implements [blob/Driver.NewRangeReader].
//
// offset is the byte position to start reading from and length the maximum
// number of bytes to read (negative means "until EOF"). A full read
// (offset 0, negative length) is issued without a Range header.
func (drv *driver) NewRangeReader(ctx context.Context, key string, offset, length int64) (blob.DriverReader, error) {
	key = escapeKey(key)

	var byteRange string
	switch {
	case offset > 0 && length < 0:
		byteRange = fmt.Sprintf("bytes=%d-", offset)
	case length == 0:
		// AWS doesn't support a zero-length read; we'll read 1 byte and then
		// ignore it in favor of http.NoBody below.
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset)
	case length >= 0:
		byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
	}

	var reqOptions []func(*http.Request)
	if byteRange != "" {
		// Only send a Range header when there is an actual range - an empty
		// Range header value is invalid per RFC 9110 and may confuse some
		// S3-compatible providers.
		reqOptions = append(reqOptions, func(req *http.Request) {
			req.Header.Set("Range", byteRange)
		})
	}

	resp, err := drv.s3.GetObject(ctx, key, reqOptions...)
	if err != nil {
		return nil, err
	}

	body := resp.Body
	if length == 0 {
		// Close the unused 1-byte body so that the underlying connection
		// can be released/reused.
		resp.Body.Close()
		body = http.NoBody
	}

	return &reader{
		body: body,
		attrs: &blob.ReaderAttributes{
			ContentType: resp.ContentType,
			ModTime:     resp.LastModified,
			Size:        getSize(resp.ContentLength, resp.ContentRange),
		},
	}, nil
}
// NewTypedWriter implements [blob/Driver.NewTypedWriter].
//
// It prepares a multipart uploader for the escaped key; nothing is sent
// until the returned writer receives its first byte (or is closed).
// Metadata keys and values are escaped per the package comments.
func (drv *driver) NewTypedWriter(ctx context.Context, key string, contentType string, opts *blob.WriterOptions) (blob.DriverWriter, error) {
	key = escapeKey(key)
	u := &s3.Uploader{
		S3:  drv.s3,
		Key: key,
	}
	if opts.BufferSize != 0 {
		u.MinPartSize = opts.BufferSize
	}
	if opts.MaxConcurrency != 0 {
		u.MaxConcurrency = opts.MaxConcurrency
	}
	md := make(map[string]string, len(opts.Metadata))
	for k, v := range opts.Metadata {
		// See the package comments for more details on escaping of metadata keys & values.
		k = blob.HexEscape(url.PathEscape(k), func(runes []rune, i int) bool {
			c := runes[i]
			return c == '@' || c == ':' || c == '='
		})
		md[k] = url.PathEscape(v)
	}
	u.Metadata = md
	// Translate the writer options to the corresponding request headers.
	var reqOptions []func(*http.Request)
	reqOptions = append(reqOptions, func(r *http.Request) {
		r.Header.Set("Content-Type", contentType)
		if opts.CacheControl != "" {
			r.Header.Set("Cache-Control", opts.CacheControl)
		}
		if opts.ContentDisposition != "" {
			r.Header.Set("Content-Disposition", opts.ContentDisposition)
		}
		if opts.ContentEncoding != "" {
			r.Header.Set("Content-Encoding", opts.ContentEncoding)
		}
		if opts.ContentLanguage != "" {
			r.Header.Set("Content-Language", opts.ContentLanguage)
		}
		if len(opts.ContentMD5) > 0 {
			r.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(opts.ContentMD5))
		}
	})
	return &writer{
		ctx:        ctx,
		uploader:   u,
		donec:      make(chan struct{}),
		reqOptions: reqOptions,
	}, nil
}
// Copy implements [blob/Driver.Copy].
//
// Both keys operate within the driver's configured bucket.
func (drv *driver) Copy(ctx context.Context, dstKey, srcKey string) error {
	_, err := drv.s3.CopyObject(ctx, escapeKey(srcKey), escapeKey(dstKey))
	return err
}

// Delete implements [blob/Driver.Delete].
func (drv *driver) Delete(ctx context.Context, key string) error {
	return drv.s3.DeleteObject(ctx, escapeKey(key))
}
// -------------------------------------------------------------------
// reader reads an S3 object. It implements io.ReadCloser.
type reader struct {
	attrs *blob.ReaderAttributes
	body  io.ReadCloser // the (possibly http.NoBody) response body
}

// Read implements [io/ReadCloser.Read].
func (r *reader) Read(p []byte) (int, error) {
	return r.body.Read(p)
}

// Close closes the reader itself. It must be called when done reading.
func (r *reader) Close() error {
	return r.body.Close()
}

// Attributes implements [blob/DriverReader.Attributes].
func (r *reader) Attributes() *blob.ReaderAttributes {
	return r.attrs
}
// -------------------------------------------------------------------
// writer writes an S3 object, it implements io.WriteCloser.
//
// The upload goroutine is started lazily on the first Write (or on Close
// for empty blobs) and streams the written bytes to S3 through an io.Pipe.
type writer struct {
	ctx      context.Context
	err      error // written before donec closes
	uploader *s3.Uploader
	// Ends of an io.Pipe, created when the first byte is written.
	pw *io.PipeWriter
	pr *io.PipeReader
	donec      chan struct{} // closed when done writing
	reqOptions []func(*http.Request)
}

// Write appends p to w.pw. User must call Close to close the w after done writing.
func (w *writer) Write(p []byte) (int, error) {
	// Avoid opening the pipe for a zero-length write;
	// the concrete can do these for empty blobs.
	if len(p) == 0 {
		return 0, nil
	}
	if w.pw == nil {
		// We'll write into pw and use pr as an io.Reader for the
		// Upload call to S3.
		w.pr, w.pw = io.Pipe()
		w.open(w.pr, true)
	}
	return w.pw.Write(p)
}

// open starts the background upload goroutine that streams r to S3.
//
// r may be nil if we're Closing and no data was written.
// If closePipeOnError is true, w.pr will be closed if there's an
// error uploading to S3.
func (w *writer) open(r io.Reader, closePipeOnError bool) {
	// This goroutine will keep running until Close, unless there's an error.
	go func() {
		defer func() {
			close(w.donec)
		}()
		if r == nil {
			// AWS doesn't like a nil Body.
			r = http.NoBody
		}
		var err error
		w.uploader.Payload = r
		err = w.uploader.Upload(w.ctx, w.reqOptions...)
		if err != nil {
			if closePipeOnError {
				// Unblock any pending pw.Write call with the upload error.
				w.pr.CloseWithError(err)
			}
			w.err = err
		}
	}()
}

// Close completes the writer and closes it. Any error occurring during write
// will be returned. If a writer is closed before any Write is called, Close
// will create an empty file at the given key.
func (w *writer) Close() error {
	if w.pr != nil {
		defer w.pr.Close()
	}
	if w.pw == nil {
		// We never got any bytes written. We'll write an http.NoBody.
		w.open(nil, false)
	} else if err := w.pw.Close(); err != nil {
		return err
	}
	// Wait for the upload goroutine to finish before reporting its error.
	<-w.donec
	return w.err
}
// -------------------------------------------------------------------
// eTagToMD5 extracts an MD5 checksum from an S3 ETag header value, if possible.
//
// S3's ETag is usually the quoted hex MD5 of the object, but not always —
// notably for multipart uploads, where it contains a "-". When the value
// cannot be interpreted as an MD5 hex string, nil is returned.
//
// Some links about ETag:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
// https://github.com/aws/aws-sdk-net/issues/815
// https://teppen.io/2018/06/23/aws_s3_etags/
func eTagToMD5(etag string) []byte {
	// Require the surrounding quotes (this also rejects a missing header).
	if len(etag) < 2 || etag[0] != '"' || etag[len(etag)-1] != '"' {
		return nil
	}

	// Hex-decode the unquoted value. Multipart upload ETags contain a "-"
	// and therefore fail here, in which case no MD5 is available.
	sum, err := hex.DecodeString(etag[1 : len(etag)-1])
	if err != nil {
		return nil
	}

	return sum
}
// getSize resolves the total blob size from a GetObject response.
//
// For partial (ranged) reads ContentLength is only the size of the returned
// body; the full blob size is carried in ContentRange, e.g. "bytes 10-14/27"
// (where 27 is the full size). When ContentRange is absent or unparsable,
// ContentLength is returned as-is.
func getSize(contentLength int64, contentRange string) int64 {
	_, total, found := strings.Cut(contentRange, "/")
	if !found {
		return contentLength
	}

	parsed, err := strconv.ParseInt(total, 10, 64)
	if err != nil {
		return contentLength
	}

	return parsed
}
// escapeKey does all required escaping for UTF-8 strings to work with S3.
//
// Control characters and the trailing slash of "../" sequences are
// hex-escaped (determined via experimentation with S3).
func escapeKey(key string) string {
	return blob.HexEscape(key, func(r []rune, i int) bool {
		switch c := r[i]; {
		case c < 32:
			// S3 doesn't handle control characters.
			return true
		case c == '/' && i > 1 && r[i-1] == '.' && r[i-2] == '.':
			// For "../", escape the trailing slash.
			return true
		default:
			return false
		}
	})
}
// unescapeKey reverses escapeKey, restoring the original blob key.
func unescapeKey(key string) string {
	return blob.HexUnescape(key)
}
// urlUnescape reverses URL escaping using url.PathUnescape, returning s
// unchanged when it is not a valid escaped string.
func urlUnescape(s string) string {
	u, err := url.PathUnescape(s)
	if err != nil {
		return s
	}
	return u
}

View File

@ -0,0 +1,128 @@
package s3_test
import (
"errors"
"fmt"
"io"
"net/http"
"regexp"
"slices"
"strings"
"sync"
)
func checkHeaders(headers http.Header, expectations map[string]string) bool {
for h, expected := range expectations {
v := headers.Get(h)
pattern := expected
if !strings.HasPrefix(pattern, "^") && !strings.HasSuffix(pattern, "$") {
pattern = "^" + regexp.QuoteMeta(pattern) + "$"
}
expectedRegex, err := regexp.Compile(pattern)
if err != nil {
return false
}
if !expectedRegex.MatchString(v) {
return false
}
}
return true
}
// RequestStub describes a single expected HTTP request and the canned
// response to return for it.
type RequestStub struct {
	Method   string
	URL      string // plain string or regex pattern wrapped in "^pattern$"
	Match    func(req *http.Request) bool // optional extra matcher (e.g. header checks)
	Response *http.Response // returned as-is (nil results in an empty response)
}

// NewTestClient creates a TestClient that serves the provided stubs,
// consuming each stub on its first match.
func NewTestClient(stubs ...*RequestStub) *TestClient {
	return &TestClient{stubs: stubs}
}

// TestClient is a stub HTTP client used by the package tests in place of a
// real network client.
type TestClient struct {
	stubs []*RequestStub
	mu    sync.Mutex // guards stubs
}
// AssertNoRemaining returns an error listing any stubbed requests that were
// never matched by a Do call, or nil when all stubs were consumed.
func (c *TestClient) AssertNoRemaining() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.stubs) == 0 {
		return nil
	}

	var sb strings.Builder
	sb.WriteString("not all stub requests were processed:")
	for _, stub := range c.stubs {
		sb.WriteString("\n- ")
		sb.WriteString(stub.Method)
		sb.WriteString(" ")
		sb.WriteString(stub.URL)
	}

	return errors.New(sb.String())
}
// Do implements the HTTP client interface expected by the s3 package.
//
// It matches req against the remaining stubs (method, URL pattern and the
// optional Match func). The first matching stub is consumed (removed from
// the list) and its Response returned; an error describing the request is
// returned when no stub matches.
func (c *TestClient) Do(req *http.Request) (*http.Response, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for i, stub := range c.stubs {
		if req.Method != stub.Method {
			continue
		}
		urlPattern := stub.URL
		// Unanchored patterns are treated as exact string matches.
		if !strings.HasPrefix(urlPattern, "^") && !strings.HasSuffix(urlPattern, "$") {
			urlPattern = "^" + regexp.QuoteMeta(urlPattern) + "$"
		}
		urlRegex, err := regexp.Compile(urlPattern)
		if err != nil {
			return nil, err
		}
		if !urlRegex.MatchString(req.URL.String()) {
			continue
		}
		if stub.Match != nil && !stub.Match(req) {
			continue
		}
		// remove from the remaining stubs
		c.stubs = slices.Delete(c.stubs, i, i+1)
		// Normalize the stub response so callers can always read Header/Body.
		response := stub.Response
		if response == nil {
			response = &http.Response{}
		}
		if response.Header == nil {
			response.Header = http.Header{}
		}
		if response.Body == nil {
			response.Body = http.NoBody
		}
		response.Request = req
		return response, nil
	}
	// No stub matched -> include the full request details to ease debugging.
	var body []byte
	if req.Body != nil {
		defer req.Body.Close()
		body, _ = io.ReadAll(req.Body)
	}
	return nil, fmt.Errorf(
		"the below request doesn't have a corresponding stub:\n%s %s\nHeaders: %v\nBody: %q",
		req.Method,
		req.URL.String(),
		req.Header,
		body,
	)
}

View File

@ -0,0 +1,59 @@
package s3
import (
"context"
"encoding/xml"
"net/http"
"net/url"
"strings"
"time"
)
// CopyObjectResponse represents the XML payload returned by a successful
// S3 CopyObject call.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html#API_CopyObject_ResponseSyntax
type CopyObjectResponse struct {
	CopyObjectResult  xml.Name  `json:"copyObjectResult" xml:"CopyObjectResult"`
	ETag              string    `json:"etag" xml:"ETag"`
	LastModified      time.Time `json:"lastModified" xml:"LastModified"`
	ChecksumType      string    `json:"checksumType" xml:"ChecksumType"`
	ChecksumCRC32     string    `json:"checksumCRC32" xml:"ChecksumCRC32"`
	ChecksumCRC32C    string    `json:"checksumCRC32C" xml:"ChecksumCRC32C"`
	ChecksumCRC64NVME string    `json:"checksumCRC64NVME" xml:"ChecksumCRC64NVME"`
	ChecksumSHA1      string    `json:"checksumSHA1" xml:"ChecksumSHA1"`
	ChecksumSHA256    string    `json:"checksumSHA256" xml:"ChecksumSHA256"`
}
// CopyObject copies a single object from srcKey to dstKey destination.
// (both keys are expected to be operating within the same bucket).
//
// The optional optReqFuncs can be used to customize the request
// (e.g. set extra headers) before it is signed and sent.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
func (s3 *S3) CopyObject(ctx context.Context, srcKey string, dstKey string, optReqFuncs ...func(*http.Request)) (*CopyObjectResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, s3.URL(dstKey), nil)
	if err != nil {
		return nil, err
	}
	// per the doc the header value must be URL-encoded
	req.Header.Set("x-amz-copy-source", url.PathEscape(s3.Bucket+"/"+strings.TrimLeft(srcKey, "/")))
	// apply optional request funcs
	for _, fn := range optReqFuncs {
		if fn != nil {
			fn(req)
		}
	}
	resp, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	result := &CopyObjectResponse{}
	err = xml.NewDecoder(resp.Body).Decode(result)
	if err != nil {
		return nil, err
	}
	return result, nil
}

View File

@ -0,0 +1,66 @@
package s3_test
import (
"context"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestS3CopyObject verifies that CopyObject sends a signed PUT request with
// the URL-encoded x-amz-copy-source header and decodes the XML response.
func TestS3CopyObject(t *testing.T) {
	t.Parallel()
	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/@dst_test",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":       "test",
					"x-amz-copy-source": "test_bucket%2F@src_test",
					"Authorization":     "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<CopyObjectResult>
						<LastModified>2025-01-01T01:02:03.456Z</LastModified>
						<ETag>test_etag</ETag>
					</CopyObjectResult>
				`)),
			},
		},
	)
	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}
	// the extra request func must be applied on top of the default headers
	copyResp, err := s3Client.CopyObject(context.Background(), "@src_test", "@dst_test", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
	if copyResp.ETag != "test_etag" {
		t.Fatalf("Expected ETag %q, got %q", "test_etag", copyResp.ETag)
	}
	if date := copyResp.LastModified.Format("2006-01-02T15:04:05.000Z"); date != "2025-01-01T01:02:03.456Z" {
		t.Fatalf("Expected LastModified %q, got %q", "2025-01-01T01:02:03.456Z", date)
	}
}

View File

@ -0,0 +1,31 @@
package s3
import (
"context"
"net/http"
)
// DeleteObject deletes a single object by its key.
//
// The optional optFuncs can be used to customize the request
// (e.g. set extra headers) before it is signed and sent.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
func (s3 *S3) DeleteObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, s3.URL(key), nil)
	if err != nil {
		return err
	}

	// apply the optional request customizations
	for _, fn := range optFuncs {
		if fn == nil {
			continue
		}
		fn(req)
	}

	resp, err := s3.SignAndSend(req)
	if err != nil {
		return err
	}
	resp.Body.Close()

	return nil
}

View File

@ -0,0 +1,47 @@
package s3_test
import (
"context"
"net/http"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestS3DeleteObject verifies that DeleteObject sends a signed DELETE
// request for the key and applies the optional request customizations.
func TestS3DeleteObject(t *testing.T) {
	t.Parallel()
	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)
	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}
	err := s3Client.DeleteObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}

View File

@ -0,0 +1,47 @@
package s3
import (
"encoding/xml"
"strconv"
"strings"
)
// ResponseError defines a general S3 response error.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
type ResponseError struct {
	XMLName   xml.Name `json:"-" xml:"Error"`
	Code      string   `json:"code" xml:"Code"`
	Message   string   `json:"message" xml:"Message"`
	RequestId string   `json:"requestId" xml:"RequestId"`
	Resource  string   `json:"resource" xml:"Resource"`
	Raw       []byte   `json:"-" xml:"-"`
	Status    int      `json:"status" xml:"Status"`
}

// Error implements the std error interface.
//
// The format is "<status> <code>: <message>", optionally followed by the
// raw response payload on a new line. A generic "S3ResponseError" code is
// substituted when none was parsed.
func (err ResponseError) Error() string {
	code := err.Code
	if code == "" {
		code = "S3ResponseError"
	}

	var sb strings.Builder

	sb.WriteString(strconv.Itoa(err.Status))
	sb.WriteString(" ")
	sb.WriteString(code)

	if err.Message != "" {
		sb.WriteString(": ")
		sb.WriteString(err.Message)
	}

	if len(err.Raw) > 0 {
		sb.WriteString("\n(RAW: ")
		sb.Write(err.Raw)
		sb.WriteString(")")
	}

	return sb.String()
}

View File

@ -0,0 +1,86 @@
package s3_test
import (
"encoding/json"
"encoding/xml"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestResponseErrorSerialization verifies that an S3 XML error payload is
// unmarshalled into ResponseError and that the Raw/XMLName fields are
// excluded from its JSON representation.
func TestResponseErrorSerialization(t *testing.T) {
	raw := `
		<?xml version="1.0" encoding="UTF-8"?>
		<Error>
			<Code>test_code</Code>
			<Message>test_message</Message>
			<RequestId>test_request_id</RequestId>
			<Resource>test_resource</Resource>
		</Error>
	`

	// Status and Raw are populated out-of-band and must survive the XML unmarshal
	respErr := s3.ResponseError{
		Status: 123,
		Raw:    []byte("test"),
	}

	err := xml.Unmarshal([]byte(raw), &respErr)
	if err != nil {
		t.Fatal(err)
	}

	jsonRaw, err := json.Marshal(respErr)
	if err != nil {
		t.Fatal(err)
	}
	jsonStr := string(jsonRaw)

	expected := `{"code":"test_code","message":"test_message","requestId":"test_request_id","resource":"test_resource","status":123}`

	if expected != jsonStr {
		t.Fatalf("Expected JSON\n%s\ngot\n%s", expected, jsonStr)
	}
}
// TestResponseErrorErrorInterface verifies the formatting of the
// ResponseError.Error() message for empty and populated instances.
func TestResponseErrorErrorInterface(t *testing.T) {
	scenarios := []struct {
		name     string
		err      s3.ResponseError
		expected string
	}{
		{
			"empty",
			s3.ResponseError{},
			"0 S3ResponseError",
		},
		{
			"with code and message (nil raw)",
			s3.ResponseError{
				Status:  123,
				Code:    "test_code",
				Message: "test_message",
			},
			"123 test_code: test_message",
		},
		{
			"with code and message (non-nil raw)",
			s3.ResponseError{
				Status:  123,
				Code:    "test_code",
				Message: "test_message",
				Raw:     []byte("test_raw"),
			},
			"123 test_code: test_message\n(RAW: test_raw)",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result := s.err.Error()
			if result != s.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}

View File

@ -0,0 +1,43 @@
package s3
import (
"context"
"io"
"net/http"
)
// GetObjectResponse defines the response of a GetObject request.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_ResponseElements
type GetObjectResponse struct {
	// Body is the object content stream (the caller is responsible for closing it).
	Body io.ReadCloser `json:"-" xml:"-"`

	// HeadObjectResponse holds the object attributes parsed from the response headers.
	HeadObjectResponse
}
// GetObject retrieves a single object by its key.
//
// NB! Make sure to call GetObjectResponse.Body.Close() after done working with the result.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (s3 *S3) GetObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*GetObjectResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL(key), nil)
	if err != nil {
		return nil, err
	}

	// let the caller customize the request (extra headers, etc.)
	for _, optFunc := range optFuncs {
		if optFunc != nil {
			optFunc(req)
		}
	}

	resp, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}

	// note: the body is intentionally left open - closing it is the
	// caller's responsibility after consuming the object content
	result := &GetObjectResponse{Body: resp.Body}
	result.load(resp.Header)

	return result, nil
}

View File

@ -0,0 +1,91 @@
package s3_test
import (
"context"
"encoding/json"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestS3GetObject verifies that GetObject sends a signed GET request,
// exposes the raw body stream and parses the response headers
// (incl. the lowercased "x-amz-meta-*" metadata) into the result attributes.
func TestS3GetObject(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodGet,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{
					"Last-Modified":       []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
					"Cache-Control":       []string{"test_cache"},
					"Content-Disposition": []string{"test_disposition"},
					"Content-Encoding":    []string{"test_encoding"},
					"Content-Language":    []string{"test_language"},
					"Content-Type":        []string{"test_type"},
					"Content-Range":       []string{"test_range"},
					"Etag":                []string{"test_etag"},
					"Content-Length":      []string{"100"},
					// metadata keys are expected to be normalized to lower-case
					"x-amz-meta-AbC": []string{"test_meta_a"},
					"x-amz-meta-Def": []string{"test_meta_b"},
				},
				Body: io.NopCloser(strings.NewReader("test")),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.GetObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	// check body
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}
	bodyStr := string(body)
	if bodyStr != "test" {
		t.Fatalf("Expected body\n%q\ngot\n%q", "test", bodyStr)
	}

	// check serialized attributes
	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}`

	if rawStr != expected {
		t.Fatalf("Expected attributes\n%s\ngot\n%s", expected, rawStr)
	}
}

View File

@ -0,0 +1,89 @@
package s3
import (
"context"
"net/http"
"strconv"
"time"
)
// HeadObjectResponse contains the parsed object attributes of a
// HeadObject (or GetObject) response.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseElements
type HeadObjectResponse struct {
	// Metadata is the extra data that is stored with the S3 object (aka. the "x-amz-meta-*" header values).
	//
	// The map keys are normalized to lower-case.
	Metadata map[string]string `json:"metadata"`

	// LastModified date and time when the object was last modified.
	LastModified time.Time `json:"lastModified"`

	// CacheControl specifies caching behavior along the request/reply chain.
	CacheControl string `json:"cacheControl"`

	// ContentDisposition specifies presentational information for the object.
	ContentDisposition string `json:"contentDisposition"`

	// ContentEncoding indicates what content encodings have been applied to the object
	// and thus what decoding mechanisms must be applied to obtain the
	// media-type referenced by the Content-Type header field.
	ContentEncoding string `json:"contentEncoding"`

	// ContentLanguage indicates the language the content is in.
	ContentLanguage string `json:"contentLanguage"`

	// ContentType is a standard MIME type describing the format of the object data.
	ContentType string `json:"contentType"`

	// ContentRange is the portion of the object usually returned in the response for a GET request.
	ContentRange string `json:"contentRange"`

	// ETag is an opaque identifier assigned by a web
	// server to a specific version of a resource found at a URL.
	ETag string `json:"etag"`

	// ContentLength is size of the body in bytes.
	ContentLength int64 `json:"contentLength"`
}

// load parses and loads the header values into the current HeadObjectResponse fields.
//
// Parse failures are deliberately ignored and leave the corresponding
// field with its zero value (best-effort attribute extraction).
func (o *HeadObjectResponse) load(headers http.Header) {
	// HTTP dates are formatted per RFC 1123 (e.g. "Mon, 01 Feb 2025 03:04:05 GMT")
	o.LastModified, _ = time.Parse(time.RFC1123, headers.Get("Last-Modified"))
	o.CacheControl = headers.Get("Cache-Control")
	o.ContentDisposition = headers.Get("Content-Disposition")
	o.ContentEncoding = headers.Get("Content-Encoding")
	o.ContentLanguage = headers.Get("Content-Language")
	o.ContentType = headers.Get("Content-Type")
	o.ContentRange = headers.Get("Content-Range")
	o.ETag = headers.Get("ETag")
	// bitSize must be 64 (not 0/platform int) so that objects larger than
	// 2GiB don't overflow the parse on 32-bit platforms
	o.ContentLength, _ = strconv.ParseInt(headers.Get("Content-Length"), 10, 64)
	o.Metadata = extractMetadata(headers)
}
// HeadObject sends a HEAD request for a single object to check its
// existence and to retrieve its metadata.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
func (s3 *S3) HeadObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*HeadObjectResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, s3.URL(key), nil)
	if err != nil {
		return nil, err
	}

	// let the caller customize the request (extra headers, etc.)
	for _, optFunc := range optFuncs {
		if optFunc != nil {
			optFunc(req)
		}
	}

	resp, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	// HEAD responses carry no payload - only the headers are of interest
	defer resp.Body.Close()

	result := new(HeadObjectResponse)
	result.load(resp.Header)

	return result, nil
}

View File

@ -0,0 +1,76 @@
package s3_test
import (
"context"
"encoding/json"
"net/http"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestS3HeadObject verifies that HeadObject sends a signed HEAD request
// and parses the response headers (incl. lowercased metadata) into the result.
func TestS3HeadObject(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodHead,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{
					"Last-Modified":       []string{"Mon, 01 Feb 2025 03:04:05 GMT"},
					"Cache-Control":       []string{"test_cache"},
					"Content-Disposition": []string{"test_disposition"},
					"Content-Encoding":    []string{"test_encoding"},
					"Content-Language":    []string{"test_language"},
					"Content-Type":        []string{"test_type"},
					"Content-Range":       []string{"test_range"},
					"Etag":                []string{"test_etag"},
					"Content-Length":      []string{"100"},
					// metadata keys are expected to be normalized to lower-case
					"x-amz-meta-AbC": []string{"test_meta_a"},
					"x-amz-meta-Def": []string{"test_meta_b"},
				},
				Body: http.NoBody,
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.HeadObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}`

	if rawStr != expected {
		t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr)
	}
}

View File

@ -0,0 +1,165 @@
package s3
import (
"context"
"encoding/xml"
"net/http"
"net/url"
"strconv"
"time"
)
// ListParams defines optional parameters for the ListObject request.
type ListParams struct {
	// ContinuationToken indicates that the list is being continued on this bucket with a token.
	// ContinuationToken is obfuscated and is not a real key.
	// You can use this ContinuationToken for pagination of the list results.
	ContinuationToken string `json:"continuationToken"`

	// Delimiter is a character that you use to group keys.
	//
	// For directory buckets, "/" is the only supported delimiter.
	Delimiter string `json:"delimiter"`

	// Prefix limits the response to keys that begin with the specified prefix.
	Prefix string `json:"prefix"`

	// Encoding type is used to encode the object keys in the response.
	// Responses are encoded only in UTF-8.
	// An object key can contain any Unicode character.
	// However, the XML 1.0 parser can't parse certain characters,
	// such as characters with an ASCII value from 0 to 10.
	// For characters that aren't supported in XML 1.0, you can add
	// this parameter to request that S3 encode the keys in the response.
	//
	// Valid Values: url
	EncodingType string `json:"encodingType"`

	// StartAfter is where you want S3 to start listing from.
	// S3 starts listing after this specified key.
	// StartAfter can be any key in the bucket.
	//
	// This functionality is not supported for directory buckets.
	StartAfter string `json:"startAfter"`

	// MaxKeys Sets the maximum number of keys returned in the response.
	// By default, the action returns up to 1,000 key names.
	// The response might contain fewer keys but will never contain more.
	MaxKeys int `json:"maxKeys"`

	// FetchOwner returns the owner field with each key in the result.
	FetchOwner bool `json:"fetchOwner"`
}

// Encode encodes the parameters in a properly formatted query string.
//
// Zero-valued optional parameters are omitted. Note that url.Values.Encode
// always sorts the keys, so the output is deterministic.
func (l *ListParams) Encode() string {
	query := url.Values{}

	// always a ListObjectsV2 request
	query.Set("list-type", "2")

	if l.ContinuationToken != "" {
		query.Set("continuation-token", l.ContinuationToken)
	}
	if l.Delimiter != "" {
		query.Set("delimiter", l.Delimiter)
	}
	if l.EncodingType != "" {
		query.Set("encoding-type", l.EncodingType)
	}
	if l.FetchOwner {
		query.Set("fetch-owner", "true")
	}
	if l.MaxKeys > 0 {
		query.Set("max-keys", strconv.Itoa(l.MaxKeys))
	}
	if l.Prefix != "" {
		query.Set("prefix", l.Prefix)
	}
	if l.StartAfter != "" {
		query.Set("start-after", l.StartAfter)
	}

	return query.Encode()
}
// ListObjects retrieves paginated objects list.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
func (s3 *S3) ListObjects(ctx context.Context, params ListParams, optReqFuncs ...func(*http.Request)) (*ListObjectsResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL("?"+params.Encode()), nil)
	if err != nil {
		return nil, err
	}

	// let the caller customize the request (extra headers, etc.)
	for _, reqFunc := range optReqFuncs {
		if reqFunc != nil {
			reqFunc(req)
		}
	}

	resp, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	result := new(ListObjectsResponse)
	if err := xml.NewDecoder(resp.Body).Decode(result); err != nil {
		return nil, err
	}

	return result, nil
}
// ListObjectsResponse defines the XML response of a ListObjectsV2 request.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_ResponseSyntax
type ListObjectsResponse struct {
	XMLName               xml.Name                  `json:"-" xml:"ListBucketResult"`
	EncodingType          string                    `json:"encodingType" xml:"EncodingType"`
	Name                  string                    `json:"name" xml:"Name"`
	Prefix                string                    `json:"prefix" xml:"Prefix"`
	Delimiter             string                    `json:"delimiter" xml:"Delimiter"`
	ContinuationToken     string                    `json:"continuationToken" xml:"ContinuationToken"`
	NextContinuationToken string                    `json:"nextContinuationToken" xml:"NextContinuationToken"`
	StartAfter            string                    `json:"startAfter" xml:"StartAfter"`
	CommonPrefixes        []*ListObjectCommonPrefix `json:"commonPrefixes" xml:"CommonPrefixes"`
	Contents              []*ListObjectContent      `json:"contents" xml:"Contents"`
	KeyCount              int                       `json:"keyCount" xml:"KeyCount"`
	MaxKeys               int                       `json:"maxKeys" xml:"MaxKeys"`
	IsTruncated           bool                      `json:"isTruncated" xml:"IsTruncated"`
}

// ListObjectCommonPrefix represents a single <CommonPrefixes> entry
// (i.e. a "directory" grouped by the request delimiter).
type ListObjectCommonPrefix struct {
	Prefix string `json:"prefix" xml:"Prefix"`
}

// ListObjectContent represents a single <Contents> entry (object) of a
// ListObjectsV2 response.
type ListObjectContent struct {
	Owner struct {
		DisplayName string `json:"displayName" xml:"DisplayName"`
		ID          string `json:"id" xml:"ID"`
	} `json:"owner" xml:"Owner"`
	ChecksumAlgorithm string    `json:"checksumAlgorithm" xml:"ChecksumAlgorithm"`
	ETag              string    `json:"etag" xml:"ETag"`
	Key               string    `json:"key" xml:"Key"`
	StorageClass      string    `json:"storageClass" xml:"StorageClass"`
	LastModified      time.Time `json:"lastModified" xml:"LastModified"`
	RestoreStatus     struct {
		RestoreExpiryDate   time.Time `json:"restoreExpiryDate" xml:"RestoreExpiryDate"`
		IsRestoreInProgress bool      `json:"isRestoreInProgress" xml:"IsRestoreInProgress"`
	} `json:"restoreStatus" xml:"RestoreStatus"`
	Size int64 `json:"size" xml:"Size"`
}

View File

@ -0,0 +1,156 @@
package s3_test
import (
"context"
"encoding/json"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestS3ListParamsEncode verifies the query string encoding of ListParams
// for both the zero value and a fully populated instance.
func TestS3ListParamsEncode(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		name     string
		params   s3.ListParams
		expected string
	}{
		{
			"blank",
			s3.ListParams{},
			// "list-type=2" is always present
			"list-type=2",
		},
		{
			"filled",
			s3.ListParams{
				ContinuationToken: "test_ct",
				Delimiter:         "test_delimiter",
				Prefix:            "test_prefix",
				EncodingType:      "test_et",
				StartAfter:        "test_sa",
				MaxKeys:           1,
				FetchOwner:        true,
			},
			// note: url.Values.Encode sorts the parameters alphabetically
			"continuation-token=test_ct&delimiter=test_delimiter&encoding-type=test_et&fetch-owner=true&list-type=2&max-keys=1&prefix=test_prefix&start-after=test_sa",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result := s.params.Encode()
			if result != s.expected {
				t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
// TestS3ListObjects verifies that ListObjects sends a signed GET request
// with the encoded list parameters and correctly decodes the XML payload
// (contents, common prefixes, pagination tokens, restore status, owner).
func TestS3ListObjects(t *testing.T) {
	t.Parallel()

	listParams := s3.ListParams{
		ContinuationToken: "test_ct",
		Delimiter:         "test_delimiter",
		Prefix:            "test_prefix",
		EncodingType:      "test_et",
		StartAfter:        "test_sa",
		MaxKeys:           10,
		FetchOwner:        true,
	}

	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodGet,
			URL:    "http://test_bucket.example.com/?" + listParams.Encode(),
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
						<Name>example</Name>
						<EncodingType>test_encoding</EncodingType>
						<Prefix>a/</Prefix>
						<Delimiter>/</Delimiter>
						<ContinuationToken>ct</ContinuationToken>
						<NextContinuationToken>nct</NextContinuationToken>
						<StartAfter>example0.txt</StartAfter>
						<KeyCount>1</KeyCount>
						<MaxKeys>3</MaxKeys>
						<IsTruncated>true</IsTruncated>
						<Contents>
							<Key>example1.txt</Key>
							<LastModified>2025-01-01T01:02:03.123Z</LastModified>
							<ChecksumAlgorithm>test_ca</ChecksumAlgorithm>
							<ETag>test_etag1</ETag>
							<Size>123</Size>
							<StorageClass>STANDARD</StorageClass>
							<Owner>
								<DisplayName>owner_dn</DisplayName>
								<ID>owner_id</ID>
							</Owner>
							<RestoreStatus>
								<RestoreExpiryDate>2025-01-02T01:02:03.123Z</RestoreExpiryDate>
								<IsRestoreInProgress>true</IsRestoreInProgress>
							</RestoreStatus>
						</Contents>
						<Contents>
							<Key>example2.txt</Key>
							<LastModified>2025-01-02T01:02:03.123Z</LastModified>
							<ETag>test_etag2</ETag>
							<Size>456</Size>
							<StorageClass>STANDARD</StorageClass>
						</Contents>
						<CommonPrefixes>
							<Prefix>a/b/</Prefix>
						</CommonPrefixes>
						<CommonPrefixes>
							<Prefix>a/c/</Prefix>
						</CommonPrefixes>
					</ListBucketResult>
				`)),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	resp, err := s3Client.ListObjects(context.Background(), listParams, func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	// compare via the JSON serialization of the decoded response
	raw, err := json.Marshal(resp)
	if err != nil {
		t.Fatal(err)
	}
	rawStr := string(raw)

	expected := `{"encodingType":"test_encoding","name":"example","prefix":"a/","delimiter":"/","continuationToken":"ct","nextContinuationToken":"nct","startAfter":"example0.txt","commonPrefixes":[{"prefix":"a/b/"},{"prefix":"a/c/"}],"contents":[{"owner":{"displayName":"owner_dn","id":"owner_id"},"checksumAlgorithm":"test_ca","etag":"test_etag1","key":"example1.txt","storageClass":"STANDARD","lastModified":"2025-01-01T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"2025-01-02T01:02:03.123Z","isRestoreInProgress":true},"size":123},{"owner":{"displayName":"","id":""},"checksumAlgorithm":"","etag":"test_etag2","key":"example2.txt","storageClass":"STANDARD","lastModified":"2025-01-02T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"0001-01-01T00:00:00Z","isRestoreInProgress":false},"size":456}],"keyCount":1,"maxKeys":3,"isTruncated":true}`

	if rawStr != expected {
		t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr)
	}
}

View File

@ -0,0 +1,262 @@
package s3
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"slices"
"strings"
"time"
)
const (
	awsS3ServiceCode     = "s3"
	awsSignAlgorithm     = "AWS4-HMAC-SHA256"
	awsTerminationString = "aws4_request"
	metadataPrefix       = "x-amz-meta-"
	dateTimeFormat       = "20060102T150405Z"
)

// HTTPClient is a minimal abstraction over *http.Client to allow
// substituting the transport (e.g. with a mock in tests).
type HTTPClient interface {
	Do(req *http.Request) (*http.Response, error)
}

// S3 defines a lightweight S3 client configuration.
type S3 struct {
	// Client specifies the HTTP client to send the request with.
	//
	// If not explicitly set, fallbacks to http.DefaultClient.
	Client HTTPClient

	Bucket       string
	Region       string
	Endpoint     string // can be with or without the schema
	AccessKey    string
	SecretKey    string
	UsePathStyle bool
}

// URL constructs an S3 request URL based on the current configuration.
//
// It defaults to the https schema and virtual-hosted-style addressing
// ("https://bucket.endpoint/key"), unless the endpoint explicitly
// specifies http and/or UsePathStyle is enabled ("https://endpoint/bucket/key").
func (s3 *S3) URL(key string) string {
	endpoint := strings.TrimRight(s3.Endpoint, "/")

	scheme := "https"
	switch {
	case strings.HasPrefix(endpoint, "https://"):
		endpoint = endpoint[len("https://"):]
	case strings.HasPrefix(endpoint, "http://"):
		endpoint = endpoint[len("http://"):]
		scheme = "http"
	}

	key = strings.TrimLeft(key, "/")

	if s3.UsePathStyle {
		return scheme + "://" + endpoint + "/" + s3.Bucket + "/" + key
	}

	return scheme + "://" + s3.Bucket + "." + endpoint + "/" + key
}
// SignAndSend signs the provided request per AWS Signature v4 and sends it.
//
// It automatically normalizes all 40x/50x responses to ResponseError.
//
// Note: Don't forget to call resp.Body.Close() after done with the result.
func (s3 *S3) SignAndSend(req *http.Request) (*http.Response, error) {
	s3.sign(req)

	httpClient := s3.Client
	if httpClient == nil {
		httpClient = http.DefaultClient
	}

	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, err
	}

	// happy path - hand the open body over to the caller
	if resp.StatusCode < 400 {
		return resp, nil
	}

	// error response -> normalize it to a ResponseError
	defer resp.Body.Close()

	respErr := &ResponseError{Status: resp.StatusCode}

	var readErr error
	respErr.Raw, readErr = io.ReadAll(resp.Body)
	if readErr != nil && !errors.Is(readErr, io.EOF) {
		return nil, errors.Join(readErr, respErr)
	}

	if len(respErr.Raw) > 0 {
		if unmarshalErr := xml.Unmarshal(respErr.Raw, respErr); unmarshalErr != nil {
			return nil, errors.Join(unmarshalErr, respErr)
		}
	}

	return nil, respErr
}
// sign signs the request in-place following the AWS Signature Version 4 steps
// and sets the resulting "authorization" header.
//
// The x-amz-date header is honored if already set (useful for tests);
// otherwise the current UTC time is used.
//
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-signed-request-steps
func (s3 *S3) sign(req *http.Request) {
	// fallback to the Unsigned payload option
	// (data integrity checks could be still applied via the content-md5 or x-amz-checksum-* headers)
	if req.Header.Get("x-amz-content-sha256") == "" {
		req.Header.Set("x-amz-content-sha256", "UNSIGNED-PAYLOAD")
	}

	reqDateTime, _ := time.Parse(dateTimeFormat, req.Header.Get("x-amz-date"))
	if reqDateTime.IsZero() {
		reqDateTime = time.Now().UTC()
		req.Header.Set("x-amz-date", reqDateTime.Format(dateTimeFormat))
	}

	// the host must be part of the signed headers
	req.Header.Set("host", req.URL.Host)

	date := reqDateTime.Format("20060102")

	dateTime := reqDateTime.Format(dateTimeFormat)

	// 1. Create canonical request
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request
	// ---------------------------------------------------------------
	canonicalHeaders, signedHeaders := canonicalAndSignedHeaders(req)

	canonicalParts := []string{
		req.Method,
		req.URL.EscapedPath(),
		encodeQuery(req),
		canonicalHeaders,
		signedHeaders,
		req.Header.Get("x-amz-content-sha256"),
	}

	// 2. Create a hash of the canonical request
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request-hash
	// ---------------------------------------------------------------
	hashedCanonicalRequest := sha256Hex([]byte(strings.Join(canonicalParts, "\n")))

	// 3. Create a string to sign
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-string-to-sign
	// ---------------------------------------------------------------
	scope := strings.Join([]string{
		date,
		s3.Region,
		awsS3ServiceCode,
		awsTerminationString,
	}, "/")

	stringToSign := strings.Join([]string{
		awsSignAlgorithm,
		dateTime,
		scope,
		hashedCanonicalRequest,
	}, "\n")

	// 4. Derive a signing key for SigV4
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#derive-signing-key
	// ---------------------------------------------------------------
	// (the chained HMACs scope the key to date/region/service)
	dateKey := hmacSHA256([]byte("AWS4"+s3.SecretKey), date)
	dateRegionKey := hmacSHA256(dateKey, s3.Region)
	dateRegionServiceKey := hmacSHA256(dateRegionKey, awsS3ServiceCode)
	signingKey := hmacSHA256(dateRegionServiceKey, awsTerminationString)
	signature := hex.EncodeToString(hmacSHA256(signingKey, stringToSign))

	// 5. Add the signature to the request
	// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#add-signature-to-request
	authorization := fmt.Sprintf(
		"%s Credential=%s/%s, SignedHeaders=%s, Signature=%s",
		awsSignAlgorithm,
		s3.AccessKey,
		scope,
		signedHeaders,
		signature,
	)

	req.Header.Set("authorization", authorization)
}
// encodeQuery encodes the request query parameters according to the AWS requirements
// (see UriEncode description in https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html).
//
// url.Values.Encode escapes spaces as "+", while SigV4 requires "%20".
func encodeQuery(req *http.Request) string {
	encoded := req.URL.Query().Encode()
	return strings.ReplaceAll(encoded, "+", "%20")
}
// sha256Hex returns the hex-encoded SHA-256 digest of content.
func sha256Hex(content []byte) string {
	sum := sha256.Sum256(content)
	return hex.EncodeToString(sum[:])
}
// hmacSHA256 returns the HMAC-SHA256 of content using the provided key.
func hmacSHA256(key []byte, content string) []byte {
	h := hmac.New(sha256.New, key)
	h.Write([]byte(content)) // hash.Hash.Write never returns an error
	return h.Sum(nil)
}
// canonicalAndSignedHeaders returns the canonical headers string
// (newline-terminated "key:value" lines, sorted by key) and the
// ";"-joined list of the sorted, lowercased signed header names,
// per the SigV4 canonical request format.
//
// Only the host, content-type and x-amz-* headers participate in the signing.
func canonicalAndSignedHeaders(req *http.Request) (string, string) {
	signed := []string{}
	canonical := map[string]string{}

	for key, values := range req.Header {
		normalizedKey := strings.ToLower(key)

		if normalizedKey != "host" &&
			normalizedKey != "content-type" &&
			!strings.HasPrefix(normalizedKey, "x-amz-") {
			continue
		}

		signed = append(signed, normalizedKey)

		// for each value:
		// trim any leading or trailing spaces
		// convert sequential spaces to a single space
		//
		// NOTE(review): as written, ReplaceAll replaces a single space with a
		// single space (a no-op), so sequential spaces are not actually
		// collapsed - confirm intent against the SigV4 canonical headers spec.
		normalizedValues := make([]string, len(values))
		for i, v := range values {
			normalizedValues[i] = strings.ReplaceAll(strings.TrimSpace(v), " ", " ")
		}

		canonical[normalizedKey] = strings.Join(normalizedValues, ",")
	}

	slices.Sort(signed)

	var sortedCanonical strings.Builder
	for _, key := range signed {
		sortedCanonical.WriteString(key)
		sortedCanonical.WriteString(":")
		sortedCanonical.WriteString(canonical[key])
		sortedCanonical.WriteString("\n")
	}

	return sortedCanonical.String(), strings.Join(signed, ";")
}
// extractMetadata parses and extracts the metadata from the specified request headers.
//
// The metadata keys are all lowercased and without the "x-amz-meta-" prefix.
func extractMetadata(headers http.Header) map[string]string {
	result := map[string]string{}

	for name, values := range headers {
		if len(values) == 0 {
			continue
		}

		normalized := strings.ToLower(name)
		if !strings.HasPrefix(normalized, metadataPrefix) {
			continue
		}

		// only the first header value is considered
		result[normalized[len(metadataPrefix):]] = values[0]
	}

	return result
}

View File

@ -0,0 +1,224 @@
package s3_test
import (
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestS3URL verifies the URL construction for virtual-hosted-style and
// path-style addressing, with and without an explicit endpoint schema.
func TestS3URL(t *testing.T) {
	t.Parallel()

	scenarios := []struct {
		name     string
		s3Client *s3.S3
		expected string
	}{
		{
			"no schema",
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "example.com/",
				AccessKey: "123",
				SecretKey: "abc",
			},
			// defaults to https
			"https://test_bucket.example.com/test_key/a/b/c?q=1",
		},
		{
			"with https schema",
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
			},
			"https://test_bucket.example.com/test_key/a/b/c?q=1",
		},
		{
			"with http schema",
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "http://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
			},
			"http://test_bucket.example.com/test_key/a/b/c?q=1",
		},
		{
			"path style addressing (non-explicit schema)",
			&s3.S3{
				Region:       "test_region",
				Bucket:       "test_bucket",
				Endpoint:     "example.com/",
				AccessKey:    "123",
				SecretKey:    "abc",
				UsePathStyle: true,
			},
			"https://example.com/test_bucket/test_key/a/b/c?q=1",
		},
		{
			"path style addressing (explicit schema)",
			&s3.S3{
				Region:       "test_region",
				Bucket:       "test_bucket",
				Endpoint:     "http://example.com/",
				AccessKey:    "123",
				SecretKey:    "abc",
				UsePathStyle: true,
			},
			"http://example.com/test_bucket/test_key/a/b/c?q=1",
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			result := s.s3Client.URL("/test_key/a/b/c?q=1")
			if result != s.expected {
				t.Fatalf("Expected URL\n%s\ngot\n%s", s.expected, result)
			}
		})
	}
}
// TestS3SignAndSend verifies the SigV4 request signing against fixed
// x-amz-date values: the computed Authorization header, the implicit
// UNSIGNED-PAYLOAD fallback, and that only host/content-type/x-amz-*
// headers participate in the signature.
func TestS3SignAndSend(t *testing.T) {
	t.Parallel()

	testResponse := func() *http.Response {
		return &http.Response{
			Body: io.NopCloser(strings.NewReader("test_response")),
		}
	}

	scenarios := []struct {
		name     string
		reqFunc  func(req *http.Request)
		s3Client *s3.S3
	}{
		{
			"minimal",
			func(req *http.Request) {
				// pin the date so that the expected signature is deterministic
				req.Header.Set("x-amz-date", "20250102T150405Z")
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
				Client: NewTestClient(&RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/test",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return checkHeaders(req.Header, map[string]string{
							"Authorization":        "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=ea093662bc1deef08dfb4ac35453dfaad5ea89edf102e9dd3b7156c9a27e4c1f",
							"Host":                 "test_bucket.example.com",
							"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
							"X-Amz-Date":           "20250102T150405Z",
						})
					},
				}),
			},
		},
		{
			"minimal with different access and secret keys",
			func(req *http.Request) {
				req.Header.Set("x-amz-date", "20250102T150405Z")
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "456",
				SecretKey: "def",
				Client: NewTestClient(&RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/test",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return checkHeaders(req.Header, map[string]string{
							"Authorization":        "AWS4-HMAC-SHA256 Credential=456/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=17510fa1f724403dd0a563b61c9b31d1d718f877fcbd75455620d17a8afce5fb",
							"Host":                 "test_bucket.example.com",
							"X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD",
							"X-Amz-Date":           "20250102T150405Z",
						})
					},
				}),
			},
		},
		{
			"with extra headers",
			func(req *http.Request) {
				req.Header.Set("x-amz-date", "20250102T150405Z")
				req.Header.Set("x-amz-content-sha256", "test_sha256")
				req.Header.Set("x-amz-example", "123")
				req.Header.Set("x-amz-meta-a", "456")
				req.Header.Set("content-type", "image/png")
				req.Header.Set("x-test", "789") // shouldn't be included in the signing headers
			},
			&s3.S3{
				Region:    "test_region",
				Bucket:    "test_bucket",
				Endpoint:  "https://example.com/",
				AccessKey: "123",
				SecretKey: "abc",
				Client: NewTestClient(&RequestStub{
					Method:   http.MethodGet,
					URL:      "https://test_bucket.example.com/test",
					Response: testResponse(),
					Match: func(req *http.Request) bool {
						return checkHeaders(req.Header, map[string]string{
							"authorization":        "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date;x-amz-example;x-amz-meta-a, Signature=86dccbcd012c33073dc99e9d0a9e0b717a4d8c11c37848cfa9a4a02716bc0db3",
							"host":                 "test_bucket.example.com",
							"x-amz-date":           "20250102T150405Z",
							"x-amz-content-sha256": "test_sha256",
							"x-amz-example":        "123",
							"x-amz-meta-a":         "456",
							"x-test":               "789",
						})
					},
				}),
			},
		},
	}

	for _, s := range scenarios {
		t.Run(s.name, func(t *testing.T) {
			req, err := http.NewRequest(http.MethodGet, s.s3Client.URL("/test"), strings.NewReader("test_request"))
			if err != nil {
				t.Fatal(err)
			}

			if s.reqFunc != nil {
				s.reqFunc(req)
			}

			resp, err := s.s3Client.SignAndSend(req)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()

			err = s.s3Client.Client.(*TestClient).AssertNoRemaining()
			if err != nil {
				t.Fatal(err)
			}

			// the stubbed response body must be returned untouched
			expectedBody := "test_response"
			body, err := io.ReadAll(resp.Body)
			if err != nil {
				t.Fatal(err)
			}
			if str := string(body); str != expectedBody {
				t.Fatalf("Expected body %q, got %q", expectedBody, str)
			}
		})
	}
}

View File

@ -0,0 +1,414 @@
package s3
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"slices"
"strconv"
"strings"
"sync"
"golang.org/x/sync/errgroup"
)
// ErrUsedUploader is returned when attempting to reuse an already consumed Uploader instance.
var ErrUsedUploader = errors.New("the Uploader has been already used")

const (
	// defaultMaxConcurrency is the default number of parallel part upload workers.
	defaultMaxConcurrency int = 5

	// defaultMinPartSize is ~6MB (S3 multipart uploads require every part
	// except the last to be at least 5MB).
	defaultMinPartSize int = 6 << 20
)
// Uploader handles S3 object upload.
//
// If the Payload size is less than the configured MinPartSize it sends
// a single (PutObject) request, otherwise performs chunked/multipart upload.
//
// An Uploader is single-use (see ErrUsedUploader) and, since it embeds a
// sync.Mutex, must not be copied after first use.
type Uploader struct {
	// S3 is the S3 client instance performing the upload object request (required).
	S3 *S3

	// Payload is the object content to upload (required).
	Payload io.Reader

	// Key is the destination key of the uploaded object (required).
	Key string

	// Metadata specifies the optional metadata to write with the object upload.
	Metadata map[string]string

	// MaxConcurrency specifies the max number of workers to use when
	// performing chunked/multipart upload.
	//
	// If zero or negative, defaults to 5.
	//
	// This option is used only when the Payload size is > MinPartSize.
	MaxConcurrency int

	// MinPartSize specifies the min Payload size required to perform
	// chunked/multipart upload.
	//
	// If zero or negative, defaults to ~6MB.
	MinPartSize int

	// uploadId is the S3 multipart upload id (set after multipart init).
	uploadId string
	// uploadedParts collects the results of the uploaded multipart chunks.
	uploadedParts []*mpPart
	// lastPartNumber tracks the sequential number assigned to the last read part.
	lastPartNumber int
	mu             sync.Mutex // guards lastPartNumber and the uploadedParts slice
	// used marks the instance as consumed (Uploaders are single-use).
	used bool
}
// Upload processes the current Uploader instance.
//
// Users can specify an optional optReqFuncs that will be passed down to all Upload internal requests
// (single upload, multipart init, multipart parts upload, multipart complete, multipart abort).
//
// Note that after this call the Uploader should be discarded (aka. no longer can be used).
func (u *Uploader) Upload(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	if u.used {
		return ErrUsedUploader
	}

	if err := u.validateAndNormalize(); err != nil {
		return err
	}

	// read the first part upfront to decide between single and multipart upload
	firstPart, _, err := u.nextPart()
	if err != nil && !errors.Is(err, io.EOF) {
		return err
	}

	// small payloads are sent with a single PutObject request
	if len(firstPart) < u.MinPartSize {
		return u.singleUpload(ctx, firstPart, optReqFuncs...)
	}

	if err := u.multipartInit(ctx, optReqFuncs...); err != nil {
		return fmt.Errorf("multipart init error: %w", err)
	}

	if err := u.multipartUpload(ctx, firstPart, optReqFuncs...); err != nil {
		// best-effort cleanup of the already uploaded parts
		return errors.Join(
			u.multipartAbort(ctx, optReqFuncs...),
			fmt.Errorf("multipart upload error: %w", err),
		)
	}

	if err := u.multipartComplete(ctx, optReqFuncs...); err != nil {
		// best-effort cleanup of the already uploaded parts
		return errors.Join(
			u.multipartAbort(ctx, optReqFuncs...),
			fmt.Errorf("multipart complete error: %w", err),
		)
	}

	return nil
}
// -------------------------------------------------------------------
// validateAndNormalize checks the required Uploader fields and applies
// the default values for the optional ones (MaxConcurrency, MinPartSize).
func (u *Uploader) validateAndNormalize() error {
	if u.S3 == nil {
		return errors.New("Uploader.S3 must be a non-empty and properly initialized S3 client instance")
	}

	if u.Key == "" {
		return errors.New("Uploader.Key is required")
	}

	if u.Payload == nil {
		// note: fixed "non-nill" typo in the error message
		return errors.New("Uploader.Payload must be non-nil")
	}

	if u.MaxConcurrency <= 0 {
		u.MaxConcurrency = defaultMaxConcurrency
	}

	if u.MinPartSize <= 0 {
		u.MinPartSize = defaultMinPartSize
	}

	return nil
}
// singleUpload sends the provided part as a single PutObject request
// (used when the payload fits below MinPartSize).
func (u *Uploader) singleUpload(ctx context.Context, part []byte, optReqFuncs ...func(*http.Request)) error {
	if u.used {
		return ErrUsedUploader
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key), bytes.NewReader(part))
	if err != nil {
		return err
	}

	req.Header.Set("Content-Length", strconv.Itoa(len(part)))

	for name, value := range u.Metadata {
		req.Header.Set(metadataPrefix+name, value)
	}

	// apply optional request funcs
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}
// -------------------------------------------------------------------
// mpPart represents a single uploaded part of a multipart upload.
//
// It also doubles as the XML element serialized in the
// CompleteMultipartUpload request payload.
type mpPart struct {
	XMLName xml.Name `xml:"Part"`
	// ETag is the entity tag returned by the part upload response.
	ETag string `xml:"ETag"`
	// PartNumber is the 1-based sequence number of the part.
	PartNumber int `xml:"PartNumber"`
}
// multipartInit starts a new multipart upload and stores the returned
// upload id in u.uploadId.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (u *Uploader) multipartInit(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	if u.used {
		return ErrUsedUploader
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?uploads"), nil)
	if err != nil {
		return err
	}

	for name, value := range u.Metadata {
		req.Header.Set(metadataPrefix+name, value)
	}

	// apply optional request funcs
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	result := struct {
		XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
		UploadId string   `xml:"UploadId"`
	}{}

	if err := xml.NewDecoder(resp.Body).Decode(&result); err != nil {
		return err
	}

	u.uploadId = result.UploadId

	return nil
}
// multipartAbort aborts a pending multipart upload so that the provider
// can discard the already uploaded parts.
//
// The cleanup request is sent even when the provided ctx is already
// canceled (a fresh background context is used in that case).
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
func (u *Uploader) multipartAbort(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	u.mu.Lock()
	defer u.mu.Unlock()

	u.used = true

	// ensure that the specified abort context is always valid to allow cleanup
	var abortCtx = ctx
	if abortCtx.Err() != nil {
		abortCtx = context.Background()
	}

	query := url.Values{"uploadId": []string{u.uploadId}}

	// note: the request must use abortCtx (not the caller ctx), otherwise
	// the abort is skipped exactly when the caller context was canceled
	// and the cleanup is most needed
	req, err := http.NewRequestWithContext(abortCtx, http.MethodDelete, u.S3.URL(u.Key+"?"+query.Encode()), nil)
	if err != nil {
		return err
	}

	// apply optional request funcs
	for _, fn := range optReqFuncs {
		if fn != nil {
			fn(req)
		}
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}
// multipartComplete finalizes the multipart upload by submitting the
// sorted list of uploaded parts.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
func (u *Uploader) multipartComplete(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
	u.mu.Lock()
	defer u.mu.Unlock()

	u.used = true

	// the list of parts must be sorted in ascending order
	slices.SortFunc(u.uploadedParts, func(a, b *mpPart) int {
		switch {
		case a.PartNumber < b.PartNumber:
			return -1
		case a.PartNumber > b.PartNumber:
			return 1
		default:
			return 0
		}
	})

	// build a request payload with the uploaded parts
	completePayload := struct {
		XMLName xml.Name `xml:"CompleteMultipartUpload"`
		Parts   []*mpPart
	}{
		Parts: u.uploadedParts,
	}

	rawParts, err := xml.Marshal(&completePayload)
	if err != nil {
		return err
	}

	body := strings.NewReader(xml.Header + string(rawParts))

	query := url.Values{"uploadId": []string{u.uploadId}}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?"+query.Encode()), body)
	if err != nil {
		return err
	}

	// apply optional request funcs
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	return nil
}
// nextPart reads up to MinPartSize bytes from the payload and returns
// them together with the incremented part number.
//
// It returns io.EOF only when no bytes were read at all.
func (u *Uploader) nextPart() ([]byte, int, error) {
	u.mu.Lock()
	defer u.mu.Unlock()

	buf := make([]byte, u.MinPartSize)

	n, err := io.ReadFull(u.Payload, buf)

	// normalize io.EOF errors and ensure that io.EOF is returned
	// only when there were no read bytes
	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
		if n > 0 {
			err = nil
		} else {
			err = io.EOF
		}
	}

	u.lastPartNumber++

	return buf[:n], u.lastPartNumber, err
}
// multipartUpload uploads the payload in chunks using up to MaxConcurrency
// parallel workers and records every successfully uploaded part in
// u.uploadedParts for the later multipart complete request.
//
// initPart is the first, already-read chunk (Upload reads it upfront to
// choose between single and multipart upload); it is sent by a dedicated
// worker with the part number that was assigned when it was read.
func (u *Uploader) multipartUpload(ctx context.Context, initPart []byte, optReqFuncs ...func(*http.Request)) error {
	var g errgroup.Group
	g.SetLimit(u.MaxConcurrency)

	totalParallel := u.MaxConcurrency

	if len(initPart) != 0 {
		// reserve one worker slot for the already-read first part
		totalParallel--

		// safe to read without the mutex here because no worker
		// goroutine has been started yet
		initPartNumber := u.lastPartNumber
		g.Go(func() error {
			mp, err := u.uploadPart(ctx, initPartNumber, initPart, optReqFuncs...)
			if err != nil {
				return err
			}
			// uploadedParts is shared between workers -> guard with the mutex
			u.mu.Lock()
			u.uploadedParts = append(u.uploadedParts, mp)
			u.mu.Unlock()
			return nil
		})
	}

	for i := 0; i < totalParallel; i++ {
		g.Go(func() error {
			// each worker keeps pulling and uploading parts
			// until the payload is drained (io.EOF)
			for {
				part, num, err := u.nextPart()
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					}
					return err
				}
				mp, err := u.uploadPart(ctx, num, part, optReqFuncs...)
				if err != nil {
					return err
				}
				u.mu.Lock()
				u.uploadedParts = append(u.uploadedParts, mp)
				u.mu.Unlock()
			}
			return nil
		})
	}

	return g.Wait()
}
// uploadPart uploads a single multipart chunk and returns its
// resulting ETag/part number pair.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
func (u *Uploader) uploadPart(ctx context.Context, partNumber int, partData []byte, optReqFuncs ...func(*http.Request)) (*mpPart, error) {
	query := url.Values{
		"uploadId":   []string{u.uploadId},
		"partNumber": []string{strconv.Itoa(partNumber)},
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key+"?"+query.Encode()), bytes.NewReader(partData))
	if err != nil {
		return nil, err
	}

	req.Header.Set("Content-Length", strconv.Itoa(len(partData)))

	// apply optional request funcs
	for _, reqFunc := range optReqFuncs {
		if reqFunc == nil {
			continue
		}
		reqFunc(req)
	}

	resp, err := u.S3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	result := &mpPart{
		ETag:       resp.Header.Get("ETag"),
		PartNumber: partNumber,
	}

	return result, nil
}

View File

@ -0,0 +1,462 @@
package s3_test
import (
"context"
"io"
"net/http"
"strings"
"testing"
"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)
// TestUploaderRequiredFields ensures that Upload fails unless
// S3, Key and Payload are all set.
func TestUploaderRequiredFields(t *testing.T) {
	t.Parallel()

	s3Client := &s3.S3{
		Client:    NewTestClient(&RequestStub{Method: "PUT", URL: `^.+$`}), // match every upload
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	payload := strings.NewReader("test")

	scenarios := []struct {
		name      string
		uploader  *s3.Uploader
		expectErr bool
	}{
		{"blank", &s3.Uploader{}, true},
		{"no Key", &s3.Uploader{S3: s3Client, Payload: payload}, true},
		{"no S3", &s3.Uploader{Key: "abc", Payload: payload}, true},
		{"no Payload", &s3.Uploader{S3: s3Client, Key: "abc"}, true},
		{"with S3, Key and Payload", &s3.Uploader{S3: s3Client, Key: "abc", Payload: payload}, false},
	}

	for _, sc := range scenarios {
		t.Run(sc.name, func(t *testing.T) {
			err := sc.uploader.Upload(context.Background())

			if gotErr := err != nil; gotErr != sc.expectErr {
				t.Fatalf("Expected hasErr %v, got %v", sc.expectErr, gotErr)
			}
		})
	}
}
// TestUploaderSingleUpload verifies that a payload smaller than MinPartSize
// is sent with a single signed PutObject request (no multipart init).
func TestUploaderSingleUpload(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				// the entire payload is expected in one request, together with
				// the user metadata headers and the extra header added via the
				// optional request func
				return string(body) == "abcdefg" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "7",
					"x-amz-meta-a":   "123",
					"x-amz-meta-b":   "456",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdefg"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// payload (7 bytes) < MinPartSize (8) -> single upload path
		MinPartSize: 8,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	// ensure that all registered request stubs were consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploaderMultipartUploadSuccess verifies the happy multipart path:
// init -> 3 part uploads ("abc", "def", "g") -> complete with the parts
// listed in ascending part number order.
func TestUploaderMultipartUploadSuccess(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		// multipart init returning the upload id used by all subsequent requests
		&RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<InitiateMultipartUploadResult>
						<Bucket>test_bucket</Bucket>
						<Key>test_key</Key>
						<UploadId>test_id</UploadId>
					</InitiateMultipartUploadResult>
				`)),
			},
		},
		// part 1 ("abc")
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "abc" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// part 2 ("def")
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "def" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag2"}},
			},
		},
		// part 3 (the "g" remainder)
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=3&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "g" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "1",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag3"}},
			},
		},
		// multipart complete with the collected ETags sorted by part number
		&RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				expected := `<CompleteMultipartUpload><Part><ETag>etag1</ETag><PartNumber>1</PartNumber></Part><Part><ETag>etag2</ETag><PartNumber>2</PartNumber></Part><Part><ETag>etag3</ETag><PartNumber>3</PartNumber></Part></CompleteMultipartUpload>`
				return strings.Contains(string(body), expected) && checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdefg"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// payload (7 bytes) >= MinPartSize (3) -> multipart path with 3 parts
		MinPartSize: 3,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	// ensure that all registered request stubs were consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploaderMultipartUploadPartFailure verifies that a failed part upload
// (HTTP 400 on part 2) causes Upload to return an error and to abort the
// multipart upload (the trailing DELETE request).
func TestUploaderMultipartUploadPartFailure(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		// multipart init returning the upload id
		&RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<InitiateMultipartUploadResult>
						<Bucket>test_bucket</Bucket>
						<Key>test_key</Key>
						<UploadId>test_id</UploadId>
					</InitiateMultipartUploadResult>
				`)),
			},
		},
		// part 1 succeeds
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "abc" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// part 2 fails with a 400 response
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				StatusCode: 400,
			},
		},
		// expected multipart abort request (cleanup after the failure)
		&RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdefg"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// force the multipart path
		MinPartSize: 3,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err == nil {
		t.Fatal("Expected non-nil error")
	}

	// ensure that all registered request stubs (incl. the abort) were consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
// TestUploaderMultipartUploadCompleteFailure verifies that a failed
// CompleteMultipartUpload request (HTTP 400) causes Upload to return an
// error and to abort the multipart upload (the trailing DELETE request).
func TestUploaderMultipartUploadCompleteFailure(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		// multipart init returning the upload id
		&RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploads",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"x-amz-meta-a":  "123",
					"x-amz-meta-b":  "456",
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Body: io.NopCloser(strings.NewReader(`
					<?xml version="1.0" encoding="UTF-8"?>
					<InitiateMultipartUploadResult>
						<Bucket>test_bucket</Bucket>
						<Key>test_key</Key>
						<UploadId>test_id</UploadId>
					</InitiateMultipartUploadResult>
				`)),
			},
		},
		// part 1 succeeds
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "abc" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag1"}},
			},
		},
		// part 2 succeeds
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id",
			Match: func(req *http.Request) bool {
				body, err := io.ReadAll(req.Body)
				if err != nil {
					return false
				}
				return string(body) == "def" && checkHeaders(req.Header, map[string]string{
					"Content-Length": "3",
					"test_header":    "test",
					"Authorization":  "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				Header: http.Header{"Etag": []string{"etag2"}},
			},
		},
		// the complete request fails with a 400 response
		&RequestStub{
			Method: http.MethodPost,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				StatusCode: 400,
			},
		},
		// expected multipart abort request (cleanup after the failure)
		&RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key?uploadId=test_id",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	uploader := &s3.Uploader{
		S3: &s3.S3{
			Client:    httpClient,
			Region:    "test_region",
			Bucket:    "test_bucket",
			Endpoint:  "http://example.com",
			AccessKey: "123",
			SecretKey: "abc",
		},
		Key:      "test_key",
		Payload:  strings.NewReader("abcdef"),
		Metadata: map[string]string{"a": "123", "b": "456"},
		// force the multipart path (2 parts of 3 bytes)
		MinPartSize: 3,
	}

	err := uploader.Upload(context.Background(), func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err == nil {
		t.Fatal("Expected non-nil error")
	}

	// ensure that all registered request stubs (incl. the abort) were consumed
	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,917 +0,0 @@
// This is a trimmed version of the original go-cloud/s3blob driver
// to avoid loading both aws-sdk-go and aws-sdk-go-v2 dependencies.
// It helps reducing the final binary with ~11MB.
//
// In the future this may get replaced entirely with an even slimmer
// version without relying on aws-sdk-go-v2.
//
//--------------------------------------------------------------------
//
// Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package s3blob provides a blob implementation that uses S3. Use OpenBucket
// to construct a *blob.Bucket.
//
// # URLs
//
// For blob.OpenBucket, s3blob registers for the scheme "s3".
// The default URL opener will use an AWS session with the default credentials
// and configuration; see https://docs.aws.amazon.com/sdk-for-go/api/aws/session/
// for more details.
// Use "awssdk=v1" or "awssdk=v2" to force a specific AWS SDK version.
// To customize the URL opener, or for more details on the URL format,
// see URLOpener.
// See https://gocloud.dev/concepts/urls/ for background information.
//
// # Escaping
//
// Go CDK supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for s3blob:
// - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
// Additionally, the "/" in "../" is escaped in the same way.
// - Metadata keys: Escaped using URL encoding, then additionally "@:=" are
// escaped using "__0x<hex>__". These characters were determined by
// experimentation.
// - Metadata values: Escaped using URL encoding.
//
// # As
//
// s3blob exposes the following types for As:
// - Bucket: (V1) *s3.S3; (V2) *s3v2.Client
// - Error: (V1) awserr.Error; (V2) any error type returned by the service, notably smithy.APIError
// - ListObject: (V1) s3.Object for objects, s3.CommonPrefix for "directories"; (V2) typesv2.Object for objects, typesv2.CommonPrefix for "directories"
// - ListOptions.BeforeList: (V1) *s3.ListObjectsV2Input or *s3.ListObjectsInput
// when Options.UseLegacyList == true; (V2) *s3v2.ListObjectsV2Input or *[]func(*s3v2.Options), or *s3v2.ListObjectsInput
// when Options.UseLegacyList == true
// - Reader: (V1) s3.GetObjectOutput; (V2) s3v2.GetObjectInput
// - ReaderOptions.BeforeRead: (V1) *s3.GetObjectInput; (V2) *s3v2.GetObjectInput or *[]func(*s3v2.Options)
// - Attributes: (V1) s3.HeadObjectOutput; (V2)s3v2.HeadObjectOutput
// - CopyOptions.BeforeCopy: *(V1) s3.CopyObjectInput; (V2) s3v2.CopyObjectInput
// - WriterOptions.BeforeWrite: (V1) *s3manager.UploadInput, *s3manager.Uploader; (V2) *s3v2.PutObjectInput, *s3v2manager.Uploader
// - SignedURLOptions.BeforeSign:
// (V1) *s3.GetObjectInput; (V2) *s3v2.GetObjectInput, when Options.Method == http.MethodGet, or
// (V1) *s3.PutObjectInput; (V2) *s3v2.PutObjectInput, when Options.Method == http.MethodPut, or
// (V1) *s3.DeleteObjectInput; (V2) [not supported] when Options.Method == http.MethodDelete
package s3lite
import (
"context"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
awsv2 "github.com/aws/aws-sdk-go-v2/aws"
s3managerv2 "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
s3v2 "github.com/aws/aws-sdk-go-v2/service/s3"
typesv2 "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
"gocloud.dev/blob"
"gocloud.dev/blob/driver"
"gocloud.dev/gcerrors"
)
// -------------------------------------------------------------------
// Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// HexEscape returns s, with all runes for which shouldEscape returns true
// escaped to "__0xXXX__", where XXX is the hex representation of the rune
// value. For example, " " would escape to "__0x20__".
//
// Non-UTF-8 strings will have their non-UTF-8 characters escaped to
// unicode.ReplacementChar; the original value is lost. Please file an
// issue if you need non-UTF8 support.
//
// Note: shouldEscape takes the whole string as a slice of runes and an
// index, because a single rune doesn't always provide enough context
// for the escape decision (e.g. escaping the second "/" in "//" only).
func HexEscape(s string, shouldEscape func(s []rune, i int) bool) string {
	runes := []rune(s)

	// first pass: collect the indexes of the runes that need escaping
	var toEscape []int
	for i := range runes {
		if shouldEscape(runes, i) {
			toEscape = append(toEscape, i)
		}
	}
	if len(toEscape) == 0 {
		return s
	}

	// each escaped rune expands to at most 14 runes ("__0x7fffffff__"),
	// i.e. at most 13 extra runes per escape
	out := make([]rune, 0, len(runes)+13*len(toEscape))

	next := 0 // next index into toEscape
	for i, r := range runes {
		if next < len(toEscape) && i == toEscape[next] {
			// escape this rune
			out = append(out, []rune(fmt.Sprintf("__%#x__", r))...)
			next++
			continue
		}
		out = append(out, r)
	}

	return string(out)
}
// unescape tries to unescape starting at r[i].
// It returns a boolean indicating whether the unescaping was successful,
// and (if true) the unescaped rune and the last index of r that was used
// during unescaping.
func unescape(r []rune, i int) (bool, rune, int) {
	// expect the "__0x" prefix
	for _, want := range "__0x" {
		if i >= len(r) || r[i] != want {
			return false, 0, 0
		}
		i++
	}

	// capture the hex digits until the next "_" (if any)
	start := i
	for i < len(r) && r[i] != '_' {
		i++
	}
	digits := r[start:i]

	// expect the trailing "__"
	if i >= len(r) || r[i] != '_' {
		return false, 0, 0
	}
	i++
	if i >= len(r) || r[i] != '_' {
		return false, 0, 0
	}

	// parse the hex digits into an int32
	v, err := strconv.ParseInt(string(digits), 16, 32)
	if err != nil {
		return false, 0, 0
	}

	return true, rune(v), i
}
// HexUnescape reverses HexEscape.
func HexUnescape(s string) string {
	runes := []rune(s)

	// out is lazily allocated on the first successful unescape;
	// while it is nil the input can be returned as-is
	var out []rune

	for i := 0; i < len(runes); i++ {
		ok, r, last := unescape(runes, i)
		if ok {
			if out == nil {
				// first escape sequence found - allocate the buffer
				// and copy over everything before it
				out = append(make([]rune, 0, len(runes)), runes[:i]...)
			}
			out = append(out, r)
			i = last
			continue
		}
		if out != nil {
			out = append(out, runes[i])
		}
	}

	if out == nil {
		return s
	}

	return string(out)
}
// URLEscape uses url.PathEscape to escape s.
//
// It is the counterpart of URLUnescape.
func URLEscape(s string) string {
	return url.PathEscape(s)
}
// URLUnescape reverses URLEscape using url.PathUnescape. If the unescape
// returns an error, it returns s.
func URLUnescape(s string) string {
	unescaped, err := url.PathUnescape(s)
	if err != nil {
		// invalid escape sequence -> fall back to the original value
		return s
	}

	return unescaped
}
// -------------------------------------------------------------------
// defaultPageSize is the number of objects requested per list page
// when ListOptions.PageSize is not set.
const defaultPageSize = 1000

// Options sets options for constructing a *blob.Bucket backed by s3blob.
type Options struct {
	// UseLegacyList forces the use of ListObjects instead of ListObjectsV2.
	// Some S3-compatible services (like CEPH) do not currently support
	// ListObjectsV2.
	UseLegacyList bool
}
// openBucket returns an S3 Bucket driver instance after validating
// the required bucket name and client arguments.
func openBucket(ctx context.Context, useV2 bool, clientV2 *s3v2.Client, bucketName string, opts *Options) (*bucket, error) {
	if bucketName == "" {
		return nil, errors.New("s3blob.OpenBucket: bucketName is required")
	}

	if opts == nil {
		opts = &Options{}
	}

	if clientV2 == nil {
		return nil, errors.New("s3blob.OpenBucketV2: client is required")
	}

	result := &bucket{
		useV2:         useV2,
		name:          bucketName,
		clientV2:      clientV2,
		useLegacyList: opts.UseLegacyList,
	}

	return result, nil
}
// OpenBucketV2 returns a *blob.Bucket backed by S3, using AWS SDK v2.
//
// It validates the arguments via openBucket and wraps the resulting
// driver with blob.NewBucket.
func OpenBucketV2(ctx context.Context, client *s3v2.Client, bucketName string, opts *Options) (*blob.Bucket, error) {
	drv, err := openBucket(ctx, true, client, bucketName, opts)
	if err != nil {
		return nil, err
	}
	return blob.NewBucket(drv), nil
}
// reader reads an S3 object. It implements io.ReadCloser.
type reader struct {
	useV2 bool
	body  io.ReadCloser           // the object content stream returned by GetObject
	attrs driver.ReaderAttributes // attributes returned via Attributes
	rawV2 *s3v2.GetObjectOutput   // raw SDK response, exposed via As
}
// Read reads from the underlying S3 object body stream.
func (r *reader) Read(p []byte) (int, error) {
	return r.body.Read(p)
}
// Close closes the reader itself. It must be called when done reading.
//
// It closes the underlying S3 object body stream.
func (r *reader) Close() error {
	return r.body.Close()
}
// As exposes the raw *s3v2.GetObjectOutput response through the
// provided pointer, returning false for any other target type.
func (r *reader) As(i interface{}) bool {
	out, ok := i.(*s3v2.GetObjectOutput)
	if !ok {
		return false
	}

	*out = *r.rawV2

	return true
}
// Attributes returns a pointer to the reader's driver attributes.
func (r *reader) Attributes() *driver.ReaderAttributes {
	return &r.attrs
}
// writer writes an S3 object, it implements io.WriteCloser.
//
// Data is streamed to S3 either through an io.Pipe filled by Write calls,
// or directly from a reader passed to Upload.
type writer struct {
	// Ends of an io.Pipe, created when the first byte is written.
	pw *io.PipeWriter
	pr *io.PipeReader

	// Alternatively, upload is set to true when Upload was
	// used to upload data.
	upload bool

	ctx   context.Context
	useV2 bool

	// v2
	uploaderV2 *s3managerv2.Uploader
	reqV2      *s3v2.PutObjectInput

	donec chan struct{} // closed when done writing

	// The following fields will be written before donec closes:
	err error
}
// Write appends p to w.pw. User must call Close to close the w after done writing.
//
// The first non-empty Write lazily creates the pipe and starts the
// background upload goroutine (see open).
func (w *writer) Write(p []byte) (int, error) {
	// Avoid opening the pipe for a zero-length write;
	// the concrete can do these for empty blobs.
	if len(p) == 0 {
		return 0, nil
	}
	if w.pw == nil {
		// We'll write into pw and use pr as an io.Reader for the
		// Upload call to S3.
		w.pr, w.pw = io.Pipe()
		w.open(w.pr, true)
	}
	return w.pw.Write(p)
}
// Upload reads from r. Per the driver, it is guaranteed to be the only
// write call for this writer.
//
// It starts the background upload immediately; any upload error is
// surfaced later by Close.
func (w *writer) Upload(r io.Reader) error {
	w.upload = true
	w.open(r, false)
	return nil
}
// r may be nil if we're Closing and no data was written.
// If closePipeOnError is true, w.pr will be closed if there's an
// error uploading to S3.
func (w *writer) open(r io.Reader, closePipeOnError bool) {
	// This goroutine will keep running until Close, unless there's an error.
	go func() {
		// closing donec signals Close that the upload finished
		defer close(w.donec)

		if r == nil {
			// AWS doesn't like a nil Body.
			r = http.NoBody
		}
		var err error
		w.reqV2.Body = r
		_, err = w.uploaderV2.Upload(w.ctx, w.reqV2)
		if err != nil {
			if closePipeOnError {
				// unblock any pending pipe Write calls with the upload error
				w.pr.CloseWithError(err)
			}
			// w.err is only read by Close after donec is closed
			w.err = err
		}
	}()
}
// Close completes the writer and closes it. Any error occurring during write
// will be returned. If a writer is closed before any Write is called, Close
// will create an empty file at the given key.
func (w *writer) Close() error {
	if !w.upload {
		if w.pr != nil {
			defer w.pr.Close()
		}
		if w.pw == nil {
			// We never got any bytes written. We'll write an http.NoBody.
			w.open(nil, false)
		} else if err := w.pw.Close(); err != nil {
			return err
		}
	}
	// wait for the background upload goroutine started by open to finish
	<-w.donec
	return w.err
}
// bucket represents an S3 bucket and handles read, write and delete operations.
type bucket struct {
	name          string // the S3 bucket name
	useV2         bool
	clientV2      *s3v2.Client
	useLegacyList bool // use ListObjects instead of ListObjectsV2 (see Options)
}
// Close implements the driver bucket interface and currently is a no-op.
func (b *bucket) Close() error {
	return nil
}
// ErrorCode maps S3 service errors to gcerrors codes
// (currently only NotFound is distinguished).
func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode {
	var code string

	var ae smithy.APIError
	var oe *smithy.OperationError
	switch {
	case errors.As(err, &oe) && strings.Contains(oe.Error(), "301"):
		// V2 returns an OperationError with a missing redirect for invalid buckets.
		code = "NoSuchBucket"
	case errors.As(err, &ae):
		code = ae.ErrorCode()
	default:
		return gcerrors.Unknown
	}

	if code == "NoSuchBucket" || code == "NoSuchKey" || code == "NotFound" {
		return gcerrors.NotFound
	}

	return gcerrors.Unknown
}
// ListPaged implements driver.ListPaged.
//
// It requests a single ListObjectsV2 page (honoring PageSize, PageToken,
// Prefix and Delimiter) and converts the response objects and common
// prefixes ("directories") into driver.ListObject entries.
func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) {
	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}
	in := &s3v2.ListObjectsV2Input{
		Bucket:  awsv2.String(b.name),
		MaxKeys: awsv2.Int32(int32(pageSize)),
	}
	if len(opts.PageToken) > 0 {
		in.ContinuationToken = awsv2.String(string(opts.PageToken))
	}
	if opts.Prefix != "" {
		in.Prefix = awsv2.String(escapeKey(opts.Prefix))
	}
	if opts.Delimiter != "" {
		in.Delimiter = awsv2.String(escapeKey(opts.Delimiter))
	}
	resp, err := b.listObjectsV2(ctx, in, opts)
	if err != nil {
		return nil, err
	}
	page := driver.ListPage{}
	if resp.NextContinuationToken != nil {
		page.NextPageToken = []byte(*resp.NextContinuationToken)
	}
	if n := len(resp.Contents) + len(resp.CommonPrefixes); n > 0 {
		page.Objects = make([]*driver.ListObject, n)
		for i, obj := range resp.Contents {
			// rebind for the AsFunc closure (pre-Go 1.22 loop var semantics)
			obj := obj
			page.Objects[i] = &driver.ListObject{
				Key:     unescapeKey(awsv2.ToString(obj.Key)),
				ModTime: *obj.LastModified,
				Size:    awsv2.ToInt64(obj.Size),
				MD5:     eTagToMD5(obj.ETag),
				AsFunc: func(i interface{}) bool {
					p, ok := i.(*typesv2.Object)
					if !ok {
						return false
					}
					*p = obj
					return true
				},
			}
		}
		// common prefixes are appended after the regular objects
		for i, prefix := range resp.CommonPrefixes {
			// rebind for the AsFunc closure (pre-Go 1.22 loop var semantics)
			prefix := prefix
			page.Objects[i+len(resp.Contents)] = &driver.ListObject{
				Key:   unescapeKey(awsv2.ToString(prefix.Prefix)),
				IsDir: true,
				AsFunc: func(i interface{}) bool {
					p, ok := i.(*typesv2.CommonPrefix)
					if !ok {
						return false
					}
					*p = prefix
					return true
				},
			}
		}
		if len(resp.Contents) > 0 && len(resp.CommonPrefixes) > 0 {
			// S3 gives us blobs and "directories" in separate lists; sort them.
			sort.Slice(page.Objects, func(i, j int) bool {
				return page.Objects[i].Key < page.Objects[j].Key
			})
		}
	}
	return &page, nil
}
// listObjectsV2 issues the list request for ListPaged, falling back to the
// legacy ListObjects API (translated to/from ListObjectsV2 shapes) when
// b.useLegacyList is set.
func (b *bucket) listObjectsV2(ctx context.Context, in *s3v2.ListObjectsV2Input, opts *driver.ListOptions) (*s3v2.ListObjectsV2Output, error) {
	if !b.useLegacyList {
		var varopt []func(*s3v2.Options)
		if opts.BeforeList != nil {
			// Expose both the request and the client options slice to the
			// BeforeList callback via As.
			asFunc := func(i interface{}) bool {
				if p, ok := i.(**s3v2.ListObjectsV2Input); ok {
					*p = in
					return true
				}
				if p, ok := i.(**[]func(*s3v2.Options)); ok {
					*p = &varopt
					return true
				}
				return false
			}
			if err := opts.BeforeList(asFunc); err != nil {
				return nil, err
			}
		}
		return b.clientV2.ListObjectsV2(ctx, in, varopt...)
	}

	// Use the legacy ListObjects request.
	legacyIn := &s3v2.ListObjectsInput{
		Bucket:       in.Bucket,
		Delimiter:    in.Delimiter,
		EncodingType: in.EncodingType,
		Marker:       in.ContinuationToken,
		MaxKeys:      in.MaxKeys,
		Prefix:       in.Prefix,
		RequestPayer: in.RequestPayer,
	}
	if opts.BeforeList != nil {
		asFunc := func(i interface{}) bool {
			p, ok := i.(**s3v2.ListObjectsInput)
			if !ok {
				return false
			}
			*p = legacyIn
			return true
		}
		if err := opts.BeforeList(asFunc); err != nil {
			return nil, err
		}
	}
	legacyResp, err := b.clientV2.ListObjects(ctx, legacyIn)
	if err != nil {
		return nil, err
	}

	// Derive a continuation token: NextMarker when the server provides one,
	// otherwise the last returned key. Guard against an empty Contents slice
	// (a truncated response with no objects from a non-conforming
	// S3-compatible server) to avoid an index-out-of-range panic.
	var nextContinuationToken *string
	if legacyResp.NextMarker != nil {
		nextContinuationToken = legacyResp.NextMarker
	} else if awsv2.ToBool(legacyResp.IsTruncated) && len(legacyResp.Contents) > 0 {
		nextContinuationToken = awsv2.String(awsv2.ToString(legacyResp.Contents[len(legacyResp.Contents)-1].Key))
	}
	return &s3v2.ListObjectsV2Output{
		CommonPrefixes:        legacyResp.CommonPrefixes,
		Contents:              legacyResp.Contents,
		NextContinuationToken: nextContinuationToken,
	}, nil
}
// As implements driver.As, exposing the underlying *s3v2.Client.
func (b *bucket) As(i interface{}) bool {
	if p, ok := i.(**s3v2.Client); ok {
		*p = b.clientV2
		return true
	}
	return false
}
// ErrorAs implements driver.ErrorAs by delegating to errors.As, unwrapping
// err into the target i when the types are compatible.
func (b *bucket) ErrorAs(err error, i interface{}) bool {
	return errors.As(err, i)
}
// Attributes implements driver.Attributes via a HeadObject request.
func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) {
	in := &s3v2.HeadObjectInput{
		Bucket: awsv2.String(b.name),
		Key:    awsv2.String(escapeKey(key)),
	}
	resp, err := b.clientV2.HeadObject(ctx, in)
	if err != nil {
		return nil, err
	}

	md := make(map[string]string, len(resp.Metadata))
	for k, v := range resp.Metadata {
		// See the package comments for more details on escaping of metadata
		// keys & values.
		md[HexUnescape(URLUnescape(k))] = URLUnescape(v)
	}

	attrs := &driver.Attributes{
		CacheControl:       awsv2.ToString(resp.CacheControl),
		ContentDisposition: awsv2.ToString(resp.ContentDisposition),
		ContentEncoding:    awsv2.ToString(resp.ContentEncoding),
		ContentLanguage:    awsv2.ToString(resp.ContentLanguage),
		ContentType:        awsv2.ToString(resp.ContentType),
		Metadata:           md,
		// CreateTime not supported; left as the zero time.
		ModTime: awsv2.ToTime(resp.LastModified),
		Size:    awsv2.ToInt64(resp.ContentLength),
		MD5:     eTagToMD5(resp.ETag),
		ETag:    awsv2.ToString(resp.ETag),
		AsFunc: func(i interface{}) bool {
			p, ok := i.(*s3v2.HeadObjectOutput)
			if !ok {
				return false
			}
			*p = *resp
			return true
		},
	}
	return attrs, nil
}
// NewRangeReader implements driver.NewRangeReader.
//
// offset/length are translated into an HTTP Range header; a zero-length read
// (which S3 does not support) is emulated by requesting a single byte and
// handing callers http.NoBody instead.
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) {
	key = escapeKey(key)
	var byteRange *string
	switch {
	case offset > 0 && length < 0:
		byteRange = awsv2.String(fmt.Sprintf("bytes=%d-", offset))
	case length == 0:
		// AWS doesn't support a zero-length read; we'll read 1 byte and then
		// ignore it in favor of http.NoBody below.
		byteRange = awsv2.String(fmt.Sprintf("bytes=%d-%d", offset, offset))
	case length >= 0:
		byteRange = awsv2.String(fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
	}
	in := &s3v2.GetObjectInput{
		Bucket: awsv2.String(b.name),
		Key:    awsv2.String(key),
		Range:  byteRange,
	}
	var varopt []func(*s3v2.Options)
	if opts.BeforeRead != nil {
		// Expose both the request and the client options slice to the
		// BeforeRead callback via As.
		asFunc := func(i interface{}) bool {
			if p, ok := i.(**s3v2.GetObjectInput); ok {
				*p = in
				return true
			}
			if p, ok := i.(**[]func(*s3v2.Options)); ok {
				*p = &varopt
				return true
			}
			return false
		}
		if err := opts.BeforeRead(asFunc); err != nil {
			return nil, err
		}
	}
	resp, err := b.clientV2.GetObject(ctx, in, varopt...)
	if err != nil {
		return nil, err
	}
	body := resp.Body
	if length == 0 {
		// Drain and close the 1-byte response body we requested above so the
		// underlying HTTP connection can be reused; otherwise it would be
		// left open. Callers see http.NoBody, matching the zero-length read.
		_, _ = io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		body = http.NoBody
	}
	return &reader{
		useV2: true,
		body:  body,
		attrs: driver.ReaderAttributes{
			ContentType: awsv2.ToString(resp.ContentType),
			ModTime:     awsv2.ToTime(resp.LastModified),
			// ContentLength is only the body size; ContentRange has the full
			// blob size for partial reads.
			Size: getSize(awsv2.ToInt64(resp.ContentLength), awsv2.ToString(resp.ContentRange)),
		},
		rawV2: resp,
	}, nil
}
// eTagToMD5 processes an ETag header and returns an MD5 hash if possible.
// S3's ETag header is sometimes a quoted hexstring of the MD5. Other times,
// notably when the object was uploaded in multiple parts, it is not.
// We do the best we can.
// Some links about ETag:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
// https://github.com/aws/aws-sdk-net/issues/815
// https://teppen.io/2018/06/23/aws_s3_etags/
func eTagToMD5(etag *string) []byte {
	if etag == nil {
		// No header at all.
		return nil
	}
	s := *etag
	// Require the expected leading and trailing quotes.
	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
		return nil
	}
	// Un-hex; we return nil on error. In particular, we'll get an error here
	// for multi-part uploaded blobs, whose ETag will contain a "-" and so will
	// never be a legal hex encoding.
	sum, err := hex.DecodeString(s[1 : len(s)-1])
	if err != nil {
		return nil
	}
	return sum
}
// getSize returns the full blob size for a GetObject response.
// ContentLength is only correct for full reads; for partial-length reads it
// is the size of the returned Body, while ContentRange carries the total.
func getSize(contentLength int64, contentRange string) int64 {
	if contentRange == "" {
		return contentLength
	}
	// Sample: bytes 10-14/27 (where 27 is the full size).
	if _, total, ok := strings.Cut(contentRange, "/"); ok {
		if n, err := strconv.ParseInt(total, 10, 64); err == nil {
			return n
		}
	}
	return contentLength
}
// escapeKey does all required escaping for UTF-8 strings to work with S3.
func escapeKey(key string) string {
	return HexEscape(key, func(r []rune, i int) bool {
		switch c := r[i]; {
		// S3 doesn't handle these characters (determined via experimentation).
		case c < 32:
			return true
		// For "../", escape the trailing slash.
		case i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.':
			return true
		default:
			return false
		}
	})
}
// unescapeKey reverses escapeKey, decoding any hex escapes that were
// inserted to make the key safe for S3.
func unescapeKey(key string) string {
	return HexUnescape(key)
}
// NewTypedWriter implements driver.NewTypedWriter. It prepares an uploader
// and a PutObjectInput; the actual upload starts on the first Write/Upload.
func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) {
	key = escapeKey(key)
	uploaderV2 := s3managerv2.NewUploader(b.clientV2, func(u *s3managerv2.Uploader) {
		if opts.BufferSize != 0 {
			u.PartSize = int64(opts.BufferSize)
		}
		if opts.MaxConcurrency != 0 {
			u.Concurrency = opts.MaxConcurrency
		}
	})

	md := make(map[string]string, len(opts.Metadata))
	for k, v := range opts.Metadata {
		// See the package comments for more details on escaping of metadata
		// keys & values.
		escaped := HexEscape(url.PathEscape(k), func(runes []rune, i int) bool {
			switch runes[i] {
			case '@', ':', '=':
				return true
			}
			return false
		})
		md[escaped] = url.PathEscape(v)
	}

	reqV2 := &s3v2.PutObjectInput{
		Bucket:      awsv2.String(b.name),
		ContentType: awsv2.String(contentType),
		Key:         awsv2.String(key),
		Metadata:    md,
	}
	if v := opts.CacheControl; v != "" {
		reqV2.CacheControl = awsv2.String(v)
	}
	if v := opts.ContentDisposition; v != "" {
		reqV2.ContentDisposition = awsv2.String(v)
	}
	if v := opts.ContentEncoding; v != "" {
		reqV2.ContentEncoding = awsv2.String(v)
	}
	if v := opts.ContentLanguage; v != "" {
		reqV2.ContentLanguage = awsv2.String(v)
	}
	if len(opts.ContentMD5) > 0 {
		reqV2.ContentMD5 = awsv2.String(base64.StdEncoding.EncodeToString(opts.ContentMD5))
	}

	if opts.BeforeWrite != nil {
		// Note that since the Go CDK Blob abstraction does not expose AWS's
		// Uploader concept, there does not appear to be any utility in
		// exposing the options list to the v2 Uploader's Upload() method.
		// Instead, applications can manipulate the exposed *Uploader
		// directly, including by setting ClientOptions if needed.
		asFunc := func(i interface{}) bool {
			switch p := i.(type) {
			case **s3managerv2.Uploader:
				*p = uploaderV2
				return true
			case **s3v2.PutObjectInput:
				*p = reqV2
				return true
			}
			return false
		}
		if err := opts.BeforeWrite(asFunc); err != nil {
			return nil, err
		}
	}

	return &writer{
		ctx:        ctx,
		useV2:      true,
		uploaderV2: uploaderV2,
		reqV2:      reqV2,
		donec:      make(chan struct{}),
	}, nil
}
// Copy implements driver.Copy via a server-side CopyObject request.
func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error {
	dstKey = escapeKey(dstKey)
	srcKey = escapeKey(srcKey)
	input := &s3v2.CopyObjectInput{
		Bucket:     awsv2.String(b.name),
		// CopySource is "<bucket>/<key>" per the S3 API.
		CopySource: awsv2.String(b.name + "/" + srcKey),
		Key:        awsv2.String(dstKey),
	}
	if opts.BeforeCopy != nil {
		asFunc := func(i interface{}) bool {
			if p, ok := i.(**s3v2.CopyObjectInput); ok {
				*p = input
				return true
			}
			return false
		}
		if err := opts.BeforeCopy(asFunc); err != nil {
			return err
		}
	}
	_, err := b.clientV2.CopyObject(ctx, input)
	return err
}
// Delete implements driver.Delete.
// It first calls Attributes (a HeadObject) so that any error for a missing
// key — e.g. NotFound — is returned before the delete is attempted.
func (b *bucket) Delete(ctx context.Context, key string) error {
	if _, err := b.Attributes(ctx, key); err != nil {
		return err
	}
	in := &s3v2.DeleteObjectInput{
		Bucket: awsv2.String(b.name),
		Key:    awsv2.String(escapeKey(key)),
	}
	_, err := b.clientV2.DeleteObject(ctx, in)
	return err
}
// SignedURL returns a pre-signed URL for GET or PUT requests on key,
// valid for opts.Expiry. Other methods return an error.
func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) {
	key = escapeKey(key)
	// presign builds a presign client with the requested expiry.
	presign := func() *s3v2.PresignClient {
		return s3v2.NewPresignClient(b.clientV2, s3v2.WithPresignExpires(opts.Expiry))
	}
	switch opts.Method {
	case http.MethodGet:
		in := &s3v2.GetObjectInput{
			Bucket: awsv2.String(b.name),
			Key:    awsv2.String(key),
		}
		if opts.BeforeSign != nil {
			asFunc := func(i interface{}) bool {
				v, ok := i.(**s3v2.GetObjectInput)
				if ok {
					*v = in
				}
				return ok
			}
			if err := opts.BeforeSign(asFunc); err != nil {
				return "", err
			}
		}
		p, err := presign().PresignGetObject(ctx, in)
		if err != nil {
			return "", err
		}
		return p.URL, nil
	case http.MethodPut:
		if opts.EnforceAbsentContentType || opts.ContentType != "" {
			// https://github.com/aws/aws-sdk-go-v2/issues/1475
			return "", errors.New("s3blob: AWS SDK v2 does not support enforcing ContentType in SignedURLs for PUT")
		}
		in := &s3v2.PutObjectInput{
			Bucket: awsv2.String(b.name),
			Key:    awsv2.String(key),
		}
		if opts.BeforeSign != nil {
			asFunc := func(i interface{}) bool {
				v, ok := i.(**s3v2.PutObjectInput)
				if ok {
					*v = in
				}
				return ok
			}
			if err := opts.BeforeSign(asFunc); err != nil {
				return "", err
			}
		}
		p, err := presign().PresignPutObject(ctx, in)
		if err != nil {
			return "", err
		}
		return p.URL, nil
	}
	return "", fmt.Errorf("unsupported Method %q", opts.Method)
}