From 501c49012ec956b0fc79a45ce55ab67f6bb85fad Mon Sep 17 00:00:00 2001 From: Gani Georgiev Date: Wed, 5 Mar 2025 15:58:21 +0200 Subject: [PATCH] [poc] replaced aws-sdk-go-v2 and gocloud.dev/blob --- CHANGELOG.md | 2 + apis/backup_test.go | 4 +- go.mod | 41 +- go.sum | 252 +- .../jsvm/internal/types/generated/types.d.ts | 9571 ++++++++--------- plugins/jsvm/internal/types/types.go | 1 - tools/filesystem/blob/bucket.go | 716 ++ tools/filesystem/blob/driver.go | 108 + tools/filesystem/blob/hex.go | 153 + tools/filesystem/blob/reader.go | 178 + tools/filesystem/blob/writer.go | 166 + tools/filesystem/filesystem.go | 131 +- tools/filesystem/ignore_signing_headers.go | 72 - tools/filesystem/internal/fileblob/attrs.go | 79 + .../filesystem/internal/fileblob/fileblob.go | 713 ++ tools/filesystem/internal/s3blob/driver.go | 482 + .../internal/s3blob/s3/client_test.go | 128 + .../internal/s3blob/s3/copy_object.go | 59 + .../internal/s3blob/s3/copy_object_test.go | 66 + .../internal/s3blob/s3/delete_object.go | 31 + .../internal/s3blob/s3/delete_object_test.go | 47 + tools/filesystem/internal/s3blob/s3/error.go | 47 + .../internal/s3blob/s3/error_test.go | 86 + .../internal/s3blob/s3/get_object.go | 43 + .../internal/s3blob/s3/get_object_test.go | 91 + .../internal/s3blob/s3/head_object.go | 89 + .../internal/s3blob/s3/head_object_test.go | 76 + .../internal/s3blob/s3/list_objects.go | 165 + .../internal/s3blob/s3/list_objects_test.go | 156 + tools/filesystem/internal/s3blob/s3/s3.go | 262 + .../filesystem/internal/s3blob/s3/s3_test.go | 224 + .../filesystem/internal/s3blob/s3/uploader.go | 414 + .../internal/s3blob/s3/uploader_test.go | 462 + tools/filesystem/internal/s3lite/s3lite.go | 917 -- 34 files changed, 9845 insertions(+), 6187 deletions(-) create mode 100644 tools/filesystem/blob/bucket.go create mode 100644 tools/filesystem/blob/driver.go create mode 100644 tools/filesystem/blob/hex.go create mode 100644 tools/filesystem/blob/reader.go create mode 100644 
tools/filesystem/blob/writer.go delete mode 100644 tools/filesystem/ignore_signing_headers.go create mode 100644 tools/filesystem/internal/fileblob/attrs.go create mode 100644 tools/filesystem/internal/fileblob/fileblob.go create mode 100644 tools/filesystem/internal/s3blob/driver.go create mode 100644 tools/filesystem/internal/s3blob/s3/client_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/copy_object.go create mode 100644 tools/filesystem/internal/s3blob/s3/copy_object_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/delete_object.go create mode 100644 tools/filesystem/internal/s3blob/s3/delete_object_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/error.go create mode 100644 tools/filesystem/internal/s3blob/s3/error_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/get_object.go create mode 100644 tools/filesystem/internal/s3blob/s3/get_object_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/head_object.go create mode 100644 tools/filesystem/internal/s3blob/s3/head_object_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/list_objects.go create mode 100644 tools/filesystem/internal/s3blob/s3/list_objects_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/s3.go create mode 100644 tools/filesystem/internal/s3blob/s3/s3_test.go create mode 100644 tools/filesystem/internal/s3blob/s3/uploader.go create mode 100644 tools/filesystem/internal/s3blob/s3/uploader_test.go delete mode 100644 tools/filesystem/internal/s3lite/s3lite.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 30df9b17..ce3d5bbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ ## v0.26.0 (WIP) +- ⚠️ Replaced `aws-sdk-go-v2` and `gocloud.dev/blob` with custom lighter implementation (@todo docs and tests) + - ⚠️ Prioritized the user submitted non-empty `createData.email` (_it will be unverified_) when creating the PocketBase user during the first OAuth2 auth. 
- Load the request info context during password/OAuth2/OTP authentication ([#6402](https://github.com/pocketbase/pocketbase/issues/6402)). diff --git a/apis/backup_test.go b/apis/backup_test.go index ca4f5c10..0bc70985 100644 --- a/apis/backup_test.go +++ b/apis/backup_test.go @@ -13,7 +13,7 @@ import ( "github.com/pocketbase/pocketbase/apis" "github.com/pocketbase/pocketbase/core" "github.com/pocketbase/pocketbase/tests" - "gocloud.dev/blob" + "github.com/pocketbase/pocketbase/tools/filesystem/blob" ) func TestBackupsList(t *testing.T) { @@ -490,7 +490,7 @@ func TestBackupsDownload(t *testing.T) { t.Fatal(err) } }, - ExpectedStatus: 400, + ExpectedStatus: 404, ExpectedContent: []string{`"data":{}`}, ExpectedEvents: map[string]int{"*": 0}, }, diff --git a/go.mod b/go.mod index 2e8711ca..782fc3f9 100644 --- a/go.mod +++ b/go.mod @@ -3,12 +3,6 @@ module github.com/pocketbase/pocketbase go 1.23 require ( - github.com/aws/aws-sdk-go-v2 v1.36.1 - github.com/aws/aws-sdk-go-v2/config v1.28.10 - github.com/aws/aws-sdk-go-v2/credentials v1.17.51 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 - github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 - github.com/aws/smithy-go v1.22.2 github.com/disintegration/imaging v1.6.2 github.com/domodwyer/mailyak/v3 v3.6.2 github.com/dop251/goja v0.0.0-20241009100908-5f46f2705ca3 @@ -23,7 +17,6 @@ require ( github.com/pocketbase/tygoja v0.0.0-20250103200817-ca580d8c5119 github.com/spf13/cast v1.7.1 github.com/spf13/cobra v1.8.1 - gocloud.dev v0.40.0 golang.org/x/crypto v0.33.0 golang.org/x/net v0.35.0 golang.org/x/oauth2 v0.26.0 @@ -32,47 +25,25 @@ require ( ) require ( - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // 
indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect + github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 // indirect github.com/dlclark/regexp2 v1.11.4 // indirect github.com/dop251/base64dec v0.0.0-20231022112746-c6c9f9a96217 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect - github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/spf13/pflag v1.0.6 // indirect - github.com/stretchr/testify v1.8.2 // indirect - go.opencensus.io v0.24.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 // indirect - golang.org/x/image v0.24.0 // indirect - golang.org/x/mod v0.23.0 // indirect + golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/sys 
v0.30.0 // indirect golang.org/x/text v0.22.0 // indirect - golang.org/x/tools v0.30.0 // indirect - golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/api v0.220.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 // indirect - google.golang.org/grpc v1.70.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect + golang.org/x/tools v0.26.0 // indirect modernc.org/libc v1.61.13 // indirect modernc.org/mathutil v1.7.1 // indirect modernc.org/memory v1.8.2 // indirect diff --git a/go.sum b/go.sum index adacf08c..9369c2e2 100644 --- a/go.sum +++ b/go.sum @@ -1,71 +1,10 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0= -cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM= -cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= -cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= -cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= -cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= 
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= -github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= -github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg= -github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg= -github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI= 
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 h1:XnXVe2zRyPf0+fAW5L05esmngvBpC6DQZK7oZB/z/Co= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48/go.mod h1:S3wey90OrS4f7kYxH6PT175YyEcHTORY07++HurMaRM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 h1:kT2WeWcFySdYpPgyqJMSUE7781Qucjtn6wBvrgm9P+M= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod 
h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q= -github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 h1:a7aQ3RW+ug4IbhoQp29NZdc7vqrzKZZfWZSaQAXOZvQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2/go.mod h1:xMekrnhmJ5aqmyxtmALs7mlvXw5xRh+eYjOjvrIIFJ4= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= 
github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo= @@ -80,14 +19,8 @@ github.com/dop251/goja_nodejs v0.0.0-20240728170619-29b559befffc h1:MKYt39yZJi0Z github.com/dop251/goja_nodejs v0.0.0-20240728170619-29b559befffc/go.mod h1:VULptt4Q/fNzQUJlqY/GP3qHyU7ZH46mFkBZe0ZTokU= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -96,67 +29,30 @@ github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3G github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/ganigeorgiev/fexpr v0.4.1 h1:hpUgbUEEWIZhSDBtf4M9aUNfQQ0BZkGRaMePy7Gcx5k= github.com/ganigeorgiev/fexpr v0.4.1/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ozzo/ozzo-validation/v4 v4.3.0 h1:byhDUpfEwjsVQb1vBunvIjh2BHQ9ead57VkAEY4V+Es= github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= github.com/go-sourcemap/sourcemap v2.1.4+incompatible h1:a+iTbH5auLKxaNwQFg0B+TCYl6lbukKPc7b5x0n1s6Q= github.com/go-sourcemap/sourcemap v2.1.4+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= -github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= -github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod 
h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= -github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= -github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= -github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= -github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= -github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod 
h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= @@ -167,7 +63,6 @@ github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs= github.com/pocketbase/tygoja v0.0.0-20250103200817-ca580d8c5119 h1:TjQtEReJDTpvlNFTRjuHvPQpJHAeJdcQF130eCAAT/o= github.com/pocketbase/tygoja v0.0.0-20250103200817-ca580d8c5119/go.mod h1:hKJWPGFqavk3cdTa47Qvs8g37lnfI57OYdVVbIqW5aE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= @@ -177,160 +72,67 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= -go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= -go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= -go.opentelemetry.io/otel/sdk 
v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= -go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -gocloud.dev v0.40.0 h1:f8LgP+4WDqOG/RXoUcyLpeIAGOcAbZrZbDQCUee10ng= -gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 h1:pVgRXcIictcr+lBQIFeiwuwtDIs4eL21OuM9nyAADmo= golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ= -golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync 
v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.30.0 
h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= -golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns= -google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phrYMtzX11k+XkzMGfRAet42PmoTATM= -google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6 h1:2duwAxN2+k0xLNpjnHTXoMUgnv6VPSp5fiqTuwSxjmI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250207221924-e9438ea467c6/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= -google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= -modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= -modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y= -modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s= +modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0= +modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo= +modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo= modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= -modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= -modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= -modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= -modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw= 
+modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8= modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E= modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI= modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU= -modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= -modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= -modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g= -modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= modernc.org/sqlite v1.35.0 h1:yQps4fegMnZFdphtzlfQTCNBWtS0CZv48pRpW3RFHRw= modernc.org/sqlite v1.35.0/go.mod h1:9cr2sicr7jIaWTBKQmAxQLfBv9LL0su4ZTEV+utt3ic= -modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= -modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git 
a/plugins/jsvm/internal/types/generated/types.d.ts b/plugins/jsvm/internal/types/generated/types.d.ts index 7b10f30f..182f8952 100644 --- a/plugins/jsvm/internal/types/generated/types.d.ts +++ b/plugins/jsvm/internal/types/generated/types.d.ts @@ -1,4 +1,4 @@ -// 1740135758 +// 1740663457 // GENERATED CODE - DO NOT MODIFY BY HAND // ------------------------------------------------------------------- @@ -1749,8 +1749,8 @@ namespace os { * than ReadFrom. This is used to permit ReadFrom to call io.Copy * without leading to a recursive call to ReadFrom. */ - type _sBRFPzE = noReadFrom&File - interface fileWithoutReadFrom extends _sBRFPzE { + type _sdoNzPp = noReadFrom&File + interface fileWithoutReadFrom extends _sdoNzPp { } interface File { /** @@ -1794,8 +1794,8 @@ namespace os { * than WriteTo. This is used to permit WriteTo to call io.Copy * without leading to a recursive call to WriteTo. */ - type _sNGIEZq = noWriteTo&File - interface fileWithoutWriteTo extends _sNGIEZq { + type _sCeWKZE = noWriteTo&File + interface fileWithoutWriteTo extends _sCeWKZE { } interface File { /** @@ -2439,8 +2439,8 @@ namespace os { * * The methods of File are safe for concurrent use. */ - type _szdtcnZ = file - interface File extends _szdtcnZ { + type _sCBuhEq = file + interface File extends _sCBuhEq { } /** * A FileInfo describes a file and is returned by [Stat] and [Lstat]. @@ -2490,6 +2490,991 @@ namespace os { } } +/** + * Package filepath implements utility routines for manipulating filename paths + * in a way compatible with the target operating system-defined file paths. + * + * The filepath package uses either forward slashes or backslashes, + * depending on the operating system. To process paths such as URLs + * that always use forward slashes regardless of the operating + * system, see the [path] package. + */ +namespace filepath { + interface match { + /** + * Match reports whether name matches the shell file name pattern. 
+ * The pattern syntax is: + * + * ``` + * pattern: + * { term } + * term: + * '*' matches any sequence of non-Separator characters + * '?' matches any single non-Separator character + * '[' [ '^' ] { character-range } ']' + * character class (must be non-empty) + * c matches character c (c != '*', '?', '\\', '[') + * '\\' c matches character c + * + * character-range: + * c matches character c (c != '\\', '-', ']') + * '\\' c matches character c + * lo '-' hi matches character c for lo <= c <= hi + * ``` + * + * Match requires pattern to match all of name, not just a substring. + * The only possible returned error is [ErrBadPattern], when pattern + * is malformed. + * + * On Windows, escaping is disabled. Instead, '\\' is treated as + * path separator. + */ + (pattern: string, name: string): boolean + } + interface glob { + /** + * Glob returns the names of all files matching pattern or nil + * if there is no matching file. The syntax of patterns is the same + * as in [Match]. The pattern may describe hierarchical names such as + * /usr/*\/bin/ed (assuming the [Separator] is '/'). + * + * Glob ignores file system errors such as I/O errors reading directories. + * The only possible returned error is [ErrBadPattern], when pattern + * is malformed. + */ + (pattern: string): Array + } + interface clean { + /** + * Clean returns the shortest path name equivalent to path + * by purely lexical processing. It applies the following rules + * iteratively until no further processing can be done: + * + * 1. Replace multiple [Separator] elements with a single one. + * 2. Eliminate each . path name element (the current directory). + * 3. Eliminate each inner .. path name element (the parent directory) + * ``` + * along with the non-.. element that precedes it. + * ``` + * 4. Eliminate .. elements that begin a rooted path: + * ``` + * that is, replace "/.." by "/" at the beginning of a path, + * assuming Separator is '/'. 
+ * ``` + * + * The returned path ends in a slash only if it represents a root directory, + * such as "/" on Unix or `C:\` on Windows. + * + * Finally, any occurrences of slash are replaced by Separator. + * + * If the result of this process is an empty string, Clean + * returns the string ".". + * + * On Windows, Clean does not modify the volume name other than to replace + * occurrences of "/" with `\`. + * For example, Clean("//host/share/../x") returns `\\host\share\x`. + * + * See also Rob Pike, “Lexical File Names in Plan 9 or + * Getting Dot-Dot Right,” + * https://9p.io/sys/doc/lexnames.html + */ + (path: string): string + } + interface isLocal { + /** + * IsLocal reports whether path, using lexical analysis only, has all of these properties: + * + * ``` + * - is within the subtree rooted at the directory in which path is evaluated + * - is not an absolute path + * - is not empty + * - on Windows, is not a reserved name such as "NUL" + * ``` + * + * If IsLocal(path) returns true, then + * Join(base, path) will always produce a path contained within base and + * Clean(path) will always produce an unrooted path with no ".." path elements. + * + * IsLocal is a purely lexical operation. + * In particular, it does not account for the effect of any symbolic links + * that may exist in the filesystem. + */ + (path: string): boolean + } + interface localize { + /** + * Localize converts a slash-separated path into an operating system path. + * The input path must be a valid path as reported by [io/fs.ValidPath]. + * + * Localize returns an error if the path cannot be represented by the operating system. + * For example, the path a\b is rejected on Windows, on which \ is a separator + * character and cannot be part of a filename. + * + * The path returned by Localize will always be local, as reported by IsLocal. 
+ */ + (path: string): string + } + interface toSlash { + /** + * ToSlash returns the result of replacing each separator character + * in path with a slash ('/') character. Multiple separators are + * replaced by multiple slashes. + */ + (path: string): string + } + interface fromSlash { + /** + * FromSlash returns the result of replacing each slash ('/') character + * in path with a separator character. Multiple slashes are replaced + * by multiple separators. + * + * See also the Localize function, which converts a slash-separated path + * as used by the io/fs package to an operating system path. + */ + (path: string): string + } + interface splitList { + /** + * SplitList splits a list of paths joined by the OS-specific [ListSeparator], + * usually found in PATH or GOPATH environment variables. + * Unlike strings.Split, SplitList returns an empty slice when passed an empty + * string. + */ + (path: string): Array + } + interface split { + /** + * Split splits path immediately following the final [Separator], + * separating it into a directory and file name component. + * If there is no Separator in path, Split returns an empty dir + * and file set to path. + * The returned values have the property that path = dir+file. + */ + (path: string): [string, string] + } + interface join { + /** + * Join joins any number of path elements into a single path, + * separating them with an OS specific [Separator]. Empty elements + * are ignored. The result is Cleaned. However, if the argument + * list is empty or all its elements are empty, Join returns + * an empty string. + * On Windows, the result will only be a UNC path if the first + * non-empty element is a UNC path. + */ + (...elem: string[]): string + } + interface ext { + /** + * Ext returns the file name extension used by path. + * The extension is the suffix beginning at the final dot + * in the final element of path; it is empty if there is + * no dot. 
+ */ + (path: string): string + } + interface evalSymlinks { + /** + * EvalSymlinks returns the path name after the evaluation of any symbolic + * links. + * If path is relative the result will be relative to the current directory, + * unless one of the components is an absolute symbolic link. + * EvalSymlinks calls [Clean] on the result. + */ + (path: string): string + } + interface isAbs { + /** + * IsAbs reports whether the path is absolute. + */ + (path: string): boolean + } + interface abs { + /** + * Abs returns an absolute representation of path. + * If the path is not absolute it will be joined with the current + * working directory to turn it into an absolute path. The absolute + * path name for a given file is not guaranteed to be unique. + * Abs calls [Clean] on the result. + */ + (path: string): string + } + interface rel { + /** + * Rel returns a relative path that is lexically equivalent to targpath when + * joined to basepath with an intervening separator. That is, + * [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself. + * On success, the returned path will always be relative to basepath, + * even if basepath and targpath share no elements. + * An error is returned if targpath can't be made relative to basepath or if + * knowing the current working directory would be necessary to compute it. + * Rel calls [Clean] on the result. + */ + (basepath: string, targpath: string): string + } + /** + * WalkFunc is the type of the function called by [Walk] to visit each + * file or directory. + * + * The path argument contains the argument to Walk as a prefix. + * That is, if Walk is called with root argument "dir" and finds a file + * named "a" in that directory, the walk function will be called with + * argument "dir/a". 
+ * + * The directory and file are joined with Join, which may clean the + * directory name: if Walk is called with the root argument "x/../dir" + * and finds a file named "a" in that directory, the walk function will + * be called with argument "dir/a", not "x/../dir/a". + * + * The info argument is the fs.FileInfo for the named path. + * + * The error result returned by the function controls how Walk continues. + * If the function returns the special value [SkipDir], Walk skips the + * current directory (path if info.IsDir() is true, otherwise path's + * parent directory). If the function returns the special value [SkipAll], + * Walk skips all remaining files and directories. Otherwise, if the function + * returns a non-nil error, Walk stops entirely and returns that error. + * + * The err argument reports an error related to path, signaling that Walk + * will not walk into that directory. The function can decide how to + * handle that error; as described earlier, returning the error will + * cause Walk to stop walking the entire tree. + * + * Walk calls the function with a non-nil err argument in two cases. + * + * First, if an [os.Lstat] on the root directory or any directory or file + * in the tree fails, Walk calls the function with path set to that + * directory or file's path, info set to nil, and err set to the error + * from os.Lstat. + * + * Second, if a directory's Readdirnames method fails, Walk calls the + * function with path set to the directory's path, info, set to an + * [fs.FileInfo] describing the directory, and err set to the error from + * Readdirnames. + */ + interface WalkFunc {(path: string, info: fs.FileInfo, err: Error): void } + interface walkDir { + /** + * WalkDir walks the file tree rooted at root, calling fn for each file or + * directory in the tree, including root. + * + * All errors that arise visiting files and directories are filtered by fn: + * see the [fs.WalkDirFunc] documentation for details. 
+ * + * The files are walked in lexical order, which makes the output deterministic + * but requires WalkDir to read an entire directory into memory before proceeding + * to walk that directory. + * + * WalkDir does not follow symbolic links. + * + * WalkDir calls fn with paths that use the separator character appropriate + * for the operating system. This is unlike [io/fs.WalkDir], which always + * uses slash separated paths. + */ + (root: string, fn: fs.WalkDirFunc): void + } + interface walk { + /** + * Walk walks the file tree rooted at root, calling fn for each file or + * directory in the tree, including root. + * + * All errors that arise visiting files and directories are filtered by fn: + * see the [WalkFunc] documentation for details. + * + * The files are walked in lexical order, which makes the output deterministic + * but requires Walk to read an entire directory into memory before proceeding + * to walk that directory. + * + * Walk does not follow symbolic links. + * + * Walk is less efficient than [WalkDir], introduced in Go 1.16, + * which avoids calling os.Lstat on every visited file or directory. + */ + (root: string, fn: WalkFunc): void + } + interface base { + /** + * Base returns the last element of path. + * Trailing path separators are removed before extracting the last element. + * If the path is empty, Base returns ".". + * If the path consists entirely of separators, Base returns a single separator. + */ + (path: string): string + } + interface dir { + /** + * Dir returns all but the last element of path, typically the path's directory. + * After dropping the final element, Dir calls [Clean] on the path and trailing + * slashes are removed. + * If the path is empty, Dir returns ".". + * If the path consists entirely of separators, Dir returns a single separator. + * The returned path does not end in a separator unless it is the root directory. 
+ */ + (path: string): string + } + interface volumeName { + /** + * VolumeName returns leading volume name. + * Given "C:\foo\bar" it returns "C:" on Windows. + * Given "\\host\share\foo" it returns "\\host\share". + * On other platforms it returns "". + */ + (path: string): string + } + interface hasPrefix { + /** + * HasPrefix exists for historical compatibility and should not be used. + * + * Deprecated: HasPrefix does not respect path boundaries and + * does not ignore case when required. + */ + (p: string, prefix: string): boolean + } +} + +/** + * Package exec runs external commands. It wraps os.StartProcess to make it + * easier to remap stdin and stdout, connect I/O with pipes, and do other + * adjustments. + * + * Unlike the "system" library call from C and other languages, the + * os/exec package intentionally does not invoke the system shell and + * does not expand any glob patterns or handle other expansions, + * pipelines, or redirections typically done by shells. The package + * behaves more like C's "exec" family of functions. To expand glob + * patterns, either call the shell directly, taking care to escape any + * dangerous input, or use the [path/filepath] package's Glob function. + * To expand environment variables, use package os's ExpandEnv. + * + * Note that the examples in this package assume a Unix system. + * They may not run on Windows, and they do not run in the Go Playground + * used by golang.org and godoc.org. + * + * # Executables in the current directory + * + * The functions [Command] and [LookPath] look for a program + * in the directories listed in the current path, following the + * conventions of the host operating system. + * Operating systems have for decades included the current + * directory in this search, sometimes implicitly and sometimes + * configured explicitly that way by default. + * Modern practice is that including the current directory + * is usually unexpected and often leads to security problems. 
+ * + * To avoid those security problems, as of Go 1.19, this package will not resolve a program + * using an implicit or explicit path entry relative to the current directory. + * That is, if you run [LookPath]("go"), it will not successfully return + * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. + * Instead, if the usual path algorithms would result in that answer, + * these functions return an error err satisfying [errors.Is](err, [ErrDot]). + * + * For example, consider these two program snippets: + * + * ``` + * path, err := exec.LookPath("prog") + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * ``` + * + * and + * + * ``` + * cmd := exec.Command("prog") + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * ``` + * + * These will not find and run ./prog or .\prog.exe, + * no matter how the current path is configured. + * + * Code that always wants to run a program from the current directory + * can be rewritten to say "./prog" instead of "prog". + * + * Code that insists on including results from relative path entries + * can instead override the error using an errors.Is check: + * + * ``` + * path, err := exec.LookPath("prog") + * if errors.Is(err, exec.ErrDot) { + * err = nil + * } + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * ``` + * + * and + * + * ``` + * cmd := exec.Command("prog") + * if errors.Is(cmd.Err, exec.ErrDot) { + * cmd.Err = nil + * } + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * ``` + * + * Setting the environment variable GODEBUG=execerrdot=0 + * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19 + * behavior for programs that are unable to apply more targeted fixes. + * A future version of Go may remove support for this variable. + * + * Before adding such overrides, make sure you understand the + * security implications of doing so. + * See https://go.dev/blog/path-security for more information. 
+ */ +namespace exec { + interface command { + /** + * Command returns the [Cmd] struct to execute the named program with + * the given arguments. + * + * It sets only the Path and Args in the returned structure. + * + * If name contains no path separators, Command uses [LookPath] to + * resolve name to a complete path if possible. Otherwise it uses name + * directly as Path. + * + * The returned Cmd's Args field is constructed from the command name + * followed by the elements of arg, so arg should not include the + * command name itself. For example, Command("echo", "hello"). + * Args[0] is always name, not the possibly resolved Path. + * + * On Windows, processes receive the whole command line as a single string + * and do their own parsing. Command combines and quotes Args into a command + * line string with an algorithm compatible with applications using + * CommandLineToArgvW (which is the most common way). Notable exceptions are + * msiexec.exe and cmd.exe (and thus, all batch files), which have a different + * unquoting algorithm. In these or other similar cases, you can do the + * quoting yourself and provide the full command line in SysProcAttr.CmdLine, + * leaving Args empty. + */ + (name: string, ...arg: string[]): (Cmd) + } +} + +/** + * Package template is a thin wrapper around the standard html/template + * and text/template packages that implements a convenient registry to + * load and cache templates on the fly concurrently. + * + * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. 
+ * + * Example: + * + * ``` + * registry := template.NewRegistry() + * + * html1, err := registry.LoadFiles( + * // the files set wil be parsed only once and then cached + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "John"}) + * + * html2, err := registry.LoadFiles( + * // reuse the already parsed and cached files set + * "layout.html", + * "content.html", + * ).Render(map[string]any{"name": "Jane"}) + * ``` + */ +namespace template { + interface newRegistry { + /** + * NewRegistry creates and initializes a new templates registry with + * some defaults (eg. global "raw" template function for unescaped HTML). + * + * Use the Registry.Load* methods to load templates into the registry. + */ + (): (Registry) + } + /** + * Registry defines a templates registry that is safe to be used by multiple goroutines. + * + * Use the Registry.Load* methods to load templates into the registry. + */ + interface Registry { + } + interface Registry { + /** + * AddFuncs registers new global template functions. + * + * The key of each map entry is the function name that will be used in the templates. + * If a function with the map entry name already exists it will be replaced with the new one. + * + * The value of each map entry is a function that must have either a + * single return value, or two return values of which the second has type error. + * + * Example: + * + * ``` + * r.AddFuncs(map[string]any{ + * "toUpper": func(str string) string { + * return strings.ToUppser(str) + * }, + * ... + * }) + * ``` + */ + addFuncs(funcs: _TygojaDict): (Registry) + } + interface Registry { + /** + * LoadFiles caches (if not already) the specified filenames set as a + * single template and returns a ready to use Renderer instance. + * + * There must be at least 1 filename specified. 
+ */ + loadFiles(...filenames: string[]): (Renderer) + } + interface Registry { + /** + * LoadString caches (if not already) the specified inline string as a + * single template and returns a ready to use Renderer instance. + */ + loadString(text: string): (Renderer) + } + interface Registry { + /** + * LoadFS caches (if not already) the specified fs and globPatterns + * pair as single template and returns a ready to use Renderer instance. + * + * There must be at least 1 file matching the provided globPattern(s) + * (note that most file names serves as glob patterns matching themselves). + */ + loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) + } + /** + * Renderer defines a single parsed template. + */ + interface Renderer { + } + interface Renderer { + /** + * Render executes the template with the specified data as the dot object + * and returns the result as plain string. + */ + render(data: any): string + } +} + +namespace security { + interface s256Challenge { + /** + * S256Challenge creates base64 encoded sha256 challenge string derived from code. + * The padding of the result base64 string is stripped per [RFC 7636]. + * + * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2 + */ + (code: string): string + } + interface md5 { + /** + * MD5 creates md5 hash from the provided plain text. + */ + (text: string): string + } + interface sha256 { + /** + * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text. + */ + (text: string): string + } + interface sha512 { + /** + * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text. + */ + (text: string): string + } + interface hs256 { + /** + * HS256 creates a HMAC hash with sha256 digest algorithm. + */ + (text: string, secret: string): string + } + interface hs512 { + /** + * HS512 creates a HMAC hash with sha512 digest algorithm. 
+ */ + (text: string, secret: string): string + } + interface equal { + /** + * Equal compares two hash strings for equality without leaking timing information. + */ + (hash1: string, hash2: string): boolean + } + // @ts-ignore + import crand = rand + interface encrypt { + /** + * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key). + * + * This method uses AES-256-GCM block cypher mode. + */ + (data: string|Array, key: string): string + } + interface decrypt { + /** + * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key). + * + * This method uses AES-256-GCM block cypher mode. + */ + (cipherText: string, key: string): string|Array + } + interface parseUnverifiedJWT { + /** + * ParseUnverifiedJWT parses JWT and returns its claims + * but DOES NOT verify the signature. + * + * It verifies only the exp, iat and nbf claims. + */ + (token: string): jwt.MapClaims + } + interface parseJWT { + /** + * ParseJWT verifies and parses JWT and returns its claims. + */ + (token: string, verificationKey: string): jwt.MapClaims + } + interface newJWT { + /** + * NewJWT generates and returns new HS256 signed JWT. + */ + (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string + } + // @ts-ignore + import cryptoRand = rand + // @ts-ignore + import mathRand = rand + interface randomString { + /** + * RandomString generates a cryptographically random string with the specified length. + * + * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. + */ + (length: number): string + } + interface randomStringWithAlphabet { + /** + * RandomStringWithAlphabet generates a cryptographically random string + * with the specified length and characters set. + * + * It panics if for some reason rand.Int returns a non-nil error. + */ + (length: number, alphabet: string): string + } + interface pseudorandomString { + /** + * PseudorandomString generates a pseudorandom string with the specified length. 
+ * + * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. + * + * For a cryptographically random string (but a little bit slower) use RandomString instead. + */ + (length: number): string + } + interface pseudorandomStringWithAlphabet { + /** + * PseudorandomStringWithAlphabet generates a pseudorandom string + * with the specified length and characters set. + * + * For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead. + */ + (length: number, alphabet: string): string + } + interface randomStringByRegex { + /** + * RandomStringByRegex generates a random string matching the regex pattern. + * If optFlags is not set, fallbacks to [syntax.Perl]. + * + * NB! While the source of the randomness comes from [crypto/rand] this method + * is not recommended to be used on its own in critical secure contexts because + * the generated length could vary too much on the used pattern and may not be + * as secure as simply calling [security.RandomString]. + * If you still insist on using it for such purposes, consider at least + * a large enough minimum length for the generated string, e.g. `[a-z0-9]{30}`. + * + * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages. + */ + (pattern: string, ...optFlags: syntax.Flags[]): string + } +} + +namespace filesystem { + /** + * FileReader defines an interface for a file resource reader. + */ + interface FileReader { + [key:string]: any; + open(): io.ReadSeekCloser + } + /** + * File defines a single file [io.ReadSeekCloser] resource. + * + * The file could be from a local path, multipart/form-data header, etc. + */ + interface File { + reader: FileReader + name: string + originalName: string + size: number + } + interface File { + /** + * AsMap implements [core.mapExtractor] and returns a value suitable + * to be used in an API rule expression. 
+ */ + asMap(): _TygojaDict + } + interface newFileFromPath { + /** + * NewFileFromPath creates a new File instance from the provided local file path. + */ + (path: string): (File) + } + interface newFileFromBytes { + /** + * NewFileFromBytes creates a new File instance from the provided byte slice. + */ + (b: string|Array, name: string): (File) + } + interface newFileFromMultipart { + /** + * NewFileFromMultipart creates a new File from the provided multipart header. + */ + (mh: multipart.FileHeader): (File) + } + interface newFileFromURL { + /** + * NewFileFromURL creates a new File from the provided url by + * downloading the resource and load it as BytesReader. + * + * Example + * + * ``` + * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + * defer cancel() + * + * file, err := filesystem.NewFileFromURL(ctx, "https://example.com/image.png") + * ``` + */ + (ctx: context.Context, url: string): (File) + } + /** + * MultipartReader defines a FileReader from [multipart.FileHeader]. + */ + interface MultipartReader { + header?: multipart.FileHeader + } + interface MultipartReader { + /** + * Open implements the [filesystem.FileReader] interface. + */ + open(): io.ReadSeekCloser + } + /** + * PathReader defines a FileReader from a local file path. + */ + interface PathReader { + path: string + } + interface PathReader { + /** + * Open implements the [filesystem.FileReader] interface. + */ + open(): io.ReadSeekCloser + } + /** + * BytesReader defines a FileReader from bytes content. + */ + interface BytesReader { + bytes: string|Array + } + interface BytesReader { + /** + * Open implements the [filesystem.FileReader] interface. + */ + open(): io.ReadSeekCloser + } + type _sSpMxLi = bytes.Reader + interface bytesReadSeekCloser extends _sSpMxLi { + } + interface bytesReadSeekCloser { + /** + * Close implements the [io.ReadSeekCloser] interface. 
+ */ + close(): void + } + interface System { + } + interface newS3 { + /** + * NewS3 initializes an S3 filesystem instance. + * + * NB! Make sure to call `Close()` after you are done working with it. + */ + (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System) + } + interface newLocal { + /** + * NewLocal initializes a new local filesystem instance. + * + * NB! Make sure to call `Close()` after you are done working with it. + */ + (dirPath: string): (System) + } + interface System { + /** + * SetContext assigns the specified context to the current filesystem. + */ + setContext(ctx: context.Context): void + } + interface System { + /** + * Close releases any resources used for the related filesystem. + */ + close(): void + } + interface System { + /** + * Exists checks if file with fileKey path exists or not. + * + * If the file doesn't exist returns false and ErrNotFound. + */ + exists(fileKey: string): boolean + } + interface System { + /** + * Attributes returns the attributes for the file with fileKey path. + * + * If the file doesn't exist it returns ErrNotFound. + */ + attributes(fileKey: string): (blob.Attributes) + } + interface System { + /** + * GetFile returns a file content reader for the given fileKey. + * + * NB! Make sure to call Close() on the file after you are done working with it. + * + * If the file doesn't exist returns ErrNotFound. + */ + getFile(fileKey: string): (blob.Reader) + } + interface System { + /** + * Copy copies the file stored at srcKey to dstKey. + * + * If srcKey file doesn't exist, it returns ErrNotFound. + * + * If dstKey file already exists, it is overwritten. + */ + copy(srcKey: string, dstKey: string): void + } + interface System { + /** + * List returns a flat list with info for all files under the specified prefix. 
+ */ + list(prefix: string): Array<(blob.ListObject | undefined)> + } + interface System { + /** + * Upload writes content into the fileKey location. + */ + upload(content: string|Array, fileKey: string): void + } + interface System { + /** + * UploadFile uploads the provided File to the fileKey location. + */ + uploadFile(file: File, fileKey: string): void + } + interface System { + /** + * UploadMultipart uploads the provided multipart file to the fileKey location. + */ + uploadMultipart(fh: multipart.FileHeader, fileKey: string): void + } + interface System { + /** + * Delete deletes stored file at fileKey location. + * + * If the file doesn't exist returns ErrNotFound. + */ + delete(fileKey: string): void + } + interface System { + /** + * DeletePrefix deletes everything starting with the specified prefix. + * + * The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_"). + */ + deletePrefix(prefix: string): Array + } + interface System { + /** + * Checks if the provided dir prefix doesn't have any files. + * + * A trailing slash will be appended to a non-empty dir string argument + * to ensure that the checked prefix is a "directory". + * + * Returns "false" in case the has at least one file, otherwise - "true". + */ + isEmptyDir(dir: string): boolean + } + interface System { + /** + * Serve serves the file at fileKey location to an HTTP response. + * + * If the `download` query parameter is used the file will be always served for + * download no matter of its type (aka. with "Content-Disposition: attachment"). + * + * Internally this method uses [http.ServeContent] so Range requests, + * If-Match, If-Unmodified-Since, etc. headers are handled transparently. + */ + serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void + } + interface System { + /** + * CreateThumb creates a new thumb image for the file at originalKey location. + * The new thumb file is stored at thumbKey location. 
+ * + * thumbSize is in the format: + * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio + * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio + * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center) + * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top) + * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom) + * - WxHf (eg. 300x100f) - fit inside a WxH viewbox (without cropping) + */ + createThumb(originalKey: string, thumbKey: string, thumbSize: string): void + } +} + +/** + * Package validation provides configurable and extensible rules for validating data of various types. + */ +namespace ozzo_validation { + /** + * Error interface represents an validation error + */ + interface Error { + [key:string]: any; + error(): string + code(): string + message(): string + setMessage(_arg0: string): Error + params(): _TygojaDict + setParams(_arg0: _TygojaDict): Error + } +} + /** * Package dbx provides a set of DB-agnostic and easy-to-use query building methods for relational databases. */ @@ -2826,14 +3811,14 @@ namespace dbx { /** * MssqlBuilder is the builder for SQL Server databases. */ - type _sWweZLs = BaseBuilder - interface MssqlBuilder extends _sWweZLs { + type _sejKnXq = BaseBuilder + interface MssqlBuilder extends _sejKnXq { } /** * MssqlQueryBuilder is the query builder for SQL Server databases. */ - type _slrylmr = BaseQueryBuilder - interface MssqlQueryBuilder extends _slrylmr { + type _scZColE = BaseQueryBuilder + interface MssqlQueryBuilder extends _scZColE { } interface newMssqlBuilder { /** @@ -2904,8 +3889,8 @@ namespace dbx { /** * MysqlBuilder is the builder for MySQL databases. */ - type _sQgmeeB = BaseBuilder - interface MysqlBuilder extends _sQgmeeB { + type _sAfugAD = BaseBuilder + interface MysqlBuilder extends _sAfugAD { } interface newMysqlBuilder { /** @@ -2980,14 +3965,14 @@ namespace dbx { /** * OciBuilder is the builder for Oracle databases. 
*/ - type _syxTjAH = BaseBuilder - interface OciBuilder extends _syxTjAH { + type _sbmOZGJ = BaseBuilder + interface OciBuilder extends _sbmOZGJ { } /** * OciQueryBuilder is the query builder for Oracle databases. */ - type _sAegXQQ = BaseQueryBuilder - interface OciQueryBuilder extends _sAegXQQ { + type _sYFTATC = BaseQueryBuilder + interface OciQueryBuilder extends _sYFTATC { } interface newOciBuilder { /** @@ -3050,8 +4035,8 @@ namespace dbx { /** * PgsqlBuilder is the builder for PostgreSQL databases. */ - type _syhyUPF = BaseBuilder - interface PgsqlBuilder extends _syhyUPF { + type _sstEYYR = BaseBuilder + interface PgsqlBuilder extends _sstEYYR { } interface newPgsqlBuilder { /** @@ -3118,8 +4103,8 @@ namespace dbx { /** * SqliteBuilder is the builder for SQLite databases. */ - type _sCFPLeQ = BaseBuilder - interface SqliteBuilder extends _sCFPLeQ { + type _sMkYrTE = BaseBuilder + interface SqliteBuilder extends _sMkYrTE { } interface newSqliteBuilder { /** @@ -3218,8 +4203,8 @@ namespace dbx { /** * StandardBuilder is the builder that is used by DB for an unknown driver. */ - type _szajavG = BaseBuilder - interface StandardBuilder extends _szajavG { + type _sCwOaji = BaseBuilder + interface StandardBuilder extends _sCwOaji { } interface newStandardBuilder { /** @@ -3285,8 +4270,8 @@ namespace dbx { * DB enhances sql.DB by providing a set of DB-agnostic query building methods. * DB allows easier query building and population of data into Go variables. */ - type _sfEcirX = Builder - interface DB extends _sfEcirX { + type _sTsSRxW = Builder + interface DB extends _sTsSRxW { /** * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc. */ @@ -4090,8 +5075,8 @@ namespace dbx { * Rows enhances sql.Rows by providing additional data query methods. * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row. 
*/ - type _sBdUiuS = sql.Rows - interface Rows extends _sBdUiuS { + type _sNRFNCQ = sql.Rows + interface Rows extends _sNRFNCQ { } interface Rows { /** @@ -4463,8 +5448,8 @@ namespace dbx { }): string } interface structInfo { } - type _seFFTtv = structInfo - interface structValue extends _seFFTtv { + type _siYgCxj = structInfo + interface structValue extends _siYgCxj { } interface fieldInfo { } @@ -4503,8 +5488,8 @@ namespace dbx { /** * Tx enhances sql.Tx with additional querying methods. */ - type _sAVpcIc = Builder - interface Tx extends _sAVpcIc { + type _sVeGhCE = Builder + interface Tx extends _sVeGhCE { } interface Tx { /** @@ -4520,997 +5505,6 @@ namespace dbx { } } -/** - * Package filepath implements utility routines for manipulating filename paths - * in a way compatible with the target operating system-defined file paths. - * - * The filepath package uses either forward slashes or backslashes, - * depending on the operating system. To process paths such as URLs - * that always use forward slashes regardless of the operating - * system, see the [path] package. - */ -namespace filepath { - interface match { - /** - * Match reports whether name matches the shell file name pattern. - * The pattern syntax is: - * - * ``` - * pattern: - * { term } - * term: - * '*' matches any sequence of non-Separator characters - * '?' matches any single non-Separator character - * '[' [ '^' ] { character-range } ']' - * character class (must be non-empty) - * c matches character c (c != '*', '?', '\\', '[') - * '\\' c matches character c - * - * character-range: - * c matches character c (c != '\\', '-', ']') - * '\\' c matches character c - * lo '-' hi matches character c for lo <= c <= hi - * ``` - * - * Match requires pattern to match all of name, not just a substring. - * The only possible returned error is [ErrBadPattern], when pattern - * is malformed. - * - * On Windows, escaping is disabled. Instead, '\\' is treated as - * path separator. 
- */ - (pattern: string, name: string): boolean - } - interface glob { - /** - * Glob returns the names of all files matching pattern or nil - * if there is no matching file. The syntax of patterns is the same - * as in [Match]. The pattern may describe hierarchical names such as - * /usr/*\/bin/ed (assuming the [Separator] is '/'). - * - * Glob ignores file system errors such as I/O errors reading directories. - * The only possible returned error is [ErrBadPattern], when pattern - * is malformed. - */ - (pattern: string): Array - } - interface clean { - /** - * Clean returns the shortest path name equivalent to path - * by purely lexical processing. It applies the following rules - * iteratively until no further processing can be done: - * - * 1. Replace multiple [Separator] elements with a single one. - * 2. Eliminate each . path name element (the current directory). - * 3. Eliminate each inner .. path name element (the parent directory) - * ``` - * along with the non-.. element that precedes it. - * ``` - * 4. Eliminate .. elements that begin a rooted path: - * ``` - * that is, replace "/.." by "/" at the beginning of a path, - * assuming Separator is '/'. - * ``` - * - * The returned path ends in a slash only if it represents a root directory, - * such as "/" on Unix or `C:\` on Windows. - * - * Finally, any occurrences of slash are replaced by Separator. - * - * If the result of this process is an empty string, Clean - * returns the string ".". - * - * On Windows, Clean does not modify the volume name other than to replace - * occurrences of "/" with `\`. - * For example, Clean("//host/share/../x") returns `\\host\share\x`. 
- * - * See also Rob Pike, “Lexical File Names in Plan 9 or - * Getting Dot-Dot Right,” - * https://9p.io/sys/doc/lexnames.html - */ - (path: string): string - } - interface isLocal { - /** - * IsLocal reports whether path, using lexical analysis only, has all of these properties: - * - * ``` - * - is within the subtree rooted at the directory in which path is evaluated - * - is not an absolute path - * - is not empty - * - on Windows, is not a reserved name such as "NUL" - * ``` - * - * If IsLocal(path) returns true, then - * Join(base, path) will always produce a path contained within base and - * Clean(path) will always produce an unrooted path with no ".." path elements. - * - * IsLocal is a purely lexical operation. - * In particular, it does not account for the effect of any symbolic links - * that may exist in the filesystem. - */ - (path: string): boolean - } - interface localize { - /** - * Localize converts a slash-separated path into an operating system path. - * The input path must be a valid path as reported by [io/fs.ValidPath]. - * - * Localize returns an error if the path cannot be represented by the operating system. - * For example, the path a\b is rejected on Windows, on which \ is a separator - * character and cannot be part of a filename. - * - * The path returned by Localize will always be local, as reported by IsLocal. - */ - (path: string): string - } - interface toSlash { - /** - * ToSlash returns the result of replacing each separator character - * in path with a slash ('/') character. Multiple separators are - * replaced by multiple slashes. - */ - (path: string): string - } - interface fromSlash { - /** - * FromSlash returns the result of replacing each slash ('/') character - * in path with a separator character. Multiple slashes are replaced - * by multiple separators. - * - * See also the Localize function, which converts a slash-separated path - * as used by the io/fs package to an operating system path. 
- */ - (path: string): string - } - interface splitList { - /** - * SplitList splits a list of paths joined by the OS-specific [ListSeparator], - * usually found in PATH or GOPATH environment variables. - * Unlike strings.Split, SplitList returns an empty slice when passed an empty - * string. - */ - (path: string): Array - } - interface split { - /** - * Split splits path immediately following the final [Separator], - * separating it into a directory and file name component. - * If there is no Separator in path, Split returns an empty dir - * and file set to path. - * The returned values have the property that path = dir+file. - */ - (path: string): [string, string] - } - interface join { - /** - * Join joins any number of path elements into a single path, - * separating them with an OS specific [Separator]. Empty elements - * are ignored. The result is Cleaned. However, if the argument - * list is empty or all its elements are empty, Join returns - * an empty string. - * On Windows, the result will only be a UNC path if the first - * non-empty element is a UNC path. - */ - (...elem: string[]): string - } - interface ext { - /** - * Ext returns the file name extension used by path. - * The extension is the suffix beginning at the final dot - * in the final element of path; it is empty if there is - * no dot. - */ - (path: string): string - } - interface evalSymlinks { - /** - * EvalSymlinks returns the path name after the evaluation of any symbolic - * links. - * If path is relative the result will be relative to the current directory, - * unless one of the components is an absolute symbolic link. - * EvalSymlinks calls [Clean] on the result. - */ - (path: string): string - } - interface isAbs { - /** - * IsAbs reports whether the path is absolute. - */ - (path: string): boolean - } - interface abs { - /** - * Abs returns an absolute representation of path. 
- * If the path is not absolute it will be joined with the current - * working directory to turn it into an absolute path. The absolute - * path name for a given file is not guaranteed to be unique. - * Abs calls [Clean] on the result. - */ - (path: string): string - } - interface rel { - /** - * Rel returns a relative path that is lexically equivalent to targpath when - * joined to basepath with an intervening separator. That is, - * [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself. - * On success, the returned path will always be relative to basepath, - * even if basepath and targpath share no elements. - * An error is returned if targpath can't be made relative to basepath or if - * knowing the current working directory would be necessary to compute it. - * Rel calls [Clean] on the result. - */ - (basepath: string, targpath: string): string - } - /** - * WalkFunc is the type of the function called by [Walk] to visit each - * file or directory. - * - * The path argument contains the argument to Walk as a prefix. - * That is, if Walk is called with root argument "dir" and finds a file - * named "a" in that directory, the walk function will be called with - * argument "dir/a". - * - * The directory and file are joined with Join, which may clean the - * directory name: if Walk is called with the root argument "x/../dir" - * and finds a file named "a" in that directory, the walk function will - * be called with argument "dir/a", not "x/../dir/a". - * - * The info argument is the fs.FileInfo for the named path. - * - * The error result returned by the function controls how Walk continues. - * If the function returns the special value [SkipDir], Walk skips the - * current directory (path if info.IsDir() is true, otherwise path's - * parent directory). If the function returns the special value [SkipAll], - * Walk skips all remaining files and directories. 
Otherwise, if the function - * returns a non-nil error, Walk stops entirely and returns that error. - * - * The err argument reports an error related to path, signaling that Walk - * will not walk into that directory. The function can decide how to - * handle that error; as described earlier, returning the error will - * cause Walk to stop walking the entire tree. - * - * Walk calls the function with a non-nil err argument in two cases. - * - * First, if an [os.Lstat] on the root directory or any directory or file - * in the tree fails, Walk calls the function with path set to that - * directory or file's path, info set to nil, and err set to the error - * from os.Lstat. - * - * Second, if a directory's Readdirnames method fails, Walk calls the - * function with path set to the directory's path, info, set to an - * [fs.FileInfo] describing the directory, and err set to the error from - * Readdirnames. - */ - interface WalkFunc {(path: string, info: fs.FileInfo, err: Error): void } - interface walkDir { - /** - * WalkDir walks the file tree rooted at root, calling fn for each file or - * directory in the tree, including root. - * - * All errors that arise visiting files and directories are filtered by fn: - * see the [fs.WalkDirFunc] documentation for details. - * - * The files are walked in lexical order, which makes the output deterministic - * but requires WalkDir to read an entire directory into memory before proceeding - * to walk that directory. - * - * WalkDir does not follow symbolic links. - * - * WalkDir calls fn with paths that use the separator character appropriate - * for the operating system. This is unlike [io/fs.WalkDir], which always - * uses slash separated paths. - */ - (root: string, fn: fs.WalkDirFunc): void - } - interface walk { - /** - * Walk walks the file tree rooted at root, calling fn for each file or - * directory in the tree, including root. 
- * - * All errors that arise visiting files and directories are filtered by fn: - * see the [WalkFunc] documentation for details. - * - * The files are walked in lexical order, which makes the output deterministic - * but requires Walk to read an entire directory into memory before proceeding - * to walk that directory. - * - * Walk does not follow symbolic links. - * - * Walk is less efficient than [WalkDir], introduced in Go 1.16, - * which avoids calling os.Lstat on every visited file or directory. - */ - (root: string, fn: WalkFunc): void - } - interface base { - /** - * Base returns the last element of path. - * Trailing path separators are removed before extracting the last element. - * If the path is empty, Base returns ".". - * If the path consists entirely of separators, Base returns a single separator. - */ - (path: string): string - } - interface dir { - /** - * Dir returns all but the last element of path, typically the path's directory. - * After dropping the final element, Dir calls [Clean] on the path and trailing - * slashes are removed. - * If the path is empty, Dir returns ".". - * If the path consists entirely of separators, Dir returns a single separator. - * The returned path does not end in a separator unless it is the root directory. - */ - (path: string): string - } - interface volumeName { - /** - * VolumeName returns leading volume name. - * Given "C:\foo\bar" it returns "C:" on Windows. - * Given "\\host\share\foo" it returns "\\host\share". - * On other platforms it returns "". - */ - (path: string): string - } - interface hasPrefix { - /** - * HasPrefix exists for historical compatibility and should not be used. - * - * Deprecated: HasPrefix does not respect path boundaries and - * does not ignore case when required. - */ - (p: string, prefix: string): boolean - } -} - -namespace security { - interface s256Challenge { - /** - * S256Challenge creates base64 encoded sha256 challenge string derived from code. 
- * The padding of the result base64 string is stripped per [RFC 7636]. - * - * [RFC 7636]: https://datatracker.ietf.org/doc/html/rfc7636#section-4.2 - */ - (code: string): string - } - interface md5 { - /** - * MD5 creates md5 hash from the provided plain text. - */ - (text: string): string - } - interface sha256 { - /** - * SHA256 creates sha256 hash as defined in FIPS 180-4 from the provided text. - */ - (text: string): string - } - interface sha512 { - /** - * SHA512 creates sha512 hash as defined in FIPS 180-4 from the provided text. - */ - (text: string): string - } - interface hs256 { - /** - * HS256 creates a HMAC hash with sha256 digest algorithm. - */ - (text: string, secret: string): string - } - interface hs512 { - /** - * HS512 creates a HMAC hash with sha512 digest algorithm. - */ - (text: string, secret: string): string - } - interface equal { - /** - * Equal compares two hash strings for equality without leaking timing information. - */ - (hash1: string, hash2: string): boolean - } - // @ts-ignore - import crand = rand - interface encrypt { - /** - * Encrypt encrypts "data" with the specified "key" (must be valid 32 char AES key). - * - * This method uses AES-256-GCM block cypher mode. - */ - (data: string|Array, key: string): string - } - interface decrypt { - /** - * Decrypt decrypts encrypted text with key (must be valid 32 chars AES key). - * - * This method uses AES-256-GCM block cypher mode. - */ - (cipherText: string, key: string): string|Array - } - interface parseUnverifiedJWT { - /** - * ParseUnverifiedJWT parses JWT and returns its claims - * but DOES NOT verify the signature. - * - * It verifies only the exp, iat and nbf claims. - */ - (token: string): jwt.MapClaims - } - interface parseJWT { - /** - * ParseJWT verifies and parses JWT and returns its claims. - */ - (token: string, verificationKey: string): jwt.MapClaims - } - interface newJWT { - /** - * NewJWT generates and returns new HS256 signed JWT. 
- */ - (payload: jwt.MapClaims, signingKey: string, duration: time.Duration): string - } - // @ts-ignore - import cryptoRand = rand - // @ts-ignore - import mathRand = rand - interface randomString { - /** - * RandomString generates a cryptographically random string with the specified length. - * - * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. - */ - (length: number): string - } - interface randomStringWithAlphabet { - /** - * RandomStringWithAlphabet generates a cryptographically random string - * with the specified length and characters set. - * - * It panics if for some reason rand.Int returns a non-nil error. - */ - (length: number, alphabet: string): string - } - interface pseudorandomString { - /** - * PseudorandomString generates a pseudorandom string with the specified length. - * - * The generated string matches [A-Za-z0-9]+ and it's transparent to URL-encoding. - * - * For a cryptographically random string (but a little bit slower) use RandomString instead. - */ - (length: number): string - } - interface pseudorandomStringWithAlphabet { - /** - * PseudorandomStringWithAlphabet generates a pseudorandom string - * with the specified length and characters set. - * - * For a cryptographically random (but a little bit slower) use RandomStringWithAlphabet instead. - */ - (length: number, alphabet: string): string - } - interface randomStringByRegex { - /** - * RandomStringByRegex generates a random string matching the regex pattern. - * If optFlags is not set, fallbacks to [syntax.Perl]. - * - * NB! While the source of the randomness comes from [crypto/rand] this method - * is not recommended to be used on its own in critical secure contexts because - * the generated length could vary too much on the used pattern and may not be - * as secure as simply calling [security.RandomString]. - * If you still insist on using it for such purposes, consider at least - * a large enough minimum length for the generated string, e.g. 
`[a-z0-9]{30}`. - * - * This function is inspired by github.com/pipe01/revregexp, github.com/lucasjones/reggen and other similar packages. - */ - (pattern: string, ...optFlags: syntax.Flags[]): string - } -} - -/** - * Package exec runs external commands. It wraps os.StartProcess to make it - * easier to remap stdin and stdout, connect I/O with pipes, and do other - * adjustments. - * - * Unlike the "system" library call from C and other languages, the - * os/exec package intentionally does not invoke the system shell and - * does not expand any glob patterns or handle other expansions, - * pipelines, or redirections typically done by shells. The package - * behaves more like C's "exec" family of functions. To expand glob - * patterns, either call the shell directly, taking care to escape any - * dangerous input, or use the [path/filepath] package's Glob function. - * To expand environment variables, use package os's ExpandEnv. - * - * Note that the examples in this package assume a Unix system. - * They may not run on Windows, and they do not run in the Go Playground - * used by golang.org and godoc.org. - * - * # Executables in the current directory - * - * The functions [Command] and [LookPath] look for a program - * in the directories listed in the current path, following the - * conventions of the host operating system. - * Operating systems have for decades included the current - * directory in this search, sometimes implicitly and sometimes - * configured explicitly that way by default. - * Modern practice is that including the current directory - * is usually unexpected and often leads to security problems. - * - * To avoid those security problems, as of Go 1.19, this package will not resolve a program - * using an implicit or explicit path entry relative to the current directory. - * That is, if you run [LookPath]("go"), it will not successfully return - * ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. 
- * Instead, if the usual path algorithms would result in that answer, - * these functions return an error err satisfying [errors.Is](err, [ErrDot]). - * - * For example, consider these two program snippets: - * - * ``` - * path, err := exec.LookPath("prog") - * if err != nil { - * log.Fatal(err) - * } - * use(path) - * ``` - * - * and - * - * ``` - * cmd := exec.Command("prog") - * if err := cmd.Run(); err != nil { - * log.Fatal(err) - * } - * ``` - * - * These will not find and run ./prog or .\prog.exe, - * no matter how the current path is configured. - * - * Code that always wants to run a program from the current directory - * can be rewritten to say "./prog" instead of "prog". - * - * Code that insists on including results from relative path entries - * can instead override the error using an errors.Is check: - * - * ``` - * path, err := exec.LookPath("prog") - * if errors.Is(err, exec.ErrDot) { - * err = nil - * } - * if err != nil { - * log.Fatal(err) - * } - * use(path) - * ``` - * - * and - * - * ``` - * cmd := exec.Command("prog") - * if errors.Is(cmd.Err, exec.ErrDot) { - * cmd.Err = nil - * } - * if err := cmd.Run(); err != nil { - * log.Fatal(err) - * } - * ``` - * - * Setting the environment variable GODEBUG=execerrdot=0 - * disables generation of ErrDot entirely, temporarily restoring the pre-Go 1.19 - * behavior for programs that are unable to apply more targeted fixes. - * A future version of Go may remove support for this variable. - * - * Before adding such overrides, make sure you understand the - * security implications of doing so. - * See https://go.dev/blog/path-security for more information. - */ -namespace exec { - interface command { - /** - * Command returns the [Cmd] struct to execute the named program with - * the given arguments. - * - * It sets only the Path and Args in the returned structure. - * - * If name contains no path separators, Command uses [LookPath] to - * resolve name to a complete path if possible. 
Otherwise it uses name - * directly as Path. - * - * The returned Cmd's Args field is constructed from the command name - * followed by the elements of arg, so arg should not include the - * command name itself. For example, Command("echo", "hello"). - * Args[0] is always name, not the possibly resolved Path. - * - * On Windows, processes receive the whole command line as a single string - * and do their own parsing. Command combines and quotes Args into a command - * line string with an algorithm compatible with applications using - * CommandLineToArgvW (which is the most common way). Notable exceptions are - * msiexec.exe and cmd.exe (and thus, all batch files), which have a different - * unquoting algorithm. In these or other similar cases, you can do the - * quoting yourself and provide the full command line in SysProcAttr.CmdLine, - * leaving Args empty. - */ - (name: string, ...arg: string[]): (Cmd) - } -} - -namespace filesystem { - /** - * FileReader defines an interface for a file resource reader. - */ - interface FileReader { - [key:string]: any; - open(): io.ReadSeekCloser - } - /** - * File defines a single file [io.ReadSeekCloser] resource. - * - * The file could be from a local path, multipart/form-data header, etc. - */ - interface File { - reader: FileReader - name: string - originalName: string - size: number - } - interface File { - /** - * AsMap implements [core.mapExtractor] and returns a value suitable - * to be used in an API rule expression. - */ - asMap(): _TygojaDict - } - interface newFileFromPath { - /** - * NewFileFromPath creates a new File instance from the provided local file path. - */ - (path: string): (File) - } - interface newFileFromBytes { - /** - * NewFileFromBytes creates a new File instance from the provided byte slice. - */ - (b: string|Array, name: string): (File) - } - interface newFileFromMultipart { - /** - * NewFileFromMultipart creates a new File from the provided multipart header. 
- */ - (mh: multipart.FileHeader): (File) - } - interface newFileFromURL { - /** - * NewFileFromURL creates a new File from the provided url by - * downloading the resource and load it as BytesReader. - * - * Example - * - * ``` - * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - * defer cancel() - * - * file, err := filesystem.NewFileFromURL(ctx, "https://example.com/image.png") - * ``` - */ - (ctx: context.Context, url: string): (File) - } - /** - * MultipartReader defines a FileReader from [multipart.FileHeader]. - */ - interface MultipartReader { - header?: multipart.FileHeader - } - interface MultipartReader { - /** - * Open implements the [filesystem.FileReader] interface. - */ - open(): io.ReadSeekCloser - } - /** - * PathReader defines a FileReader from a local file path. - */ - interface PathReader { - path: string - } - interface PathReader { - /** - * Open implements the [filesystem.FileReader] interface. - */ - open(): io.ReadSeekCloser - } - /** - * BytesReader defines a FileReader from bytes content. - */ - interface BytesReader { - bytes: string|Array - } - interface BytesReader { - /** - * Open implements the [filesystem.FileReader] interface. - */ - open(): io.ReadSeekCloser - } - type _spzrlQg = bytes.Reader - interface bytesReadSeekCloser extends _spzrlQg { - } - interface bytesReadSeekCloser { - /** - * Close implements the [io.ReadSeekCloser] interface. - */ - close(): void - } - interface System { - } - interface newS3 { - /** - * NewS3 initializes an S3 filesystem instance. - * - * NB! Make sure to call `Close()` after you are done working with it. - */ - (bucketName: string, region: string, endpoint: string, accessKey: string, secretKey: string, s3ForcePathStyle: boolean): (System) - } - interface newLocal { - /** - * NewLocal initializes a new local filesystem instance. - * - * NB! Make sure to call `Close()` after you are done working with it. 
- */ - (dirPath: string): (System) - } - interface System { - /** - * SetContext assigns the specified context to the current filesystem. - */ - setContext(ctx: context.Context): void - } - interface System { - /** - * Close releases any resources used for the related filesystem. - */ - close(): void - } - interface System { - /** - * Exists checks if file with fileKey path exists or not. - * - * If the file doesn't exist returns false and ErrNotFound. - */ - exists(fileKey: string): boolean - } - interface System { - /** - * Attributes returns the attributes for the file with fileKey path. - * - * If the file doesn't exist it returns ErrNotFound. - */ - attributes(fileKey: string): (blob.Attributes) - } - interface System { - /** - * GetFile returns a file content reader for the given fileKey. - * - * NB! Make sure to call Close() on the file after you are done working with it. - * - * If the file doesn't exist returns ErrNotFound. - */ - getFile(fileKey: string): (blob.Reader) - } - interface System { - /** - * Copy copies the file stored at srcKey to dstKey. - * - * If srcKey file doesn't exist, it returns ErrNotFound. - * - * If dstKey file already exists, it is overwritten. - */ - copy(srcKey: string, dstKey: string): void - } - interface System { - /** - * List returns a flat list with info for all files under the specified prefix. - */ - list(prefix: string): Array<(blob.ListObject | undefined)> - } - interface System { - /** - * Upload writes content into the fileKey location. - */ - upload(content: string|Array, fileKey: string): void - } - interface System { - /** - * UploadFile uploads the provided File to the fileKey location. - */ - uploadFile(file: File, fileKey: string): void - } - interface System { - /** - * UploadMultipart uploads the provided multipart file to the fileKey location. - */ - uploadMultipart(fh: multipart.FileHeader, fileKey: string): void - } - interface System { - /** - * Delete deletes stored file at fileKey location. 
- * - * If the file doesn't exist returns ErrNotFound. - */ - delete(fileKey: string): void - } - interface System { - /** - * DeletePrefix deletes everything starting with the specified prefix. - * - * The prefix could be subpath (ex. "/a/b/") or filename prefix (ex. "/a/b/file_"). - */ - deletePrefix(prefix: string): Array - } - interface System { - /** - * Checks if the provided dir prefix doesn't have any files. - * - * A trailing slash will be appended to a non-empty dir string argument - * to ensure that the checked prefix is a "directory". - * - * Returns "false" in case the has at least one file, otherwise - "true". - */ - isEmptyDir(dir: string): boolean - } - interface System { - /** - * Serve serves the file at fileKey location to an HTTP response. - * - * If the `download` query parameter is used the file will be always served for - * download no matter of its type (aka. with "Content-Disposition: attachment"). - * - * Internally this method uses [http.ServeContent] so Range requests, - * If-Match, If-Unmodified-Since, etc. headers are handled transparently. - */ - serve(res: http.ResponseWriter, req: http.Request, fileKey: string, name: string): void - } - interface System { - /** - * CreateThumb creates a new thumb image for the file at originalKey location. - * The new thumb file is stored at thumbKey location. - * - * thumbSize is in the format: - * - 0xH (eg. 0x100) - resize to H height preserving the aspect ratio - * - Wx0 (eg. 300x0) - resize to W width preserving the aspect ratio - * - WxH (eg. 300x100) - resize and crop to WxH viewbox (from center) - * - WxHt (eg. 300x100t) - resize and crop to WxH viewbox (from top) - * - WxHb (eg. 300x100b) - resize and crop to WxH viewbox (from bottom) - * - WxHf (eg. 
300x100f) - fit inside a WxH viewbox (without cropping) - */ - createThumb(originalKey: string, thumbKey: string, thumbSize: string): void - } - // @ts-ignore - import v4 = signer - // @ts-ignore - import smithyhttp = http - interface ignoredHeadersKey { - } -} - -/** - * Package template is a thin wrapper around the standard html/template - * and text/template packages that implements a convenient registry to - * load and cache templates on the fly concurrently. - * - * It was created to assist the JSVM plugin HTML rendering, but could be used in other Go code. - * - * Example: - * - * ``` - * registry := template.NewRegistry() - * - * html1, err := registry.LoadFiles( - * // the files set wil be parsed only once and then cached - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "John"}) - * - * html2, err := registry.LoadFiles( - * // reuse the already parsed and cached files set - * "layout.html", - * "content.html", - * ).Render(map[string]any{"name": "Jane"}) - * ``` - */ -namespace template { - interface newRegistry { - /** - * NewRegistry creates and initializes a new templates registry with - * some defaults (eg. global "raw" template function for unescaped HTML). - * - * Use the Registry.Load* methods to load templates into the registry. - */ - (): (Registry) - } - /** - * Registry defines a templates registry that is safe to be used by multiple goroutines. - * - * Use the Registry.Load* methods to load templates into the registry. - */ - interface Registry { - } - interface Registry { - /** - * AddFuncs registers new global template functions. - * - * The key of each map entry is the function name that will be used in the templates. - * If a function with the map entry name already exists it will be replaced with the new one. - * - * The value of each map entry is a function that must have either a - * single return value, or two return values of which the second has type error. 
- * - * Example: - * - * ``` - * r.AddFuncs(map[string]any{ - * "toUpper": func(str string) string { - * return strings.ToUppser(str) - * }, - * ... - * }) - * ``` - */ - addFuncs(funcs: _TygojaDict): (Registry) - } - interface Registry { - /** - * LoadFiles caches (if not already) the specified filenames set as a - * single template and returns a ready to use Renderer instance. - * - * There must be at least 1 filename specified. - */ - loadFiles(...filenames: string[]): (Renderer) - } - interface Registry { - /** - * LoadString caches (if not already) the specified inline string as a - * single template and returns a ready to use Renderer instance. - */ - loadString(text: string): (Renderer) - } - interface Registry { - /** - * LoadFS caches (if not already) the specified fs and globPatterns - * pair as single template and returns a ready to use Renderer instance. - * - * There must be at least 1 file matching the provided globPattern(s) - * (note that most file names serves as glob patterns matching themselves). - */ - loadFS(fsys: fs.FS, ...globPatterns: string[]): (Renderer) - } - /** - * Renderer defines a single parsed template. - */ - interface Renderer { - } - interface Renderer { - /** - * Render executes the template with the specified data as the dot object - * and returns the result as plain string. - */ - render(data: any): string - } -} - -/** - * Package validation provides configurable and extensible rules for validating data of various types. - */ -namespace ozzo_validation { - /** - * Error interface represents an validation error - */ - interface Error { - [key:string]: any; - error(): string - code(): string - message(): string - setMessage(_arg0: string): Error - params(): _TygojaDict - setParams(_arg0: _TygojaDict): Error - } -} - /** * Package core is the backbone of PocketBase. * @@ -7123,8 +7117,8 @@ namespace core { /** * AuthOrigin defines a Record proxy for working with the authOrigins collection. 
*/ - type _sXjnJFS = Record - interface AuthOrigin extends _sXjnJFS { + type _sqErxva = Record + interface AuthOrigin extends _sqErxva { } interface newAuthOrigin { /** @@ -7818,8 +7812,8 @@ namespace core { /** * @todo experiment eventually replacing the rules *string with a struct? */ - type _suBhwZm = BaseModel - interface baseCollection extends _suBhwZm { + type _sIsZUCV = BaseModel + interface baseCollection extends _sIsZUCV { listRule?: string viewRule?: string createRule?: string @@ -7846,8 +7840,8 @@ namespace core { /** * Collection defines the table, fields and various options related to a set of records. */ - type _sbygvdT = baseCollection&collectionAuthOptions&collectionViewOptions - interface Collection extends _sbygvdT { + type _sDzrdYC = baseCollection&collectionAuthOptions&collectionViewOptions + interface Collection extends _sDzrdYC { } interface newCollection { /** @@ -8676,8 +8670,8 @@ namespace core { /** * RequestEvent defines the PocketBase router handler event. */ - type _sKGDyUb = router.Event - interface RequestEvent extends _sKGDyUb { + type _sfbdRlS = router.Event + interface RequestEvent extends _sfbdRlS { app: App auth?: Record } @@ -8737,8 +8731,8 @@ namespace core { */ clone(): (RequestInfo) } - type _sOBZmPY = hook.Event&RequestEvent - interface BatchRequestEvent extends _sOBZmPY { + type _sKLhYaY = hook.Event&RequestEvent + interface BatchRequestEvent extends _sKLhYaY { batch: Array<(InternalRequest | undefined)> } interface InternalRequest { @@ -8775,24 +8769,24 @@ namespace core { interface baseCollectionEventData { tags(): Array } - type _sYNZdgx = hook.Event - interface BootstrapEvent extends _sYNZdgx { + type _suaVoQY = hook.Event + interface BootstrapEvent extends _suaVoQY { app: App } - type _sfVveKf = hook.Event - interface TerminateEvent extends _sfVveKf { + type _swrQCUh = hook.Event + interface TerminateEvent extends _swrQCUh { app: App isRestart: boolean } - type _sBKETDR = hook.Event - interface BackupEvent extends 
_sBKETDR { + type _sctLyGm = hook.Event + interface BackupEvent extends _sctLyGm { app: App context: context.Context name: string // the name of the backup to create/restore. exclude: Array // list of dir entries to exclude from the backup create/restore. } - type _spAiUFT = hook.Event - interface ServeEvent extends _spAiUFT { + type _suEJJva = hook.Event + interface ServeEvent extends _suEJJva { app: App router?: router.Router server?: http.Server @@ -8815,31 +8809,31 @@ namespace core { */ installerFunc: (app: App, systemSuperuser: Record, baseURL: string) => void } - type _sLYXOHF = hook.Event&RequestEvent - interface SettingsListRequestEvent extends _sLYXOHF { + type _sxQheHa = hook.Event&RequestEvent + interface SettingsListRequestEvent extends _sxQheHa { settings?: Settings } - type _sshwyIs = hook.Event&RequestEvent - interface SettingsUpdateRequestEvent extends _sshwyIs { + type _srhpJpV = hook.Event&RequestEvent + interface SettingsUpdateRequestEvent extends _srhpJpV { oldSettings?: Settings newSettings?: Settings } - type _sYRPDLZ = hook.Event - interface SettingsReloadEvent extends _sYRPDLZ { + type _skEnUWq = hook.Event + interface SettingsReloadEvent extends _skEnUWq { app: App } - type _sOWPOYB = hook.Event - interface MailerEvent extends _sOWPOYB { + type _smNBXFO = hook.Event + interface MailerEvent extends _smNBXFO { app: App mailer: mailer.Mailer message?: mailer.Message } - type _sjrcHjj = MailerEvent&baseRecordEventData - interface MailerRecordEvent extends _sjrcHjj { + type _sacJzAt = MailerEvent&baseRecordEventData + interface MailerRecordEvent extends _sacJzAt { meta: _TygojaDict } - type _svHXVSF = hook.Event&baseModelEventData - interface ModelEvent extends _svHXVSF { + type _sViUFbp = hook.Event&baseModelEventData + interface ModelEvent extends _sViUFbp { app: App context: context.Context /** @@ -8851,12 +8845,12 @@ namespace core { */ type: string } - type _sEKBOAj = ModelEvent - interface ModelErrorEvent extends _sEKBOAj { + type 
_sopgRtL = ModelEvent + interface ModelErrorEvent extends _sopgRtL { error: Error } - type _sDKTjsh = hook.Event&baseRecordEventData - interface RecordEvent extends _sDKTjsh { + type _sIDFyEA = hook.Event&baseRecordEventData + interface RecordEvent extends _sIDFyEA { app: App context: context.Context /** @@ -8868,12 +8862,12 @@ namespace core { */ type: string } - type _sVnzxqj = RecordEvent - interface RecordErrorEvent extends _sVnzxqj { + type _skVTglp = RecordEvent + interface RecordErrorEvent extends _skVTglp { error: Error } - type _shffWQr = hook.Event&baseCollectionEventData - interface CollectionEvent extends _shffWQr { + type _sZqWowa = hook.Event&baseCollectionEventData + interface CollectionEvent extends _sZqWowa { app: App context: context.Context /** @@ -8885,95 +8879,95 @@ namespace core { */ type: string } - type _sipOpzl = CollectionEvent - interface CollectionErrorEvent extends _sipOpzl { + type _sSaYsvN = CollectionEvent + interface CollectionErrorEvent extends _sSaYsvN { error: Error } - type _srLjgGG = hook.Event&RequestEvent&baseRecordEventData - interface FileTokenRequestEvent extends _srLjgGG { + type _sfGARNE = hook.Event&RequestEvent&baseRecordEventData + interface FileTokenRequestEvent extends _sfGARNE { token: string } - type _sMiTbMH = hook.Event&RequestEvent&baseCollectionEventData - interface FileDownloadRequestEvent extends _sMiTbMH { + type _sOMrEmW = hook.Event&RequestEvent&baseCollectionEventData + interface FileDownloadRequestEvent extends _sOMrEmW { record?: Record fileField?: FileField servedPath: string servedName: string } - type _slIRsyc = hook.Event&RequestEvent - interface CollectionsListRequestEvent extends _slIRsyc { + type _sWgYcWu = hook.Event&RequestEvent + interface CollectionsListRequestEvent extends _sWgYcWu { collections: Array<(Collection | undefined)> result?: search.Result } - type _sgnCOAA = hook.Event&RequestEvent - interface CollectionsImportRequestEvent extends _sgnCOAA { + type _siNgmUx = 
hook.Event&RequestEvent + interface CollectionsImportRequestEvent extends _siNgmUx { collectionsData: Array<_TygojaDict> deleteMissing: boolean } - type _sLvgtJh = hook.Event&RequestEvent&baseCollectionEventData - interface CollectionRequestEvent extends _sLvgtJh { + type _sNuaiMv = hook.Event&RequestEvent&baseCollectionEventData + interface CollectionRequestEvent extends _sNuaiMv { } - type _sVYSLQz = hook.Event&RequestEvent - interface RealtimeConnectRequestEvent extends _sVYSLQz { + type _sSwwaCF = hook.Event&RequestEvent + interface RealtimeConnectRequestEvent extends _sSwwaCF { client: subscriptions.Client /** * note: modifying it after the connect has no effect */ idleTimeout: time.Duration } - type _szVLXoO = hook.Event&RequestEvent - interface RealtimeMessageEvent extends _szVLXoO { + type _svQPnsB = hook.Event&RequestEvent + interface RealtimeMessageEvent extends _svQPnsB { client: subscriptions.Client message?: subscriptions.Message } - type _sgtygRL = hook.Event&RequestEvent - interface RealtimeSubscribeRequestEvent extends _sgtygRL { + type _saPDVHI = hook.Event&RequestEvent + interface RealtimeSubscribeRequestEvent extends _saPDVHI { client: subscriptions.Client subscriptions: Array } - type _sptdldX = hook.Event&RequestEvent&baseCollectionEventData - interface RecordsListRequestEvent extends _sptdldX { + type _spgXjdN = hook.Event&RequestEvent&baseCollectionEventData + interface RecordsListRequestEvent extends _spgXjdN { /** * @todo consider removing and maybe add as generic to the search.Result? 
*/ records: Array<(Record | undefined)> result?: search.Result } - type _sOlbGQv = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEvent extends _sOlbGQv { + type _sHhmAHA = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEvent extends _sHhmAHA { record?: Record } - type _syWDviE = hook.Event&baseRecordEventData - interface RecordEnrichEvent extends _syWDviE { + type _sLuUYYk = hook.Event&baseRecordEventData + interface RecordEnrichEvent extends _sLuUYYk { app: App requestInfo?: RequestInfo } - type _sSZvanR = hook.Event&RequestEvent&baseCollectionEventData - interface RecordCreateOTPRequestEvent extends _sSZvanR { + type _sXXVyWa = hook.Event&RequestEvent&baseCollectionEventData + interface RecordCreateOTPRequestEvent extends _sXXVyWa { record?: Record password: string } - type _sPRLtpu = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOTPRequestEvent extends _sPRLtpu { + type _sdUDedh = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOTPRequestEvent extends _sdUDedh { record?: Record otp?: OTP } - type _sWjsIGY = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRequestEvent extends _sWjsIGY { + type _sAMBjtl = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRequestEvent extends _sAMBjtl { record?: Record token: string meta: any authMethod: string } - type _sKhyjxD = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithPasswordRequestEvent extends _sKhyjxD { + type _sXDpRRU = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithPasswordRequestEvent extends _sXDpRRU { record?: Record identity: string identityField: string password: string } - type _sSLCTMY = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOAuth2RequestEvent extends _sSLCTMY { + type _sBTDICk = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOAuth2RequestEvent 
extends _sBTDICk { providerName: string providerClient: auth.Provider record?: Record @@ -8981,41 +8975,41 @@ namespace core { createData: _TygojaDict isNewRecord: boolean } - type _sdMtVCN = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRefreshRequestEvent extends _sdMtVCN { + type _sNQGFjX = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRefreshRequestEvent extends _sNQGFjX { record?: Record } - type _sFKAUAk = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestPasswordResetRequestEvent extends _sFKAUAk { + type _sAHGBIR = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestPasswordResetRequestEvent extends _sAHGBIR { record?: Record } - type _seiumWo = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmPasswordResetRequestEvent extends _seiumWo { + type _sArEBJJ = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmPasswordResetRequestEvent extends _sArEBJJ { record?: Record } - type _syGniwc = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestVerificationRequestEvent extends _syGniwc { + type _spjorJU = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestVerificationRequestEvent extends _spjorJU { record?: Record } - type _sEyoZsq = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmVerificationRequestEvent extends _sEyoZsq { + type _srfGVbA = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmVerificationRequestEvent extends _srfGVbA { record?: Record } - type _slmTjxi = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEmailChangeRequestEvent extends _slmTjxi { + type _sDbcbsb = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEmailChangeRequestEvent extends _sDbcbsb { record?: Record newEmail: string } - type _svZFQAi = hook.Event&RequestEvent&baseCollectionEventData - interface 
RecordConfirmEmailChangeRequestEvent extends _svZFQAi { + type _sShMfHP = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmEmailChangeRequestEvent extends _sShMfHP { record?: Record newEmail: string } /** * ExternalAuth defines a Record proxy for working with the externalAuths collection. */ - type _sJxDBtb = Record - interface ExternalAuth extends _sJxDBtb { + type _sqTiNdW = Record + interface ExternalAuth extends _sqTiNdW { } interface newExternalAuth { /** @@ -11355,8 +11349,8 @@ namespace core { interface onlyFieldType { type: string } - type _shJAxEV = Field - interface fieldWithType extends _shJAxEV { + type _sLXaMrs = Field + interface fieldWithType extends _sLXaMrs { type: string } interface fieldWithType { @@ -11388,8 +11382,8 @@ namespace core { */ scan(value: any): void } - type _sBQszNi = BaseModel - interface Log extends _sBQszNi { + type _sWsfWti = BaseModel + interface Log extends _sWsfWti { created: types.DateTime data: types.JSONMap message: string @@ -11435,8 +11429,8 @@ namespace core { /** * MFA defines a Record proxy for working with the mfas collection. */ - type _sDhrnLT = Record - interface MFA extends _sDhrnLT { + type _svCDVEl = Record + interface MFA extends _svCDVEl { } interface newMFA { /** @@ -11658,8 +11652,8 @@ namespace core { /** * OTP defines a Record proxy for working with the otps collection. */ - type _sWDwyVB = Record - interface OTP extends _sWDwyVB { + type _sBymELa = Record + interface OTP extends _sBymELa { } interface newOTP { /** @@ -11895,8 +11889,8 @@ namespace core { } interface runner { } - type _sqXedFV = BaseModel - interface Record extends _sqXedFV { + type _sjipUNd = BaseModel + interface Record extends _sjipUNd { } interface newRecord { /** @@ -12365,8 +12359,8 @@ namespace core { * BaseRecordProxy implements the [RecordProxy] interface and it is intended * to be used as embed to custom user provided Record proxy structs. 
*/ - type _sodETgL = Record - interface BaseRecordProxy extends _sodETgL { + type _sJZTNTm = Record + interface BaseRecordProxy extends _sJZTNTm { } interface BaseRecordProxy { /** @@ -12615,8 +12609,8 @@ namespace core { /** * Settings defines the PocketBase app settings. */ - type _sLdgopF = settings - interface Settings extends _sLdgopF { + type _seECYhN = settings + interface Settings extends _seECYhN { } interface Settings { /** @@ -12917,8 +12911,8 @@ namespace core { */ durationTime(): time.Duration } - type _snosmsk = BaseModel - interface Param extends _snosmsk { + type _sEWnpkR = BaseModel + interface Param extends _sEWnpkR { created: types.DateTime updated: types.DateTime value: types.JSONRaw @@ -13432,8 +13426,8 @@ namespace apis { */ (limitBytes: number): (hook.Handler) } - type _sNImrxn = io.ReadCloser - interface limitedReader extends _sNImrxn { + type _sxFUeqL = io.ReadCloser + interface limitedReader extends _sxFUeqL { } interface limitedReader { read(b: string|Array): number @@ -13584,8 +13578,8 @@ namespace apis { */ (config: GzipConfig): (hook.Handler) } - type _sKvKKzu = http.ResponseWriter&io.Writer - interface gzipResponseWriter extends _sKvKKzu { + type _sRXenFJ = http.ResponseWriter&io.Writer + interface gzipResponseWriter extends _sRXenFJ { } interface gzipResponseWriter { writeHeader(code: number): void @@ -13605,11 +13599,11 @@ namespace apis { interface gzipResponseWriter { unwrap(): http.ResponseWriter } - type _sMimWpG = sync.RWMutex - interface rateLimiter extends _sMimWpG { + type _slMSCsI = sync.RWMutex + interface rateLimiter extends _slMSCsI { } - type _sKxzZAt = sync.Mutex - interface fixedWindow extends _sKxzZAt { + type _sAwhZRh = sync.Mutex + interface fixedWindow extends _sAwhZRh { } interface realtimeSubscribeForm { clientId: string @@ -13850,8 +13844,8 @@ namespace pocketbase { * It implements [CoreApp] via embedding and all of the app interface methods * could be accessed directly through the instance (eg. 
PocketBase.DataDir()). */ - type _sKRdoPc = CoreApp - interface PocketBase extends _sKRdoPc { + type _sZtVfLX = CoreApp + interface PocketBase extends _sZtVfLX { /** * RootCmd is the main console command */ @@ -14246,21 +14240,6 @@ namespace bytes { } } -/** - * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer - * object, creating another object (Reader or Writer) that also implements - * the interface but provides buffering and some help for textual I/O. - */ -namespace bufio { - /** - * ReadWriter stores pointers to a [Reader] and a [Writer]. - * It implements [io.ReadWriter]. - */ - type _soayqjf = Reader&Writer - interface ReadWriter extends _soayqjf { - } -} - /** * Package syscall contains an interface to the low-level operating system * primitives. The details vary depending on the underlying system, and @@ -14994,169 +14973,6 @@ namespace time { } } -/** - * Package context defines the Context type, which carries deadlines, - * cancellation signals, and other request-scoped values across API boundaries - * and between processes. - * - * Incoming requests to a server should create a [Context], and outgoing - * calls to servers should accept a Context. The chain of function - * calls between them must propagate the Context, optionally replacing - * it with a derived Context created using [WithCancel], [WithDeadline], - * [WithTimeout], or [WithValue]. When a Context is canceled, all - * Contexts derived from it are also canceled. - * - * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a - * Context (the parent) and return a derived Context (the child) and a - * [CancelFunc]. Calling the CancelFunc cancels the child and its - * children, removes the parent's reference to the child, and stops - * any associated timers. Failing to call the CancelFunc leaks the - * child and its children until the parent is canceled or the timer - * fires. The go vet tool checks that CancelFuncs are used on all - * control-flow paths. 
- * - * The [WithCancelCause] function returns a [CancelCauseFunc], which - * takes an error and records it as the cancellation cause. Calling - * [Cause] on the canceled context or any of its children retrieves - * the cause. If no cause is specified, Cause(ctx) returns the same - * value as ctx.Err(). - * - * Programs that use Contexts should follow these rules to keep interfaces - * consistent across packages and enable static analysis tools to check context - * propagation: - * - * Do not store Contexts inside a struct type; instead, pass a Context - * explicitly to each function that needs it. The Context should be the first - * parameter, typically named ctx: - * - * ``` - * func DoSomething(ctx context.Context, arg Arg) error { - * // ... use ctx ... - * } - * ``` - * - * Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] - * if you are unsure about which Context to use. - * - * Use context Values only for request-scoped data that transits processes and - * APIs, not for passing optional parameters to functions. - * - * The same Context may be passed to functions running in different goroutines; - * Contexts are safe for simultaneous use by multiple goroutines. - * - * See https://blog.golang.org/context for example code for a server that uses - * Contexts. - */ -namespace context { - /** - * A Context carries a deadline, a cancellation signal, and other values across - * API boundaries. - * - * Context's methods may be called by multiple goroutines simultaneously. - */ - interface Context { - [key:string]: any; - /** - * Deadline returns the time when work done on behalf of this context - * should be canceled. Deadline returns ok==false when no deadline is - * set. Successive calls to Deadline return the same results. - */ - deadline(): [time.Time, boolean] - /** - * Done returns a channel that's closed when work done on behalf of this - * context should be canceled. Done may return nil if this context can - * never be canceled. 
Successive calls to Done return the same value. - * The close of the Done channel may happen asynchronously, - * after the cancel function returns. - * - * WithCancel arranges for Done to be closed when cancel is called; - * WithDeadline arranges for Done to be closed when the deadline - * expires; WithTimeout arranges for Done to be closed when the timeout - * elapses. - * - * Done is provided for use in select statements: - * - * // Stream generates values with DoSomething and sends them to out - * // until DoSomething returns an error or ctx.Done is closed. - * func Stream(ctx context.Context, out chan<- Value) error { - * for { - * v, err := DoSomething(ctx) - * if err != nil { - * return err - * } - * select { - * case <-ctx.Done(): - * return ctx.Err() - * case out <- v: - * } - * } - * } - * - * See https://blog.golang.org/pipelines for more examples of how to use - * a Done channel for cancellation. - */ - done(): undefined - /** - * If Done is not yet closed, Err returns nil. - * If Done is closed, Err returns a non-nil error explaining why: - * Canceled if the context was canceled - * or DeadlineExceeded if the context's deadline passed. - * After Err returns a non-nil error, successive calls to Err return the same error. - */ - err(): void - /** - * Value returns the value associated with this context for key, or nil - * if no value is associated with key. Successive calls to Value with - * the same key returns the same result. - * - * Use context values only for request-scoped data that transits - * processes and API boundaries, not for passing optional parameters to - * functions. - * - * A key identifies a specific value in a Context. Functions that wish - * to store values in Context typically allocate a key in a global - * variable then use that key as the argument to context.WithValue and - * Context.Value. A key can be any type that supports equality; - * packages should define keys as an unexported type to avoid - * collisions. 
- * - * Packages that define a Context key should provide type-safe accessors - * for the values stored using that key: - * - * ``` - * // Package user defines a User type that's stored in Contexts. - * package user - * - * import "context" - * - * // User is the type of value stored in the Contexts. - * type User struct {...} - * - * // key is an unexported type for keys defined in this package. - * // This prevents collisions with keys defined in other packages. - * type key int - * - * // userKey is the key for user.User values in Contexts. It is - * // unexported; clients use user.NewContext and user.FromContext - * // instead of using this key directly. - * var userKey key - * - * // NewContext returns a new Context that carries value u. - * func NewContext(ctx context.Context, u *User) context.Context { - * return context.WithValue(ctx, userKey, u) - * } - * - * // FromContext returns the User value stored in ctx, if any. - * func FromContext(ctx context.Context) (*User, bool) { - * u, ok := ctx.Value(userKey).(*User) - * return u, ok - * } - * ``` - */ - value(key: any): any - } -} - /** * Package fs defines basic interfaces to a file system. * A file system can be provided by the host operating system @@ -15357,6 +15173,346 @@ namespace fs { interface WalkDirFunc {(path: string, d: DirEntry, err: Error): void } } +/** + * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer + * object, creating another object (Reader or Writer) that also implements + * the interface but provides buffering and some help for textual I/O. + */ +namespace bufio { + /** + * ReadWriter stores pointers to a [Reader] and a [Writer]. + * It implements [io.ReadWriter]. + */ + type _sUNKXrY = Reader&Writer + interface ReadWriter extends _sUNKXrY { + } +} + +/** + * Package syntax parses regular expressions into parse trees and compiles + * parse trees into programs. 
Most clients of regular expressions will use the + * facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. + * + * # Syntax + * + * The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. + * Parts of the syntax can be disabled by passing alternate flags to [Parse]. + * + * Single characters: + * + * ``` + * . any character, possibly including newline (flag s=true) + * [xyz] character class + * [^xyz] negated character class + * \d Perl character class + * \D negated Perl character class + * [[:alpha:]] ASCII character class + * [[:^alpha:]] negated ASCII character class + * \pN Unicode character class (one-letter name) + * \p{Greek} Unicode character class + * \PN negated Unicode character class (one-letter name) + * \P{Greek} negated Unicode character class + * ``` + * + * Composites: + * + * ``` + * xy x followed by y + * x|y x or y (prefer x) + * ``` + * + * Repetitions: + * + * ``` + * x* zero or more x, prefer more + * x+ one or more x, prefer more + * x? zero or one x, prefer one + * x{n,m} n or n+1 or ... or m x, prefer more + * x{n,} n or more x, prefer more + * x{n} exactly n x + * x*? zero or more x, prefer fewer + * x+? one or more x, prefer fewer + * x?? zero or one x, prefer zero + * x{n,m}? n or n+1 or ... or m x, prefer fewer + * x{n,}? n or more x, prefer fewer + * x{n}? exactly n x + * ``` + * + * Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} + * reject forms that create a minimum or maximum repetition count above 1000. + * Unlimited repetitions are not subject to this restriction. 
+ * + * Grouping: + * + * ``` + * (re) numbered capturing group (submatch) + * (?Pre) named & numbered capturing group (submatch) + * (?re) named & numbered capturing group (submatch) + * (?:re) non-capturing group + * (?flags) set flags within current group; non-capturing + * (?flags:re) set flags during re; non-capturing + * + * Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: + * + * i case-insensitive (default false) + * m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) + * s let . match \n (default false) + * U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) + * ``` + * + * Empty strings: + * + * ``` + * ^ at beginning of text or line (flag m=true) + * $ at end of text (like \z not \Z) or line (flag m=true) + * \A at beginning of text + * \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) + * \B not at ASCII word boundary + * \z at end of text + * ``` + * + * Escape sequences: + * + * ``` + * \a bell (== \007) + * \f form feed (== \014) + * \t horizontal tab (== \011) + * \n newline (== \012) + * \r carriage return (== \015) + * \v vertical tab character (== \013) + * \* literal *, for any punctuation character * + * \123 octal character code (up to three digits) + * \x7F hex character code (exactly two digits) + * \x{10FFFF} hex character code + * \Q...\E literal text ... even if ... 
has punctuation + * ``` + * + * Character class elements: + * + * ``` + * x single character + * A-Z character range (inclusive) + * \d Perl character class + * [:foo:] ASCII character class foo + * \p{Foo} Unicode character class Foo + * \pF Unicode character class F (one-letter name) + * ``` + * + * Named character classes as character class elements: + * + * ``` + * [\d] digits (== \d) + * [^\d] not digits (== \D) + * [\D] not digits (== \D) + * [^\D] not not digits (== \d) + * [[:name:]] named ASCII class inside character class (== [:name:]) + * [^[:name:]] named ASCII class inside negated character class (== [:^name:]) + * [\p{Name}] named Unicode property inside character class (== \p{Name}) + * [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) + * ``` + * + * Perl character classes (all ASCII-only): + * + * ``` + * \d digits (== [0-9]) + * \D not digits (== [^0-9]) + * \s whitespace (== [\t\n\f\r ]) + * \S not whitespace (== [^\t\n\f\r ]) + * \w word characters (== [0-9A-Za-z_]) + * \W not word characters (== [^0-9A-Za-z_]) + * ``` + * + * ASCII character classes: + * + * ``` + * [[:alnum:]] alphanumeric (== [0-9A-Za-z]) + * [[:alpha:]] alphabetic (== [A-Za-z]) + * [[:ascii:]] ASCII (== [\x00-\x7F]) + * [[:blank:]] blank (== [\t ]) + * [[:cntrl:]] control (== [\x00-\x1F\x7F]) + * [[:digit:]] digits (== [0-9]) + * [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) + * [[:lower:]] lower case (== [a-z]) + * [[:print:]] printable (== [ -~] == [ [:graph:]]) + * [[:punct:]] punctuation (== [!-/:-@[-`{-~]) + * [[:space:]] whitespace (== [\t\n\v\f\r ]) + * [[:upper:]] upper case (== [A-Z]) + * [[:word:]] word characters (== [0-9A-Za-z_]) + * [[:xdigit:]] hex digit (== [0-9A-Fa-f]) + * ``` + * + * Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. + */ +namespace syntax { + /** + * Flags control the behavior of the parser and record information about regexp context. 
+ */ + interface Flags extends Number{} +} + +/** + * Package context defines the Context type, which carries deadlines, + * cancellation signals, and other request-scoped values across API boundaries + * and between processes. + * + * Incoming requests to a server should create a [Context], and outgoing + * calls to servers should accept a Context. The chain of function + * calls between them must propagate the Context, optionally replacing + * it with a derived Context created using [WithCancel], [WithDeadline], + * [WithTimeout], or [WithValue]. When a Context is canceled, all + * Contexts derived from it are also canceled. + * + * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a + * Context (the parent) and return a derived Context (the child) and a + * [CancelFunc]. Calling the CancelFunc cancels the child and its + * children, removes the parent's reference to the child, and stops + * any associated timers. Failing to call the CancelFunc leaks the + * child and its children until the parent is canceled or the timer + * fires. The go vet tool checks that CancelFuncs are used on all + * control-flow paths. + * + * The [WithCancelCause] function returns a [CancelCauseFunc], which + * takes an error and records it as the cancellation cause. Calling + * [Cause] on the canceled context or any of its children retrieves + * the cause. If no cause is specified, Cause(ctx) returns the same + * value as ctx.Err(). + * + * Programs that use Contexts should follow these rules to keep interfaces + * consistent across packages and enable static analysis tools to check context + * propagation: + * + * Do not store Contexts inside a struct type; instead, pass a Context + * explicitly to each function that needs it. The Context should be the first + * parameter, typically named ctx: + * + * ``` + * func DoSomething(ctx context.Context, arg Arg) error { + * // ... use ctx ... + * } + * ``` + * + * Do not pass a nil [Context], even if a function permits it. 
Pass [context.TODO] + * if you are unsure about which Context to use. + * + * Use context Values only for request-scoped data that transits processes and + * APIs, not for passing optional parameters to functions. + * + * The same Context may be passed to functions running in different goroutines; + * Contexts are safe for simultaneous use by multiple goroutines. + * + * See https://blog.golang.org/context for example code for a server that uses + * Contexts. + */ +namespace context { + /** + * A Context carries a deadline, a cancellation signal, and other values across + * API boundaries. + * + * Context's methods may be called by multiple goroutines simultaneously. + */ + interface Context { + [key:string]: any; + /** + * Deadline returns the time when work done on behalf of this context + * should be canceled. Deadline returns ok==false when no deadline is + * set. Successive calls to Deadline return the same results. + */ + deadline(): [time.Time, boolean] + /** + * Done returns a channel that's closed when work done on behalf of this + * context should be canceled. Done may return nil if this context can + * never be canceled. Successive calls to Done return the same value. + * The close of the Done channel may happen asynchronously, + * after the cancel function returns. + * + * WithCancel arranges for Done to be closed when cancel is called; + * WithDeadline arranges for Done to be closed when the deadline + * expires; WithTimeout arranges for Done to be closed when the timeout + * elapses. + * + * Done is provided for use in select statements: + * + * // Stream generates values with DoSomething and sends them to out + * // until DoSomething returns an error or ctx.Done is closed. 
+ * func Stream(ctx context.Context, out chan<- Value) error { + * for { + * v, err := DoSomething(ctx) + * if err != nil { + * return err + * } + * select { + * case <-ctx.Done(): + * return ctx.Err() + * case out <- v: + * } + * } + * } + * + * See https://blog.golang.org/pipelines for more examples of how to use + * a Done channel for cancellation. + */ + done(): undefined + /** + * If Done is not yet closed, Err returns nil. + * If Done is closed, Err returns a non-nil error explaining why: + * Canceled if the context was canceled + * or DeadlineExceeded if the context's deadline passed. + * After Err returns a non-nil error, successive calls to Err return the same error. + */ + err(): void + /** + * Value returns the value associated with this context for key, or nil + * if no value is associated with key. Successive calls to Value with + * the same key returns the same result. + * + * Use context values only for request-scoped data that transits + * processes and API boundaries, not for passing optional parameters to + * functions. + * + * A key identifies a specific value in a Context. Functions that wish + * to store values in Context typically allocate a key in a global + * variable then use that key as the argument to context.WithValue and + * Context.Value. A key can be any type that supports equality; + * packages should define keys as an unexported type to avoid + * collisions. + * + * Packages that define a Context key should provide type-safe accessors + * for the values stored using that key: + * + * ``` + * // Package user defines a User type that's stored in Contexts. + * package user + * + * import "context" + * + * // User is the type of value stored in the Contexts. + * type User struct {...} + * + * // key is an unexported type for keys defined in this package. + * // This prevents collisions with keys defined in other packages. + * type key int + * + * // userKey is the key for user.User values in Contexts. 
It is + * // unexported; clients use user.NewContext and user.FromContext + * // instead of using this key directly. + * var userKey key + * + * // NewContext returns a new Context that carries value u. + * func NewContext(ctx context.Context, u *User) context.Context { + * return context.WithValue(ctx, userKey, u) + * } + * + * // FromContext returns the User value stored in ctx, if any. + * func FromContext(ctx context.Context) (*User, bool) { + * u, ok := ctx.Value(userKey).(*User) + * return u, ok + * } + * ``` + */ + value(key: any): any + } +} + /** * Package sql provides a generic interface around SQL (or SQL-like) * databases. @@ -16032,166 +16188,129 @@ namespace sql { } } -/** - * Package syntax parses regular expressions into parse trees and compiles - * parse trees into programs. Most clients of regular expressions will use the - * facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. - * - * # Syntax - * - * The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. - * Parts of the syntax can be disabled by passing alternate flags to [Parse]. - * - * Single characters: - * - * ``` - * . any character, possibly including newline (flag s=true) - * [xyz] character class - * [^xyz] negated character class - * \d Perl character class - * \D negated Perl character class - * [[:alpha:]] ASCII character class - * [[:^alpha:]] negated ASCII character class - * \pN Unicode character class (one-letter name) - * \p{Greek} Unicode character class - * \PN negated Unicode character class (one-letter name) - * \P{Greek} negated Unicode character class - * ``` - * - * Composites: - * - * ``` - * xy x followed by y - * x|y x or y (prefer x) - * ``` - * - * Repetitions: - * - * ``` - * x* zero or more x, prefer more - * x+ one or more x, prefer more - * x? zero or one x, prefer one - * x{n,m} n or n+1 or ... 
or m x, prefer more - * x{n,} n or more x, prefer more - * x{n} exactly n x - * x*? zero or more x, prefer fewer - * x+? one or more x, prefer fewer - * x?? zero or one x, prefer zero - * x{n,m}? n or n+1 or ... or m x, prefer fewer - * x{n,}? n or more x, prefer fewer - * x{n}? exactly n x - * ``` - * - * Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} - * reject forms that create a minimum or maximum repetition count above 1000. - * Unlimited repetitions are not subject to this restriction. - * - * Grouping: - * - * ``` - * (re) numbered capturing group (submatch) - * (?Pre) named & numbered capturing group (submatch) - * (?re) named & numbered capturing group (submatch) - * (?:re) non-capturing group - * (?flags) set flags within current group; non-capturing - * (?flags:re) set flags during re; non-capturing - * - * Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: - * - * i case-insensitive (default false) - * m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) - * s let . match \n (default false) - * U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) - * ``` - * - * Empty strings: - * - * ``` - * ^ at beginning of text or line (flag m=true) - * $ at end of text (like \z not \Z) or line (flag m=true) - * \A at beginning of text - * \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) - * \B not at ASCII word boundary - * \z at end of text - * ``` - * - * Escape sequences: - * - * ``` - * \a bell (== \007) - * \f form feed (== \014) - * \t horizontal tab (== \011) - * \n newline (== \012) - * \r carriage return (== \015) - * \v vertical tab character (== \013) - * \* literal *, for any punctuation character * - * \123 octal character code (up to three digits) - * \x7F hex character code (exactly two digits) - * \x{10FFFF} hex character code - * \Q...\E literal text ... even if ... 
has punctuation - * ``` - * - * Character class elements: - * - * ``` - * x single character - * A-Z character range (inclusive) - * \d Perl character class - * [:foo:] ASCII character class foo - * \p{Foo} Unicode character class Foo - * \pF Unicode character class F (one-letter name) - * ``` - * - * Named character classes as character class elements: - * - * ``` - * [\d] digits (== \d) - * [^\d] not digits (== \D) - * [\D] not digits (== \D) - * [^\D] not not digits (== \d) - * [[:name:]] named ASCII class inside character class (== [:name:]) - * [^[:name:]] named ASCII class inside negated character class (== [:^name:]) - * [\p{Name}] named Unicode property inside character class (== \p{Name}) - * [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) - * ``` - * - * Perl character classes (all ASCII-only): - * - * ``` - * \d digits (== [0-9]) - * \D not digits (== [^0-9]) - * \s whitespace (== [\t\n\f\r ]) - * \S not whitespace (== [^\t\n\f\r ]) - * \w word characters (== [0-9A-Za-z_]) - * \W not word characters (== [^0-9A-Za-z_]) - * ``` - * - * ASCII character classes: - * - * ``` - * [[:alnum:]] alphanumeric (== [0-9A-Za-z]) - * [[:alpha:]] alphabetic (== [A-Za-z]) - * [[:ascii:]] ASCII (== [\x00-\x7F]) - * [[:blank:]] blank (== [\t ]) - * [[:cntrl:]] control (== [\x00-\x1F\x7F]) - * [[:digit:]] digits (== [0-9]) - * [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) - * [[:lower:]] lower case (== [a-z]) - * [[:print:]] printable (== [ -~] == [ [:graph:]]) - * [[:punct:]] punctuation (== [!-/:-@[-`{-~]) - * [[:space:]] whitespace (== [\t\n\v\f\r ]) - * [[:upper:]] upper case (== [A-Z]) - * [[:word:]] word characters (== [0-9A-Za-z_]) - * [[:xdigit:]] hex digit (== [0-9A-Fa-f]) - * ``` - * - * Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. 
- */ -namespace syntax { +namespace store { /** - * Flags control the behavior of the parser and record information about regexp context. + * Store defines a concurrent safe in memory key-value data store. */ - interface Flags extends Number{} + interface Store { + } + interface Store { + /** + * Reset clears the store and replaces the store data with a + * shallow copy of the provided newData. + */ + reset(newData: _TygojaDict): void + } + interface Store { + /** + * Length returns the current number of elements in the store. + */ + length(): number + } + interface Store { + /** + * RemoveAll removes all the existing store entries. + */ + removeAll(): void + } + interface Store { + /** + * Remove removes a single entry from the store. + * + * Remove does nothing if key doesn't exist in the store. + */ + remove(key: K): void + } + interface Store { + /** + * Has checks if element with the specified key exist or not. + */ + has(key: K): boolean + } + interface Store { + /** + * Get returns a single element value from the store. + * + * If key is not set, the zero T value is returned. + */ + get(key: K): T + } + interface Store { + /** + * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not. + */ + getOk(key: K): [T, boolean] + } + interface Store { + /** + * GetAll returns a shallow copy of the current store data. + */ + getAll(): _TygojaDict + } + interface Store { + /** + * Values returns a slice with all of the current store values. + */ + values(): Array + } + interface Store { + /** + * Set sets (or overwrite if already exists) a new value for key. + */ + set(key: K, value: T): void + } + interface Store { + /** + * SetFunc sets (or overwrite if already exists) a new value resolved + * from the function callback for the provided key. + * + * The function callback receives as argument the old store element value (if exists). + * If there is no old store element, the argument will be the T zero value. 
+ * + * Example: + * + * ``` + * s := store.New[string, int](nil) + * s.SetFunc("count", func(old int) int { + * return old + 1 + * }) + * ``` + */ + setFunc(key: K, fn: (old: T) => T): void + } + interface Store { + /** + * GetOrSet retrieves a single existing value for the provided key + * or stores a new one if it doesn't exist. + */ + getOrSet(key: K, setFunc: () => T): T + } + interface Store { + /** + * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. + * + * This method is similar to Set() but **it will skip adding new elements** + * to the store if the store length has reached the specified limit. + * false is returned if maxAllowedElements limit is reached. + */ + setIfLessThanLimit(key: K, value: T, maxAllowedElements: number): boolean + } + interface Store { + /** + * UnmarshalJSON implements [json.Unmarshaler] and imports the + * provided JSON data into the store. + * + * The store entries that match with the ones from the data will be overwritten with the new value. + */ + unmarshalJSON(data: string|Array): void + } + interface Store { + /** + * MarshalJSON implements [json.Marshaler] and export the current + * store data into valid JSON. + */ + marshalJSON(): string|Array + } } /** @@ -16361,6 +16480,346 @@ namespace net { } } +/** + * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html + * + * See README.md for more info. + */ +namespace jwt { + /** + * MapClaims is a claims type that uses the map[string]interface{} for JSON + * decoding. This is the default claims type if you don't supply one + */ + interface MapClaims extends _TygojaDict{} + interface MapClaims { + /** + * GetExpirationTime implements the Claims interface. + */ + getExpirationTime(): (NumericDate) + } + interface MapClaims { + /** + * GetNotBefore implements the Claims interface. 
+ */ + getNotBefore(): (NumericDate) + } + interface MapClaims { + /** + * GetIssuedAt implements the Claims interface. + */ + getIssuedAt(): (NumericDate) + } + interface MapClaims { + /** + * GetAudience implements the Claims interface. + */ + getAudience(): ClaimStrings + } + interface MapClaims { + /** + * GetIssuer implements the Claims interface. + */ + getIssuer(): string + } + interface MapClaims { + /** + * GetSubject implements the Claims interface. + */ + getSubject(): string + } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { + /** + * DateTime represents a [time.Time] instance in UTC that is wrapped + * and serialized using the app default date layout. + */ + interface DateTime { + } + interface DateTime { + /** + * Time returns the internal [time.Time] instance. + */ + time(): time.Time + } + interface DateTime { + /** + * Add returns a new DateTime based on the current DateTime + the specified duration. + */ + add(duration: time.Duration): DateTime + } + interface DateTime { + /** + * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. + * + * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], + * the maximum (or minimum) duration will be returned. + */ + sub(u: DateTime): time.Duration + } + interface DateTime { + /** + * AddDate returns a new DateTime based on the current one + duration. + * + * It follows the same rules as [time.AddDate]. + */ + addDate(years: number, months: number, days: number): DateTime + } + interface DateTime { + /** + * After reports whether the current DateTime instance is after u. + */ + after(u: DateTime): boolean + } + interface DateTime { + /** + * Before reports whether the current DateTime instance is before u. + */ + before(u: DateTime): boolean + } + interface DateTime { + /** + * Compare compares the current DateTime instance with u. 
+ * If the current instance is before u, it returns -1. + * If the current instance is after u, it returns +1. + * If they're the same, it returns 0. + */ + compare(u: DateTime): number + } + interface DateTime { + /** + * Equal reports whether the current DateTime and u represent the same time instant. + * Two DateTime can be equal even if they are in different locations. + * For example, 6:00 +0200 and 4:00 UTC are Equal. + */ + equal(u: DateTime): boolean + } + interface DateTime { + /** + * Unix returns the current DateTime as a Unix time, aka. + * the number of seconds elapsed since January 1, 1970 UTC. + */ + unix(): number + } + interface DateTime { + /** + * IsZero checks whether the current DateTime instance has zero time value. + */ + isZero(): boolean + } + interface DateTime { + /** + * String serializes the current DateTime instance into a formatted + * UTC date string. + * + * The zero value is serialized to an empty string. + */ + string(): string + } + interface DateTime { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface DateTime { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface DateTime { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface DateTime { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current DateTime instance. + */ + scan(value: any): void + } + /** + * JSONArray defines a slice that is safe for json and db read/write. + */ + interface JSONArray extends Array{} + /** + * JSONMap defines a map that is safe for json and db read/write. + */ + interface JSONMap extends _TygojaDict{} + /** + * JSONRaw defines a json value type that is safe for db read/write. + */ + interface JSONRaw extends Array{} + interface JSONRaw { + /** + * String returns the current JSONRaw instance as a json encoded string. 
+ */ + string(): string + } + interface JSONRaw { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface JSONRaw { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface JSONRaw { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface JSONRaw { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current JSONRaw instance. + */ + scan(value: any): void + } +} + +namespace search { + /** + * Result defines the returned search result structure. + */ + interface Result { + items: any + page: number + perPage: number + totalItems: number + totalPages: number + } + /** + * ResolverResult defines a single FieldResolver.Resolve() successfully parsed result. + */ + interface ResolverResult { + /** + * Identifier is the plain SQL identifier/column that will be used + * in the final db expression as left or right operand. + */ + identifier: string + /** + * NoCoalesce instructs to not use COALESCE or NULL fallbacks + * when building the identifier expression. + */ + noCoalesce: boolean + /** + * Params is a map with db placeholder->value pairs that will be added + * to the query when building both resolved operands/sides in a single expression. + */ + params: dbx.Params + /** + * MultiMatchSubQuery is an optional sub query expression that will be added + * in addition to the combined ResolverResult expression during build. + */ + multiMatchSubQuery: dbx.Expression + /** + * AfterBuild is an optional function that will be called after building + * and combining the result of both resolved operands/sides in a single expression. + */ + afterBuild: (expr: dbx.Expression) => dbx.Expression + } +} + +namespace hook { + /** + * Event implements [Resolver] and it is intended to be used as a base + * Hook event that you can embed in your custom typed event structs. 
+ * + * Example: + * + * ``` + * type CustomEvent struct { + * hook.Event + * + * SomeField int + * } + * ``` + */ + interface Event { + } + interface Event { + /** + * Next calls the next hook handler. + */ + next(): void + } + /** + * Handler defines a single Hook handler. + * Multiple handlers can share the same id. + * If Id is not explicitly set it will be autogenerated by Hook.Add and Hook.AddHandler. + */ + interface Handler { + /** + * Func defines the handler function to execute. + * + * Note that users need to call e.Next() in order to proceed with + * the execution of the hook chain. + */ + func: (_arg0: T) => void + /** + * Id is the unique identifier of the handler. + * + * It could be used later to remove the handler from a hook via [Hook.Remove]. + * + * If missing, an autogenerated value will be assigned when adding + * the handler to a hook. + */ + id: string + /** + * Priority allows changing the default exec priority of the handler within a hook. + * + * If 0, the handler will be executed in the same order it was registered. + */ + priority: number + } + /** + * Hook defines a generic concurrent safe structure for managing event hooks. + * + * When using custom event it must embed the base [hook.Event]. + * + * Example: + * + * ``` + * type CustomEvent struct { + * hook.Event + * SomeField int + * } + * + * h := Hook[*CustomEvent]{} + * + * h.BindFunc(func(e *CustomEvent) error { + * println(e.SomeField) + * + * return e.Next() + * }) + * + * h.Trigger(&CustomEvent{ SomeField: 123 }) + * ``` + */ + interface Hook { + } + /** + * TaggedHook defines a proxy hook which register handlers that are triggered only + * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). + */ + type _swRrsDC = mainHook + interface TaggedHook extends _swRrsDC { + } +} + /** * Package multipart implements MIME multipart parsing, as defined in RFC * 2046. 
@@ -17762,976 +18221,672 @@ namespace exec { } } -/** - * Package blob provides an easy and portable way to interact with blobs - * within a storage location. Subpackages contain driver implementations of - * blob for supported services. - * - * See https://gocloud.dev/howto/blob/ for a detailed how-to guide. - * - * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with - * functions in that package. - * - * # Errors - * - * The errors returned from this package can be inspected in several ways: - * - * The Code function from gocloud.dev/gcerrors will return an error code, also - * defined in that package, when invoked on an error. - * - * The Bucket.ErrorAs method can retrieve the driver error underlying the returned - * error. - * - * # OpenCensus Integration - * - * OpenCensus supports tracing and metric collection for multiple languages and - * backend providers. See https://opencensus.io. - * - * This API collects OpenCensus traces and metrics for the following methods: - * ``` - * - Attributes - * - Copy - * - Delete - * - ListPage - * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll - * are included because they call NewRangeReader.) - * - NewWriter, from creation until the call to Close. - * ``` - * - * All trace and metric names begin with the package import path. - * The traces add the method name. - * For example, "gocloud.dev/blob/Attributes". - * The metrics are "completed_calls", a count of completed method calls by driver, - * method and status (error code); and "latency", a distribution of method latency - * by driver and method. - * For example, "gocloud.dev/blob/latency". - * - * It also collects the following metrics: - * ``` - * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver. - * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver. 
- * ``` - * - * To enable trace collection in your application, see "Configure Exporter" at - * https://opencensus.io/quickstart/go/tracing. - * To enable metric collection in your application, see "Exporting stats" at - * https://opencensus.io/quickstart/go/metrics. - */ -namespace blob { +namespace mailer { /** - * Reader reads bytes from a blob. - * It implements io.ReadSeekCloser, and must be closed after - * reads are finished. + * Message defines a generic email message struct. */ - interface Reader { - } - interface Reader { - /** - * Read implements io.Reader (https://golang.org/pkg/io/#Reader). - */ - read(p: string|Array): number - } - interface Reader { - /** - * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). - */ - seek(offset: number, whence: number): number - } - interface Reader { - /** - * Close implements io.Closer (https://golang.org/pkg/io/#Closer). - */ - close(): void - } - interface Reader { - /** - * ContentType returns the MIME type of the blob. - */ - contentType(): string - } - interface Reader { - /** - * ModTime returns the time the blob was last modified. - */ - modTime(): time.Time - } - interface Reader { - /** - * Size returns the size of the blob content in bytes. - */ - size(): number - } - interface Reader { - /** - * As converts i to driver-specific types. - * See https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: { - }): boolean - } - interface Reader { - /** - * WriteTo reads from r and writes to w until there's no more data or - * an error occurs. - * The return value is the number of bytes written to w. - * - * It implements the io.WriterTo interface. 
- */ - writeTo(w: io.Writer): number + interface Message { + from: { address: string; name?: string; } + to: Array<{ address: string; name?: string; }> + bcc: Array<{ address: string; name?: string; }> + cc: Array<{ address: string; name?: string; }> + subject: string + html: string + text: string + headers: _TygojaDict + attachments: _TygojaDict + inlineAttachments: _TygojaDict } /** - * Attributes contains attributes about a blob. + * Mailer defines a base mail client interface. */ - interface Attributes { + interface Mailer { + [key:string]: any; /** - * CacheControl specifies caching attributes that services may use - * when serving the blob. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + * Send sends an email with the provided Message. */ - cacheControl: string - /** - * ContentDisposition specifies whether the blob content is expected to be - * displayed inline or as an attachment. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition - */ - contentDisposition: string - /** - * ContentEncoding specifies the encoding used for the blob's content, if any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding - */ - contentEncoding: string - /** - * ContentLanguage specifies the language used in the blob's content, if any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language - */ - contentLanguage: string - /** - * ContentType is the MIME type of the blob. It will not be empty. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type - */ - contentType: string - /** - * Metadata holds key/value pairs associated with the blob. - * Keys are guaranteed to be in lowercase, even if the backend service - * has case-sensitive keys (although note that Metadata written via - * this package will always be lowercased). 
If there are duplicate - * case-insensitive keys (e.g., "foo" and "FOO"), only one value - * will be kept, and it is undefined which one. - */ - metadata: _TygojaDict - /** - * CreateTime is the time the blob was created, if available. If not available, - * CreateTime will be the zero time. - */ - createTime: time.Time - /** - * ModTime is the time the blob was last modified. - */ - modTime: time.Time - /** - * Size is the size of the blob's content in bytes. - */ - size: number - /** - * MD5 is an MD5 hash of the blob contents or nil if not available. - */ - md5: string|Array - /** - * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. - */ - eTag: string - } - interface Attributes { - /** - * As converts i to driver-specific types. - * See https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: { - }): boolean - } - /** - * ListObject represents a single blob returned from List. - */ - interface ListObject { - /** - * Key is the key for this blob. - */ - key: string - /** - * ModTime is the time the blob was last modified. - */ - modTime: time.Time - /** - * Size is the size of the blob's content in bytes. - */ - size: number - /** - * MD5 is an MD5 hash of the blob contents or nil if not available. - */ - md5: string|Array - /** - * IsDir indicates that this result represents a "directory" in the - * hierarchical namespace, ending in ListOptions.Delimiter. Key can be - * passed as ListOptions.Prefix to list items in the "directory". - * Fields other than Key and IsDir will not be set if IsDir is true. - */ - isDir: boolean - } - interface ListObject { - /** - * As converts i to driver-specific types. 
- * See https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: { - }): boolean - } -} - -namespace store { - /** - * Store defines a concurrent safe in memory key-value data store. - */ - interface Store { - } - interface Store { - /** - * Reset clears the store and replaces the store data with a - * shallow copy of the provided newData. - */ - reset(newData: _TygojaDict): void - } - interface Store { - /** - * Length returns the current number of elements in the store. - */ - length(): number - } - interface Store { - /** - * RemoveAll removes all the existing store entries. - */ - removeAll(): void - } - interface Store { - /** - * Remove removes a single entry from the store. - * - * Remove does nothing if key doesn't exist in the store. - */ - remove(key: K): void - } - interface Store { - /** - * Has checks if element with the specified key exist or not. - */ - has(key: K): boolean - } - interface Store { - /** - * Get returns a single element value from the store. - * - * If key is not set, the zero T value is returned. - */ - get(key: K): T - } - interface Store { - /** - * GetOk is similar to Get but returns also a boolean indicating whether the key exists or not. - */ - getOk(key: K): [T, boolean] - } - interface Store { - /** - * GetAll returns a shallow copy of the current store data. - */ - getAll(): _TygojaDict - } - interface Store { - /** - * Values returns a slice with all of the current store values. - */ - values(): Array - } - interface Store { - /** - * Set sets (or overwrite if already exists) a new value for key. - */ - set(key: K, value: T): void - } - interface Store { - /** - * SetFunc sets (or overwrite if already exists) a new value resolved - * from the function callback for the provided key. 
- * - * The function callback receives as argument the old store element value (if exists). - * If there is no old store element, the argument will be the T zero value. - * - * Example: - * - * ``` - * s := store.New[string, int](nil) - * s.SetFunc("count", func(old int) int { - * return old + 1 - * }) - * ``` - */ - setFunc(key: K, fn: (old: T) => T): void - } - interface Store { - /** - * GetOrSet retrieves a single existing value for the provided key - * or stores a new one if it doesn't exist. - */ - getOrSet(key: K, setFunc: () => T): T - } - interface Store { - /** - * SetIfLessThanLimit sets (or overwrite if already exist) a new value for key. - * - * This method is similar to Set() but **it will skip adding new elements** - * to the store if the store length has reached the specified limit. - * false is returned if maxAllowedElements limit is reached. - */ - setIfLessThanLimit(key: K, value: T, maxAllowedElements: number): boolean - } - interface Store { - /** - * UnmarshalJSON implements [json.Unmarshaler] and imports the - * provided JSON data into the store. - * - * The store entries that match with the ones from the data will be overwritten with the new value. - */ - unmarshalJSON(data: string|Array): void - } - interface Store { - /** - * MarshalJSON implements [json.Marshaler] and export the current - * store data into valid JSON. - */ - marshalJSON(): string|Array + send(message: Message): void } } /** - * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html + * Package slog provides structured logging, + * in which log records include a message, + * a severity level, and various other attributes + * expressed as key-value pairs. * - * See README.md for more info. + * It defines a type, [Logger], + * which provides several methods (such as [Logger.Info] and [Logger.Error]) + * for reporting events of interest. + * + * Each Logger is associated with a [Handler]. 
+ * A Logger output method creates a [Record] from the method arguments + * and passes it to the Handler, which decides how to handle it. + * There is a default Logger accessible through top-level functions + * (such as [Info] and [Error]) that call the corresponding Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. + * As an example, + * + * ``` + * slog.Info("hello", "count", 3) + * ``` + * + * creates a record containing the time of the call, + * a level of Info, the message "hello", and a single + * pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default Logger. + * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. + * Besides these convenience methods for common levels, + * there is also a [Logger.Log] method which takes the level as an argument. + * Each of these methods has a corresponding top-level function that uses the + * default logger. + * + * The default handler formats the log record's message, time, level, and attributes + * as a string and passes it to the [log] package. + * + * ``` + * 2022/11/08 15:28:26 INFO hello count=3 + * ``` + * + * For more control over the output format, create a logger with a different handler. + * This statement uses [New] to create a new logger with a [TextHandler] + * that writes structured records in text form to standard error: + * + * ``` + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * ``` + * + * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously + * parsed by machine. 
This statement: + * + * ``` + * logger.Info("hello", "count", 3) + * ``` + * + * produces this output: + * + * ``` + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * ``` + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * ``` + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * ``` + * + * produces this output: + * + * ``` + * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} + * ``` + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and + * modifying attributes before they are logged. + * + * Setting a logger as the default with + * + * ``` + * slog.SetDefault(logger) + * ``` + * + * will cause the top-level functions like [Info] to use it. + * [SetDefault] also updates the default logger used by the [log] package, + * so that existing applications that use [log.Printf] and related functions + * will send log records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. + * For example, you may wish to include the URL or trace identifier of a server request + * with all log events arising from the request. + * Rather than repeat the attribute with every log call, you can use [Logger.With] + * to construct a new Logger containing the attributes: + * + * ``` + * logger2 := logger.With("url", r.URL) + * ``` + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. + * The result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log event. + * The higher the level, the more severe the event. 
+ * This package defines constants for the most common levels, + * but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or greater. + * One common configuration is to log messages at Info or higher levels, + * suppressing debug logging until it is needed. + * The built-in handlers can be configured with the minimum level to output by + * setting [HandlerOptions.Level]. + * The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value + * fixes the handler's minimum level throughout its lifetime. + * Setting it to a [LevelVar] allows the level to be varied dynamically. + * A LevelVar holds a Level and is safe to read or write from multiple + * goroutines. + * To vary the level dynamically for an entire program, first initialize + * a global LevelVar: + * + * ``` + * var programLevel = new(slog.LevelVar) // Info by default + * ``` + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * ``` + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * ``` + * + * Now the program can change its logging level with a single statement: + * + * ``` + * programLevel.Set(slog.LevelDebug) + * ``` + * + * # Groups + * + * Attributes can be collected into groups. + * A group has a name that is used to qualify the names of its attributes. + * How this qualification is displayed depends on the handler. + * [TextHandler] separates the group and attribute names with a dot. + * [JSONHandler] treats each group as a separate JSON object, with the group name as the key. 
+ * + * Use [Group] to create a Group attribute from a name and a list of key-value pairs: + * + * ``` + * slog.Group("request", + * "method", r.Method, + * "url", r.URL) + * ``` + * + * TextHandler would display this group as + * + * ``` + * request.method=GET request.url=http://example.com + * ``` + * + * JSONHandler would display it as + * + * ``` + * "request":{"method":"GET","url":"http://example.com"} + * ``` + * + * Use [Logger.WithGroup] to qualify all of a Logger's output + * with a group name. Calling WithGroup on a Logger results in a + * new Logger with the same Handler as the original, but with all + * its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, + * where subsystems might use the same keys. + * Pass each subsystem a different Logger with its own group name so that + * potential duplicates are qualified: + * + * ``` + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * ``` + * + * When parseInput logs with parserLogger, its keys will be qualified with "parser", + * so even if it uses the common key "id", the log line will have distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that is + * available at the call site. One example of such information + * is the identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives ending + * in "Context" do. For example, + * + * ``` + * slog.InfoContext(ctx, "message") + * ``` + * + * It is recommended to pass a context to an output method if one is available. 
+ * + * # Attrs and Values + * + * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as + * alternating keys and values. The statement + * + * ``` + * slog.Info("hello", slog.Int("count", 3)) + * ``` + * + * behaves the same as + * + * ``` + * slog.Info("hello", "count", 3) + * ``` + * + * There are convenience constructors for [Attr] such as [Int], [String], and [Bool] + * for common types, as well as the function [Any] for constructing Attrs of any + * type. + * + * The value part of an Attr is a type called [Value]. + * Like an [any], a Value can hold any Go value, + * but it can represent typical values, including all numbers and strings, + * without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. + * It is similar to [Logger.Log] but accepts only Attrs, not alternating + * keys and values; this allows it, too, to avoid allocation. + * + * The call + * + * ``` + * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3)) + * ``` + * + * is the most efficient way to achieve the same output as + * + * ``` + * slog.InfoContext(ctx, "hello", "count", 3) + * ``` + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue + * method is used for logging. You can use this to control how values of the type + * appear in logs. For example, you can redact secret information like passwords, + * or gather a struct's fields in a Group. See the examples under [LogValuer] for + * details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] + * method handles these cases carefully, avoiding infinite loops and unbounded recursion. + * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly. 
+ * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. For instance, if you + * define this function in file mylog.go: + * + * ``` + * func Infof(logger *slog.Logger, format string, args ...any) { + * logger.Info(fmt.Sprintf(format, args...)) + * } + * ``` + * + * and you call it like this in main.go: + * + * ``` + * Infof(slog.Default(), "hello, %s", "world") + * ``` + * + * then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location + * (pc) and pass it to NewRecord. + * The Infof function in the package-level example called "wrapping" + * demonstrates how to do this. + * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record + * before passing it on to another Handler or backend. + * A Record contains a mixture of simple public fields (e.g. Time, Level, Message) + * and hidden fields that refer to state (such as attributes) indirectly. This + * means that modifying a simple copy of a Record (e.g. by calling + * [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. + * Before modifying a Record, use [Record.Clone] to + * create a copy that shares no state with the original, + * or create a new Record with [NewRecord] + * and build up its Attrs by traversing the old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant time, + * the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a Logger with + * that attribute. The built-in handlers will format that attribute only once, at the + * call to [Logger.With]. 
The [Handler] interface is designed to allow that optimization, + * and a well-written Handler should take advantage of it. + * + * The arguments to a log call are always evaluated, even if the log event is discarded. + * If possible, defer computation so that it happens only if the value is actually logged. + * For example, consider the call + * + * ``` + * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily + * ``` + * + * The URL.String method will be called even if the logger discards Info-level events. + * Instead, pass the URL directly: + * + * ``` + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * ``` + * + * The built-in [TextHandler] will call its String method, but only + * if the log event is enabled. + * Avoiding the call to String also preserves the structure of the underlying value. + * For example [JSONHandler] emits the components of the parsed URL as a JSON object. + * If you want to avoid eagerly paying the cost of the String call + * without causing the handler to potentially inspect the structure of the value, + * wrap the value in a fmt.Stringer implementation that hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log + * calls. Say you need to log some expensive value: + * + * ``` + * slog.Debug("frobbing", "value", computeExpensiveValue(arg)) + * ``` + * + * Even if this line is disabled, computeExpensiveValue will be called. + * To avoid that, define a type implementing LogValuer: + * + * ``` + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * ``` + * + * Then use a value of that type in log calls: + * + * ``` + * slog.Debug("frobbing", "value", expensive{arg}) + * ``` + * + * Now computeExpensiveValue will only be called when the line is enabled. 
+ * + * The built-in handlers acquire a lock before calling [io.Writer.Write] + * to ensure that exactly one [Record] is written at a time in its entirety. + * Although each log record has a timestamp, + * the built-in handlers do not use that time to sort the written records. + * User-defined handlers are responsible for their own locking and sorting. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. */ -namespace jwt { - /** - * MapClaims is a claims type that uses the map[string]interface{} for JSON - * decoding. This is the default claims type if you don't supply one - */ - interface MapClaims extends _TygojaDict{} - interface MapClaims { - /** - * GetExpirationTime implements the Claims interface. - */ - getExpirationTime(): (NumericDate) - } - interface MapClaims { - /** - * GetNotBefore implements the Claims interface. - */ - getNotBefore(): (NumericDate) - } - interface MapClaims { - /** - * GetIssuedAt implements the Claims interface. - */ - getIssuedAt(): (NumericDate) - } - interface MapClaims { - /** - * GetAudience implements the Claims interface. - */ - getAudience(): ClaimStrings - } - interface MapClaims { - /** - * GetIssuer implements the Claims interface. - */ - getIssuer(): string - } - interface MapClaims { - /** - * GetSubject implements the Claims interface. - */ - getSubject(): string - } -} - -namespace hook { - /** - * Event implements [Resolver] and it is intended to be used as a base - * Hook event that you can embed in your custom typed event structs. - * - * Example: - * - * ``` - * type CustomEvent struct { - * hook.Event - * - * SomeField int - * } - * ``` - */ - interface Event { - } - interface Event { - /** - * Next calls the next hook handler. - */ - next(): void - } - /** - * Handler defines a single Hook handler. - * Multiple handlers can share the same id. - * If Id is not explicitly set it will be autogenerated by Hook.Add and Hook.AddHandler. 
- */ - interface Handler { - /** - * Func defines the handler function to execute. - * - * Note that users need to call e.Next() in order to proceed with - * the execution of the hook chain. - */ - func: (_arg0: T) => void - /** - * Id is the unique identifier of the handler. - * - * It could be used later to remove the handler from a hook via [Hook.Remove]. - * - * If missing, an autogenerated value will be assigned when adding - * the handler to a hook. - */ - id: string - /** - * Priority allows changing the default exec priority of the handler within a hook. - * - * If 0, the handler will be executed in the same order it was registered. - */ - priority: number - } - /** - * Hook defines a generic concurrent safe structure for managing event hooks. - * - * When using custom event it must embed the base [hook.Event]. - * - * Example: - * - * ``` - * type CustomEvent struct { - * hook.Event - * SomeField int - * } - * - * h := Hook[*CustomEvent]{} - * - * h.BindFunc(func(e *CustomEvent) error { - * println(e.SomeField) - * - * return e.Next() - * }) - * - * h.Trigger(&CustomEvent{ SomeField: 123 }) - * ``` - */ - interface Hook { - } - /** - * TaggedHook defines a proxy hook which register handlers that are triggered only - * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). - */ - type _sNIdHeP = mainHook - interface TaggedHook extends _sNIdHeP { - } -} - -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { - /** - * DateTime represents a [time.Time] instance in UTC that is wrapped - * and serialized using the app default date layout. - */ - interface DateTime { - } - interface DateTime { - /** - * Time returns the internal [time.Time] instance. - */ - time(): time.Time - } - interface DateTime { - /** - * Add returns a new DateTime based on the current DateTime + the specified duration. 
- */ - add(duration: time.Duration): DateTime - } - interface DateTime { - /** - * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. - * - * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], - * the maximum (or minimum) duration will be returned. - */ - sub(u: DateTime): time.Duration - } - interface DateTime { - /** - * AddDate returns a new DateTime based on the current one + duration. - * - * It follows the same rules as [time.AddDate]. - */ - addDate(years: number, months: number, days: number): DateTime - } - interface DateTime { - /** - * After reports whether the current DateTime instance is after u. - */ - after(u: DateTime): boolean - } - interface DateTime { - /** - * Before reports whether the current DateTime instance is before u. - */ - before(u: DateTime): boolean - } - interface DateTime { - /** - * Compare compares the current DateTime instance with u. - * If the current instance is before u, it returns -1. - * If the current instance is after u, it returns +1. - * If they're the same, it returns 0. - */ - compare(u: DateTime): number - } - interface DateTime { - /** - * Equal reports whether the current DateTime and u represent the same time instant. - * Two DateTime can be equal even if they are in different locations. - * For example, 6:00 +0200 and 4:00 UTC are Equal. - */ - equal(u: DateTime): boolean - } - interface DateTime { - /** - * Unix returns the current DateTime as a Unix time, aka. - * the number of seconds elapsed since January 1, 1970 UTC. - */ - unix(): number - } - interface DateTime { - /** - * IsZero checks whether the current DateTime instance has zero time value. - */ - isZero(): boolean - } - interface DateTime { - /** - * String serializes the current DateTime instance into a formatted - * UTC date string. - * - * The zero value is serialized to an empty string. 
- */ - string(): string - } - interface DateTime { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface DateTime { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface DateTime { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface DateTime { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current DateTime instance. - */ - scan(value: any): void - } - /** - * JSONArray defines a slice that is safe for json and db read/write. - */ - interface JSONArray extends Array{} - /** - * JSONMap defines a map that is safe for json and db read/write. - */ - interface JSONMap extends _TygojaDict{} - /** - * JSONRaw defines a json value type that is safe for db read/write. - */ - interface JSONRaw extends Array{} - interface JSONRaw { - /** - * String returns the current JSONRaw instance as a json encoded string. - */ - string(): string - } - interface JSONRaw { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface JSONRaw { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface JSONRaw { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface JSONRaw { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current JSONRaw instance. - */ - scan(value: any): void - } -} - -namespace search { - /** - * Result defines the returned search result structure. - */ - interface Result { - items: any - page: number - perPage: number - totalItems: number - totalPages: number - } - /** - * ResolverResult defines a single FieldResolver.Resolve() successfully parsed result. 
- */ - interface ResolverResult { - /** - * Identifier is the plain SQL identifier/column that will be used - * in the final db expression as left or right operand. - */ - identifier: string - /** - * NoCoalesce instructs to not use COALESCE or NULL fallbacks - * when building the identifier expression. - */ - noCoalesce: boolean - /** - * Params is a map with db placeholder->value pairs that will be added - * to the query when building both resolved operands/sides in a single expression. - */ - params: dbx.Params - /** - * MultiMatchSubQuery is an optional sub query expression that will be added - * in addition to the combined ResolverResult expression during build. - */ - multiMatchSubQuery: dbx.Expression - /** - * AfterBuild is an optional function that will be called after building - * and combining the result of both resolved operands/sides in a single expression. - */ - afterBuild: (expr: dbx.Expression) => dbx.Expression - } -} - -namespace router { +namespace slog { // @ts-ignore - import validation = ozzo_validation + import loginternal = internal /** - * ApiError defines the struct for a basic api error response. + * A Logger records structured information about each call to its + * Log, Debug, Info, Warn, and Error methods. + * For each call, it creates a [Record] and passes it to a [Handler]. + * + * To create a new Logger, call [New] or a Logger method + * that begins "With". */ - interface ApiError { - data: _TygojaDict - message: string - status: number + interface Logger { } - interface ApiError { + interface Logger { /** - * Error makes it compatible with the `error` interface. + * Handler returns l's Handler. */ - error(): string + handler(): Handler } - interface ApiError { + interface Logger { /** - * RawData returns the unformatted error data (could be an internal error, text, etc.) + * With returns a Logger that includes the given attributes + * in each output operation. Arguments are converted to + * attributes as if by [Logger.Log]. 
*/ - rawData(): any + with(...args: any[]): (Logger) } - interface ApiError { + interface Logger { /** - * Is reports whether the current ApiError wraps the target. + * WithGroup returns a Logger that starts a group, if name is non-empty. + * The keys of all attributes added to the Logger will be qualified by the given + * name. (How that qualification happens depends on the [Handler.WithGroup] + * method of the Logger's Handler.) + * + * If name is empty, WithGroup returns the receiver. */ - is(target: Error): boolean + withGroup(name: string): (Logger) + } + interface Logger { + /** + * Enabled reports whether l emits log records at the given context and level. + */ + enabled(ctx: context.Context, level: Level): boolean + } + interface Logger { + /** + * Log emits a log record with the current time and the given level and message. + * The Record's Attrs consist of the Logger's attributes followed by + * the Attrs specified by args. + * + * The attribute arguments are processed as follows: + * ``` + * - If an argument is an Attr, it is used as is. + * - If an argument is a string and this is not the last argument, + * the following argument is treated as the value and the two are combined + * into an Attr. + * - Otherwise, the argument is treated as a value with key "!BADKEY". + * ``` + */ + log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void + } + interface Logger { + /** + * LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. + */ + logAttrs(ctx: context.Context, level: Level, msg: string, ...attrs: Attr[]): void + } + interface Logger { + /** + * Debug logs at [LevelDebug]. + */ + debug(msg: string, ...args: any[]): void + } + interface Logger { + /** + * DebugContext logs at [LevelDebug] with the given context. + */ + debugContext(ctx: context.Context, msg: string, ...args: any[]): void + } + interface Logger { + /** + * Info logs at [LevelInfo]. 
+ */ + info(msg: string, ...args: any[]): void + } + interface Logger { + /** + * InfoContext logs at [LevelInfo] with the given context. + */ + infoContext(ctx: context.Context, msg: string, ...args: any[]): void + } + interface Logger { + /** + * Warn logs at [LevelWarn]. + */ + warn(msg: string, ...args: any[]): void + } + interface Logger { + /** + * WarnContext logs at [LevelWarn] with the given context. + */ + warnContext(ctx: context.Context, msg: string, ...args: any[]): void + } + interface Logger { + /** + * Error logs at [LevelError]. + */ + error(msg: string, ...args: any[]): void + } + interface Logger { + /** + * ErrorContext logs at [LevelError] with the given context. + */ + errorContext(ctx: context.Context, msg: string, ...args: any[]): void + } +} + +namespace auth { + /** + * Provider defines a common interface for an OAuth2 client. + */ + interface Provider { + [key:string]: any; + /** + * Context returns the context associated with the provider (if any). + */ + context(): context.Context + /** + * SetContext assigns the specified context to the current provider. + */ + setContext(ctx: context.Context): void + /** + * PKCE indicates whether the provider can use the PKCE flow. + */ + pkce(): boolean + /** + * SetPKCE toggles the state whether the provider can use the PKCE flow or not. + */ + setPKCE(enable: boolean): void + /** + * DisplayName usually returns provider name as it is officially written + * and it could be used directly in the UI. + */ + displayName(): string + /** + * SetDisplayName sets the provider's display name. + */ + setDisplayName(displayName: string): void + /** + * Scopes returns the provider access permissions that will be requested. + */ + scopes(): Array + /** + * SetScopes sets the provider access permissions that will be requested later. + */ + setScopes(scopes: Array): void + /** + * ClientId returns the provider client's app ID. + */ + clientId(): string + /** + * SetClientId sets the provider client's ID. 
+ */ + setClientId(clientId: string): void + /** + * ClientSecret returns the provider client's app secret. + */ + clientSecret(): string + /** + * SetClientSecret sets the provider client's app secret. + */ + setClientSecret(secret: string): void + /** + * RedirectURL returns the end address to redirect the user + * going through the OAuth flow. + */ + redirectURL(): string + /** + * SetRedirectURL sets the provider's RedirectURL. + */ + setRedirectURL(url: string): void + /** + * AuthURL returns the provider's authorization service url. + */ + authURL(): string + /** + * SetAuthURL sets the provider's AuthURL. + */ + setAuthURL(url: string): void + /** + * TokenURL returns the provider's token exchange service url. + */ + tokenURL(): string + /** + * SetTokenURL sets the provider's TokenURL. + */ + setTokenURL(url: string): void + /** + * UserInfoURL returns the provider's user info api url. + */ + userInfoURL(): string + /** + * SetUserInfoURL sets the provider's UserInfoURL. + */ + setUserInfoURL(url: string): void + /** + * Extra returns a shallow copy of any custom config data + * that the provider may be need. + */ + extra(): _TygojaDict + /** + * SetExtra updates the provider's custom config data. + */ + setExtra(data: _TygojaDict): void + /** + * Client returns an http client using the provided token. + */ + client(token: oauth2.Token): (any) + /** + * BuildAuthURL returns a URL to the provider's consent page + * that asks for permissions for the required scopes explicitly. + */ + buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string + /** + * FetchToken converts an authorization code to token. + */ + fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token) + /** + * FetchRawUserInfo requests and marshalizes into `result` the + * the OAuth user api response. 
+ */ + fetchRawUserInfo(token: oauth2.Token): string|Array + /** + * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and + * marshalizes the user api response into a standardized AuthUser struct. + */ + fetchAuthUser(token: oauth2.Token): (AuthUser) } /** - * Event specifies based Route handler event that is usually intended - * to be embedded as part of a custom event struct. - * - * NB! It is expected that the Response and Request fields are always set. + * AuthUser defines a standardized OAuth2 user data structure. */ - type _seOZRqn = hook.Event - interface Event extends _seOZRqn { - response: http.ResponseWriter - request?: http.Request - } - interface Event { + interface AuthUser { + expiry: types.DateTime + rawUser: _TygojaDict + id: string + name: string + username: string + email: string + avatarURL: string + accessToken: string + refreshToken: string /** - * Written reports whether the current response has already been written. + * @todo + * deprecated: use AvatarURL instead + * AvatarUrl will be removed after dropping v0.22 support + */ + avatarUrl: string + } + interface AuthUser { + /** + * MarshalJSON implements the [json.Marshaler] interface. * - * This method always returns false if e.ResponseWritter doesn't implement the WriteTracker interface - * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). + * @todo remove after dropping v0.22 support */ - written(): boolean - } - interface Event { - /** - * Status reports the status code of the current response. - * - * This method always returns 0 if e.Response doesn't implement the StatusTracker interface - * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). - */ - status(): number - } - interface Event { - /** - * Flush flushes buffered data to the current response. 
- * - * Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface - * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). - */ - flush(): void - } - interface Event { - /** - * IsTLS reports whether the connection on which the request was received is TLS. - */ - isTLS(): boolean - } - interface Event { - /** - * SetCookie is an alias for [http.SetCookie]. - * - * SetCookie adds a Set-Cookie header to the current response's headers. - * The provided cookie must have a valid Name. - * Invalid cookies may be silently dropped. - */ - setCookie(cookie: http.Cookie): void - } - interface Event { - /** - * RemoteIP returns the IP address of the client that sent the request. - * - * IPv6 addresses are returned expanded. - * For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001". - * - * Note that if you are behind reverse proxy(ies), this method returns - * the IP of the last connecting proxy. - */ - remoteIP(): string - } - interface Event { - /** - * FindUploadedFiles extracts all form files of "key" from a http request - * and returns a slice with filesystem.File instances (if any). - */ - findUploadedFiles(key: string): Array<(filesystem.File | undefined)> - } - interface Event { - /** - * Get retrieves single value from the current event data store. - */ - get(key: string): any - } - interface Event { - /** - * GetAll returns a copy of the current event data store. - */ - getAll(): _TygojaDict - } - interface Event { - /** - * Set saves single value into the current event data store. - */ - set(key: string, value: any): void - } - interface Event { - /** - * SetAll saves all items from m into the current event data store. - */ - setAll(m: _TygojaDict): void - } - interface Event { - /** - * String writes a plain string response. - */ - string(status: number, data: string): void - } - interface Event { - /** - * HTML writes an HTML response. 
- */ - html(status: number, data: string): void - } - interface Event { - /** - * JSON writes a JSON response. - * - * It also provides a generic response data fields picker if the "fields" query parameter is set. - * For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`, - * it should result in a JSON response like: `{"a":1, "b": 2}`. - */ - json(status: number, data: any): void - } - interface Event { - /** - * XML writes an XML response. - * It automatically prepends the generic [xml.Header] string to the response. - */ - xml(status: number, data: any): void - } - interface Event { - /** - * Stream streams the specified reader into the response. - */ - stream(status: number, contentType: string, reader: io.Reader): void - } - interface Event { - /** - * Blob writes a blob (bytes slice) response. - */ - blob(status: number, contentType: string, b: string|Array): void - } - interface Event { - /** - * FileFS serves the specified filename from fsys. - * - * It is similar to [echo.FileFS] for consistency with earlier versions. - */ - fileFS(fsys: fs.FS, filename: string): void - } - interface Event { - /** - * NoContent writes a response with no body (ex. 204). - */ - noContent(status: number): void - } - interface Event { - /** - * Redirect writes a redirect response to the specified url. - * The status code must be in between 300 – 399 range. 
- */ - redirect(status: number, url: string): void - } - interface Event { - error(status: number, message: string, errData: any): (ApiError) - } - interface Event { - badRequestError(message: string, errData: any): (ApiError) - } - interface Event { - notFoundError(message: string, errData: any): (ApiError) - } - interface Event { - forbiddenError(message: string, errData: any): (ApiError) - } - interface Event { - unauthorizedError(message: string, errData: any): (ApiError) - } - interface Event { - tooManyRequestsError(message: string, errData: any): (ApiError) - } - interface Event { - internalServerError(message: string, errData: any): (ApiError) - } - interface Event { - /** - * BindBody unmarshal the request body into the provided dst. - * - * dst must be either a struct pointer or map[string]any. - * - * The rules how the body will be scanned depends on the request Content-Type. - * - * Currently the following Content-Types are supported: - * ``` - * - application/json - * - text/xml, application/xml - * - multipart/form-data, application/x-www-form-urlencoded - * ``` - * - * Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type): - * ``` - * - "json" (json body)- uses the builtin Go json package for unmarshaling. - * - "xml" (xml body) - uses the builtin Go xml package for unmarshaling. - * - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method. - * ``` - * - * NB! When dst is a struct make sure that it doesn't have public fields - * that shouldn't be bindable and it is advisible such fields to be unexported - * or have a separate struct just for the binding. 
For example: - * - * ``` - * data := struct{ - * somethingPrivate string - * - * Title string `json:"title" form:"title"` - * Total int `json:"total" form:"total"` - * } - * err := e.BindBody(&data) - * ``` - */ - bindBody(dst: any): void - } - /** - * Router defines a thin wrapper around the standard Go [http.ServeMux] by - * adding support for routing sub-groups, middlewares and other common utils. - * - * Example: - * - * ``` - * r := NewRouter[*MyEvent](eventFactory) - * - * // middlewares - * r.BindFunc(m1, m2) - * - * // routes - * r.GET("/test", handler1) - * - * // sub-routers/groups - * api := r.Group("/api") - * api.GET("/admins", handler2) - * - * // generate a http.ServeMux instance based on the router configurations - * mux, _ := r.BuildMux() - * - * http.ListenAndServe("localhost:8090", mux) - * ``` - */ - type _shvLIXv = RouterGroup - interface Router extends _shvLIXv { + marshalJSON(): string|Array } } @@ -19803,672 +19958,428 @@ namespace cobra { } } -namespace mailer { +namespace blob { /** - * Message defines a generic email message struct. + * Reader reads bytes from a blob. + * It implements io.ReadSeekCloser, and must be closed after + * reads are finished. */ - interface Message { - from: { address: string; name?: string; } - to: Array<{ address: string; name?: string; }> - bcc: Array<{ address: string; name?: string; }> - cc: Array<{ address: string; name?: string; }> - subject: string - html: string - text: string - headers: _TygojaDict - attachments: _TygojaDict - inlineAttachments: _TygojaDict + interface Reader { } - /** - * Mailer defines a base mail client interface. - */ - interface Mailer { - [key:string]: any; + interface Reader { /** - * Send sends an email with the provided Message. + * Read implements io.Reader (https://golang.org/pkg/io/#Reader). */ - send(message: Message): void + read(p: string|Array): number + } + interface Reader { + /** + * Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). 
+ */ + seek(offset: number, whence: number): number + } + interface Reader { + /** + * Close implements io.Closer (https://golang.org/pkg/io/#Closer). + */ + close(): void + } + interface Reader { + /** + * ContentType returns the MIME type of the blob. + */ + contentType(): string + } + interface Reader { + /** + * ModTime returns the time the blob was last modified. + */ + modTime(): time.Time + } + interface Reader { + /** + * Size returns the size of the blob content in bytes. + */ + size(): number + } + interface Reader { + /** + * WriteTo reads from r and writes to w until there's no more data or + * an error occurs. + * The return value is the number of bytes written to w. + * + * It implements the io.WriterTo interface. + */ + writeTo(w: io.Writer): number } -} - -/** - * Package slog provides structured logging, - * in which log records include a message, - * a severity level, and various other attributes - * expressed as key-value pairs. - * - * It defines a type, [Logger], - * which provides several methods (such as [Logger.Info] and [Logger.Error]) - * for reporting events of interest. - * - * Each Logger is associated with a [Handler]. - * A Logger output method creates a [Record] from the method arguments - * and passes it to the Handler, which decides how to handle it. - * There is a default Logger accessible through top-level functions - * (such as [Info] and [Error]) that call the corresponding Logger methods. - * - * A log record consists of a time, a level, a message, and a set of key-value - * pairs, where the keys are strings and the values may be of any type. - * As an example, - * - * ``` - * slog.Info("hello", "count", 3) - * ``` - * - * creates a record containing the time of the call, - * a level of Info, the message "hello", and a single - * pair with key "count" and value 3. - * - * The [Info] top-level function calls the [Logger.Info] method on the default Logger. 
- * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. - * Besides these convenience methods for common levels, - * there is also a [Logger.Log] method which takes the level as an argument. - * Each of these methods has a corresponding top-level function that uses the - * default logger. - * - * The default handler formats the log record's message, time, level, and attributes - * as a string and passes it to the [log] package. - * - * ``` - * 2022/11/08 15:28:26 INFO hello count=3 - * ``` - * - * For more control over the output format, create a logger with a different handler. - * This statement uses [New] to create a new logger with a [TextHandler] - * that writes structured records in text form to standard error: - * - * ``` - * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) - * ``` - * - * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously - * parsed by machine. This statement: - * - * ``` - * logger.Info("hello", "count", 3) - * ``` - * - * produces this output: - * - * ``` - * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 - * ``` - * - * The package also provides [JSONHandler], whose output is line-delimited JSON: - * - * ``` - * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - * logger.Info("hello", "count", 3) - * ``` - * - * produces this output: - * - * ``` - * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} - * ``` - * - * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. - * There are options for setting the minimum level (see Levels, below), - * displaying the source file and line of the log call, and - * modifying attributes before they are logged. - * - * Setting a logger as the default with - * - * ``` - * slog.SetDefault(logger) - * ``` - * - * will cause the top-level functions like [Info] to use it. 
- * [SetDefault] also updates the default logger used by the [log] package, - * so that existing applications that use [log.Printf] and related functions - * will send log records to the logger's handler without needing to be rewritten. - * - * Some attributes are common to many log calls. - * For example, you may wish to include the URL or trace identifier of a server request - * with all log events arising from the request. - * Rather than repeat the attribute with every log call, you can use [Logger.With] - * to construct a new Logger containing the attributes: - * - * ``` - * logger2 := logger.With("url", r.URL) - * ``` - * - * The arguments to With are the same key-value pairs used in [Logger.Info]. - * The result is a new Logger with the same handler as the original, but additional - * attributes that will appear in the output of every call. - * - * # Levels - * - * A [Level] is an integer representing the importance or severity of a log event. - * The higher the level, the more severe the event. - * This package defines constants for the most common levels, - * but any int can be used as a level. - * - * In an application, you may wish to log messages only at a certain level or greater. - * One common configuration is to log messages at Info or higher levels, - * suppressing debug logging until it is needed. - * The built-in handlers can be configured with the minimum level to output by - * setting [HandlerOptions.Level]. - * The program's `main` function typically does this. - * The default value is LevelInfo. - * - * Setting the [HandlerOptions.Level] field to a [Level] value - * fixes the handler's minimum level throughout its lifetime. - * Setting it to a [LevelVar] allows the level to be varied dynamically. - * A LevelVar holds a Level and is safe to read or write from multiple - * goroutines. 
- * To vary the level dynamically for an entire program, first initialize - * a global LevelVar: - * - * ``` - * var programLevel = new(slog.LevelVar) // Info by default - * ``` - * - * Then use the LevelVar to construct a handler, and make it the default: - * - * ``` - * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) - * slog.SetDefault(slog.New(h)) - * ``` - * - * Now the program can change its logging level with a single statement: - * - * ``` - * programLevel.Set(slog.LevelDebug) - * ``` - * - * # Groups - * - * Attributes can be collected into groups. - * A group has a name that is used to qualify the names of its attributes. - * How this qualification is displayed depends on the handler. - * [TextHandler] separates the group and attribute names with a dot. - * [JSONHandler] treats each group as a separate JSON object, with the group name as the key. - * - * Use [Group] to create a Group attribute from a name and a list of key-value pairs: - * - * ``` - * slog.Group("request", - * "method", r.Method, - * "url", r.URL) - * ``` - * - * TextHandler would display this group as - * - * ``` - * request.method=GET request.url=http://example.com - * ``` - * - * JSONHandler would display it as - * - * ``` - * "request":{"method":"GET","url":"http://example.com"} - * ``` - * - * Use [Logger.WithGroup] to qualify all of a Logger's output - * with a group name. Calling WithGroup on a Logger results in a - * new Logger with the same Handler as the original, but with all - * its attributes qualified by the group name. - * - * This can help prevent duplicate attribute keys in large systems, - * where subsystems might use the same keys. 
- * Pass each subsystem a different Logger with its own group name so that - * potential duplicates are qualified: - * - * ``` - * logger := slog.Default().With("id", systemID) - * parserLogger := logger.WithGroup("parser") - * parseInput(input, parserLogger) - * ``` - * - * When parseInput logs with parserLogger, its keys will be qualified with "parser", - * so even if it uses the common key "id", the log line will have distinct keys. - * - * # Contexts - * - * Some handlers may wish to include information from the [context.Context] that is - * available at the call site. One example of such information - * is the identifier for the current span when tracing is enabled. - * - * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first - * argument, as do their corresponding top-level functions. - * - * Although the convenience methods on Logger (Info and so on) and the - * corresponding top-level functions do not take a context, the alternatives ending - * in "Context" do. For example, - * - * ``` - * slog.InfoContext(ctx, "message") - * ``` - * - * It is recommended to pass a context to an output method if one is available. - * - * # Attrs and Values - * - * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as - * alternating keys and values. The statement - * - * ``` - * slog.Info("hello", slog.Int("count", 3)) - * ``` - * - * behaves the same as - * - * ``` - * slog.Info("hello", "count", 3) - * ``` - * - * There are convenience constructors for [Attr] such as [Int], [String], and [Bool] - * for common types, as well as the function [Any] for constructing Attrs of any - * type. - * - * The value part of an Attr is a type called [Value]. - * Like an [any], a Value can hold any Go value, - * but it can represent typical values, including all numbers and strings, - * without an allocation. - * - * For the most efficient log output, use [Logger.LogAttrs]. 
- * It is similar to [Logger.Log] but accepts only Attrs, not alternating - * keys and values; this allows it, too, to avoid allocation. - * - * The call - * - * ``` - * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3)) - * ``` - * - * is the most efficient way to achieve the same output as - * - * ``` - * slog.InfoContext(ctx, "hello", "count", 3) - * ``` - * - * # Customizing a type's logging behavior - * - * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue - * method is used for logging. You can use this to control how values of the type - * appear in logs. For example, you can redact secret information like passwords, - * or gather a struct's fields in a Group. See the examples under [LogValuer] for - * details. - * - * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] - * method handles these cases carefully, avoiding infinite loops and unbounded recursion. - * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly. - * - * # Wrapping output methods - * - * The logger functions use reflection over the call stack to find the file name - * and line number of the logging call within the application. This can produce - * incorrect source information for functions that wrap slog. For instance, if you - * define this function in file mylog.go: - * - * ``` - * func Infof(logger *slog.Logger, format string, args ...any) { - * logger.Info(fmt.Sprintf(format, args...)) - * } - * ``` - * - * and you call it like this in main.go: - * - * ``` - * Infof(slog.Default(), "hello, %s", "world") - * ``` - * - * then slog will report the source file as mylog.go, not main.go. - * - * A correct implementation of Infof will obtain the source location - * (pc) and pass it to NewRecord. - * The Infof function in the package-level example called "wrapping" - * demonstrates how to do this. 
- * - * # Working with Records - * - * Sometimes a Handler will need to modify a Record - * before passing it on to another Handler or backend. - * A Record contains a mixture of simple public fields (e.g. Time, Level, Message) - * and hidden fields that refer to state (such as attributes) indirectly. This - * means that modifying a simple copy of a Record (e.g. by calling - * [Record.Add] or [Record.AddAttrs] to add attributes) - * may have unexpected effects on the original. - * Before modifying a Record, use [Record.Clone] to - * create a copy that shares no state with the original, - * or create a new Record with [NewRecord] - * and build up its Attrs by traversing the old ones with [Record.Attrs]. - * - * # Performance considerations - * - * If profiling your application demonstrates that logging is taking significant time, - * the following suggestions may help. - * - * If many log lines have a common attribute, use [Logger.With] to create a Logger with - * that attribute. The built-in handlers will format that attribute only once, at the - * call to [Logger.With]. The [Handler] interface is designed to allow that optimization, - * and a well-written Handler should take advantage of it. - * - * The arguments to a log call are always evaluated, even if the log event is discarded. - * If possible, defer computation so that it happens only if the value is actually logged. - * For example, consider the call - * - * ``` - * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily - * ``` - * - * The URL.String method will be called even if the logger discards Info-level events. - * Instead, pass the URL directly: - * - * ``` - * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed - * ``` - * - * The built-in [TextHandler] will call its String method, but only - * if the log event is enabled. - * Avoiding the call to String also preserves the structure of the underlying value. 
- * For example [JSONHandler] emits the components of the parsed URL as a JSON object. - * If you want to avoid eagerly paying the cost of the String call - * without causing the handler to potentially inspect the structure of the value, - * wrap the value in a fmt.Stringer implementation that hides its Marshal methods. - * - * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log - * calls. Say you need to log some expensive value: - * - * ``` - * slog.Debug("frobbing", "value", computeExpensiveValue(arg)) - * ``` - * - * Even if this line is disabled, computeExpensiveValue will be called. - * To avoid that, define a type implementing LogValuer: - * - * ``` - * type expensive struct { arg int } - * - * func (e expensive) LogValue() slog.Value { - * return slog.AnyValue(computeExpensiveValue(e.arg)) - * } - * ``` - * - * Then use a value of that type in log calls: - * - * ``` - * slog.Debug("frobbing", "value", expensive{arg}) - * ``` - * - * Now computeExpensiveValue will only be called when the line is enabled. - * - * The built-in handlers acquire a lock before calling [io.Writer.Write] - * to ensure that exactly one [Record] is written at a time in its entirety. - * Although each log record has a timestamp, - * the built-in handlers do not use that time to sort the written records. - * User-defined handlers are responsible for their own locking and sorting. - * - * # Writing a handler - * - * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. - */ -namespace slog { - // @ts-ignore - import loginternal = internal /** - * A Logger records structured information about each call to its - * Log, Debug, Info, Warn, and Error methods. - * For each call, it creates a [Record] and passes it to a [Handler]. + * @todo remove * - * To create a new Logger, call [New] or a Logger method - * that begins "With". + * Attributes contains attributes about a blob. 
*/ - interface Logger { - } - interface Logger { + interface Attributes { /** - * Handler returns l's Handler. + * CacheControl specifies caching attributes that services may use + * when serving the blob. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control */ - handler(): Handler - } - interface Logger { + cacheControl: string /** - * With returns a Logger that includes the given attributes - * in each output operation. Arguments are converted to - * attributes as if by [Logger.Log]. + * ContentDisposition specifies whether the blob content is expected to be + * displayed inline or as an attachment. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition */ - with(...args: any[]): (Logger) - } - interface Logger { + contentDisposition: string /** - * WithGroup returns a Logger that starts a group, if name is non-empty. - * The keys of all attributes added to the Logger will be qualified by the given - * name. (How that qualification happens depends on the [Handler.WithGroup] - * method of the Logger's Handler.) - * - * If name is empty, WithGroup returns the receiver. + * ContentEncoding specifies the encoding used for the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding */ - withGroup(name: string): (Logger) - } - interface Logger { + contentEncoding: string /** - * Enabled reports whether l emits log records at the given context and level. + * ContentLanguage specifies the language used in the blob's content, if any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language */ - enabled(ctx: context.Context, level: Level): boolean - } - interface Logger { + contentLanguage: string /** - * Log emits a log record with the current time and the given level and message. - * The Record's Attrs consist of the Logger's attributes followed by - * the Attrs specified by args. 
- * - * The attribute arguments are processed as follows: - * ``` - * - If an argument is an Attr, it is used as is. - * - If an argument is a string and this is not the last argument, - * the following argument is treated as the value and the two are combined - * into an Attr. - * - Otherwise, the argument is treated as a value with key "!BADKEY". - * ``` + * ContentType is the MIME type of the blob. It will not be empty. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type */ - log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void - } - interface Logger { + contentType: string /** - * LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. + * Metadata holds key/value pairs associated with the blob. + * Keys are guaranteed to be in lowercase, even if the backend service + * has case-sensitive keys (although note that Metadata written via + * this package will always be lowercased). If there are duplicate + * case-insensitive keys (e.g., "foo" and "FOO"), only one value + * will be kept, and it is undefined which one. */ - logAttrs(ctx: context.Context, level: Level, msg: string, ...attrs: Attr[]): void - } - interface Logger { + metadata: _TygojaDict /** - * Debug logs at [LevelDebug]. + * CreateTime is the time the blob was created, if available. If not available, + * CreateTime will be the zero time. */ - debug(msg: string, ...args: any[]): void - } - interface Logger { + createTime: time.Time /** - * DebugContext logs at [LevelDebug] with the given context. + * ModTime is the time the blob was last modified. */ - debugContext(ctx: context.Context, msg: string, ...args: any[]): void - } - interface Logger { + modTime: time.Time /** - * Info logs at [LevelInfo]. + * Size is the size of the blob's content in bytes. */ - info(msg: string, ...args: any[]): void - } - interface Logger { + size: number /** - * InfoContext logs at [LevelInfo] with the given context. 
+ * MD5 is an MD5 hash of the blob contents or nil if not available. */ - infoContext(ctx: context.Context, msg: string, ...args: any[]): void - } - interface Logger { + md5: string|Array /** - * Warn logs at [LevelWarn]. + * ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. */ - warn(msg: string, ...args: any[]): void + eTag: string } - interface Logger { + /** + * ListObject represents a single blob returned from List. + */ + interface ListObject { /** - * WarnContext logs at [LevelWarn] with the given context. + * Key is the key for this blob. */ - warnContext(ctx: context.Context, msg: string, ...args: any[]): void - } - interface Logger { + key: string /** - * Error logs at [LevelError]. + * ModTime is the time the blob was last modified. */ - error(msg: string, ...args: any[]): void - } - interface Logger { + modTime: time.Time /** - * ErrorContext logs at [LevelError] with the given context. + * Size is the size of the blob's content in bytes. */ - errorContext(ctx: context.Context, msg: string, ...args: any[]): void + size: number + /** + * MD5 is an MD5 hash of the blob contents or nil if not available. + */ + md5: string|Array + /** + * IsDir indicates that this result represents a "directory" in the + * hierarchical namespace, ending in ListOptions.Delimiter. Key can be + * passed as ListOptions.Prefix to list items in the "directory". + * Fields other than Key and IsDir will not be set if IsDir is true. + */ + isDir: boolean } } -namespace auth { +namespace router { + // @ts-ignore + import validation = ozzo_validation /** - * Provider defines a common interface for an OAuth2 client. + * ApiError defines the struct for a basic api error response. */ - interface Provider { - [key:string]: any; + interface ApiError { + data: _TygojaDict + message: string + status: number + } + interface ApiError { /** - * Context returns the context associated with the provider (if any). + * Error makes it compatible with the `error` interface. 
*/ - context(): context.Context + error(): string + } + interface ApiError { /** - * SetContext assigns the specified context to the current provider. + * RawData returns the unformatted error data (could be an internal error, text, etc.) */ - setContext(ctx: context.Context): void + rawData(): any + } + interface ApiError { /** - * PKCE indicates whether the provider can use the PKCE flow. + * Is reports whether the current ApiError wraps the target. */ - pkce(): boolean - /** - * SetPKCE toggles the state whether the provider can use the PKCE flow or not. - */ - setPKCE(enable: boolean): void - /** - * DisplayName usually returns provider name as it is officially written - * and it could be used directly in the UI. - */ - displayName(): string - /** - * SetDisplayName sets the provider's display name. - */ - setDisplayName(displayName: string): void - /** - * Scopes returns the provider access permissions that will be requested. - */ - scopes(): Array - /** - * SetScopes sets the provider access permissions that will be requested later. - */ - setScopes(scopes: Array): void - /** - * ClientId returns the provider client's app ID. - */ - clientId(): string - /** - * SetClientId sets the provider client's ID. - */ - setClientId(clientId: string): void - /** - * ClientSecret returns the provider client's app secret. - */ - clientSecret(): string - /** - * SetClientSecret sets the provider client's app secret. - */ - setClientSecret(secret: string): void - /** - * RedirectURL returns the end address to redirect the user - * going through the OAuth flow. - */ - redirectURL(): string - /** - * SetRedirectURL sets the provider's RedirectURL. - */ - setRedirectURL(url: string): void - /** - * AuthURL returns the provider's authorization service url. - */ - authURL(): string - /** - * SetAuthURL sets the provider's AuthURL. - */ - setAuthURL(url: string): void - /** - * TokenURL returns the provider's token exchange service url. 
- */ - tokenURL(): string - /** - * SetTokenURL sets the provider's TokenURL. - */ - setTokenURL(url: string): void - /** - * UserInfoURL returns the provider's user info api url. - */ - userInfoURL(): string - /** - * SetUserInfoURL sets the provider's UserInfoURL. - */ - setUserInfoURL(url: string): void - /** - * Extra returns a shallow copy of any custom config data - * that the provider may be need. - */ - extra(): _TygojaDict - /** - * SetExtra updates the provider's custom config data. - */ - setExtra(data: _TygojaDict): void - /** - * Client returns an http client using the provided token. - */ - client(token: oauth2.Token): (any) - /** - * BuildAuthURL returns a URL to the provider's consent page - * that asks for permissions for the required scopes explicitly. - */ - buildAuthURL(state: string, ...opts: oauth2.AuthCodeOption[]): string - /** - * FetchToken converts an authorization code to token. - */ - fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): (oauth2.Token) - /** - * FetchRawUserInfo requests and marshalizes into `result` the - * the OAuth user api response. - */ - fetchRawUserInfo(token: oauth2.Token): string|Array - /** - * FetchAuthUser is similar to FetchRawUserInfo, but normalizes and - * marshalizes the user api response into a standardized AuthUser struct. - */ - fetchAuthUser(token: oauth2.Token): (AuthUser) + is(target: Error): boolean } /** - * AuthUser defines a standardized OAuth2 user data structure. + * Event specifies based Route handler event that is usually intended + * to be embedded as part of a custom event struct. + * + * NB! It is expected that the Response and Request fields are always set. 
*/ - interface AuthUser { - expiry: types.DateTime - rawUser: _TygojaDict - id: string - name: string - username: string - email: string - avatarURL: string - accessToken: string - refreshToken: string - /** - * @todo - * deprecated: use AvatarURL instead - * AvatarUrl will be removed after dropping v0.22 support - */ - avatarUrl: string + type _sFlOddj = hook.Event + interface Event extends _sFlOddj { + response: http.ResponseWriter + request?: http.Request } - interface AuthUser { + interface Event { /** - * MarshalJSON implements the [json.Marshaler] interface. + * Written reports whether the current response has already been written. * - * @todo remove after dropping v0.22 support + * This method always returns false if e.ResponseWritter doesn't implement the WriteTracker interface + * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). */ - marshalJSON(): string|Array + written(): boolean + } + interface Event { + /** + * Status reports the status code of the current response. + * + * This method always returns 0 if e.Response doesn't implement the StatusTracker interface + * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). + */ + status(): number + } + interface Event { + /** + * Flush flushes buffered data to the current response. + * + * Returns [http.ErrNotSupported] if e.Response doesn't implement the [http.Flusher] interface + * (all router package handlers receives a ResponseWritter that implements it unless explicitly replaced with a custom one). + */ + flush(): void + } + interface Event { + /** + * IsTLS reports whether the connection on which the request was received is TLS. + */ + isTLS(): boolean + } + interface Event { + /** + * SetCookie is an alias for [http.SetCookie]. + * + * SetCookie adds a Set-Cookie header to the current response's headers. + * The provided cookie must have a valid Name. 
+ * Invalid cookies may be silently dropped. + */ + setCookie(cookie: http.Cookie): void + } + interface Event { + /** + * RemoteIP returns the IP address of the client that sent the request. + * + * IPv6 addresses are returned expanded. + * For example, "2001:db8::1" becomes "2001:0db8:0000:0000:0000:0000:0000:0001". + * + * Note that if you are behind reverse proxy(ies), this method returns + * the IP of the last connecting proxy. + */ + remoteIP(): string + } + interface Event { + /** + * FindUploadedFiles extracts all form files of "key" from a http request + * and returns a slice with filesystem.File instances (if any). + */ + findUploadedFiles(key: string): Array<(filesystem.File | undefined)> + } + interface Event { + /** + * Get retrieves single value from the current event data store. + */ + get(key: string): any + } + interface Event { + /** + * GetAll returns a copy of the current event data store. + */ + getAll(): _TygojaDict + } + interface Event { + /** + * Set saves single value into the current event data store. + */ + set(key: string, value: any): void + } + interface Event { + /** + * SetAll saves all items from m into the current event data store. + */ + setAll(m: _TygojaDict): void + } + interface Event { + /** + * String writes a plain string response. + */ + string(status: number, data: string): void + } + interface Event { + /** + * HTML writes an HTML response. + */ + html(status: number, data: string): void + } + interface Event { + /** + * JSON writes a JSON response. + * + * It also provides a generic response data fields picker if the "fields" query parameter is set. + * For example, if you are requesting `?fields=a,b` for `e.JSON(200, map[string]int{ "a":1, "b":2, "c":3 })`, + * it should result in a JSON response like: `{"a":1, "b": 2}`. + */ + json(status: number, data: any): void + } + interface Event { + /** + * XML writes an XML response. + * It automatically prepends the generic [xml.Header] string to the response. 
+ */ + xml(status: number, data: any): void + } + interface Event { + /** + * Stream streams the specified reader into the response. + */ + stream(status: number, contentType: string, reader: io.Reader): void + } + interface Event { + /** + * Blob writes a blob (bytes slice) response. + */ + blob(status: number, contentType: string, b: string|Array): void + } + interface Event { + /** + * FileFS serves the specified filename from fsys. + * + * It is similar to [echo.FileFS] for consistency with earlier versions. + */ + fileFS(fsys: fs.FS, filename: string): void + } + interface Event { + /** + * NoContent writes a response with no body (ex. 204). + */ + noContent(status: number): void + } + interface Event { + /** + * Redirect writes a redirect response to the specified url. + * The status code must be in between 300 – 399 range. + */ + redirect(status: number, url: string): void + } + interface Event { + error(status: number, message: string, errData: any): (ApiError) + } + interface Event { + badRequestError(message: string, errData: any): (ApiError) + } + interface Event { + notFoundError(message: string, errData: any): (ApiError) + } + interface Event { + forbiddenError(message: string, errData: any): (ApiError) + } + interface Event { + unauthorizedError(message: string, errData: any): (ApiError) + } + interface Event { + tooManyRequestsError(message: string, errData: any): (ApiError) + } + interface Event { + internalServerError(message: string, errData: any): (ApiError) + } + interface Event { + /** + * BindBody unmarshal the request body into the provided dst. + * + * dst must be either a struct pointer or map[string]any. + * + * The rules how the body will be scanned depends on the request Content-Type. 
+ * + * Currently the following Content-Types are supported: + * ``` + * - application/json + * - text/xml, application/xml + * - multipart/form-data, application/x-www-form-urlencoded + * ``` + * + * Respectively the following struct tags are supported (again, which one will be used depends on the Content-Type): + * ``` + * - "json" (json body)- uses the builtin Go json package for unmarshaling. + * - "xml" (xml body) - uses the builtin Go xml package for unmarshaling. + * - "form" (form data) - utilizes the custom [router.UnmarshalRequestData] method. + * ``` + * + * NB! When dst is a struct make sure that it doesn't have public fields + * that shouldn't be bindable and it is advisible such fields to be unexported + * or have a separate struct just for the binding. For example: + * + * ``` + * data := struct{ + * somethingPrivate string + * + * Title string `json:"title" form:"title"` + * Total int `json:"total" form:"total"` + * } + * err := e.BindBody(&data) + * ``` + */ + bindBody(dst: any): void + } + /** + * Router defines a thin wrapper around the standard Go [http.ServeMux] by + * adding support for routing sub-groups, middlewares and other common utils. + * + * Example: + * + * ``` + * r := NewRouter[*MyEvent](eventFactory) + * + * // middlewares + * r.BindFunc(m1, m2) + * + * // routes + * r.GET("/test", handler1) + * + * // sub-routers/groups + * api := r.Group("/api") + * api.GET("/admins", handler2) + * + * // generate a http.ServeMux instance based on the router configurations + * mux, _ := r.BuildMux() + * + * http.ListenAndServe("localhost:8090", mux) + * ``` + */ + type _sqBklvO = RouterGroup + interface Router extends _sqBklvO { } } @@ -20730,25 +20641,6 @@ namespace sync { } } -/** - * Package io provides basic interfaces to I/O primitives. 
- * Its primary job is to wrap existing implementations of such primitives, - * such as those in package os, into shared public interfaces that - * abstract the functionality, plus some other related primitives. - * - * Because these interfaces and primitives wrap lower-level operations with - * various implementations, unless otherwise informed clients should not - * assume they are safe for parallel execution. - */ -namespace io { - /** - * WriteCloser is the interface that groups the basic Write and Close methods. - */ - interface WriteCloser { - [key:string]: any; - } -} - /** * Package syscall contains an interface to the low-level operating system * primitives. The details vary depending on the underlying system, and @@ -20945,258 +20837,6 @@ namespace time { } } -/** - * Package fs defines basic interfaces to a file system. - * A file system can be provided by the host operating system - * but also by other packages. - * - * See the [testing/fstest] package for support with testing - * implementations of file systems. - */ -namespace fs { -} - -namespace store { -} - -/** - * Package url parses URLs and implements query escaping. - */ -namespace url { - /** - * A URL represents a parsed URL (technically, a URI reference). - * - * The general form represented is: - * - * ``` - * [scheme:][//[userinfo@]host][/]path[?query][#fragment] - * ``` - * - * URLs that do not start with a slash after the scheme are interpreted as: - * - * ``` - * scheme:opaque[?query][#fragment] - * ``` - * - * The Host field contains the host and port subcomponents of the URL. - * When the port is present, it is separated from the host with a colon. - * When the host is an IPv6 address, it must be enclosed in square brackets: - * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port - * into a string suitable for the Host field, adding square brackets to - * the host when necessary. - * - * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. 
- * A consequence is that it is impossible to tell which slashes in the Path were - * slashes in the raw URL and which were %2f. This distinction is rarely important, - * but when it is, the code should use the [URL.EscapedPath] method, which preserves - * the original encoding of Path. - * - * The RawPath field is an optional field which is only set when the default - * encoding of Path is different from the escaped path. See the EscapedPath method - * for more details. - * - * URL's String method uses the EscapedPath method to obtain the path. - */ - interface URL { - scheme: string - opaque: string // encoded opaque data - user?: Userinfo // username and password information - host: string // host or host:port (see Hostname and Port methods) - path: string // path (relative paths may omit leading slash) - rawPath: string // encoded path hint (see EscapedPath method) - omitHost: boolean // do not emit empty host (authority) - forceQuery: boolean // append a query ('?') even if RawQuery is empty - rawQuery: string // encoded query values, without '?' - fragment: string // fragment for references, without '#' - rawFragment: string // encoded fragment hint (see EscapedFragment method) - } - interface URL { - /** - * EscapedPath returns the escaped form of u.Path. - * In general there are multiple possible escaped forms of any path. - * EscapedPath returns u.RawPath when it is a valid escaping of u.Path. - * Otherwise EscapedPath ignores u.RawPath and computes an escaped - * form on its own. - * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct - * their results. - * In general, code should call EscapedPath instead of - * reading u.RawPath directly. - */ - escapedPath(): string - } - interface URL { - /** - * EscapedFragment returns the escaped form of u.Fragment. - * In general there are multiple possible escaped forms of any fragment. - * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. 
- * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped - * form on its own. - * The [URL.String] method uses EscapedFragment to construct its result. - * In general, code should call EscapedFragment instead of - * reading u.RawFragment directly. - */ - escapedFragment(): string - } - interface URL { - /** - * String reassembles the [URL] into a valid URL string. - * The general form of the result is one of: - * - * ``` - * scheme:opaque?query#fragment - * scheme://userinfo@host/path?query#fragment - * ``` - * - * If u.Opaque is non-empty, String uses the first form; - * otherwise it uses the second form. - * Any non-ASCII characters in host are escaped. - * To obtain the path, String uses u.EscapedPath(). - * - * In the second form, the following rules apply: - * ``` - * - if u.Scheme is empty, scheme: is omitted. - * - if u.User is nil, userinfo@ is omitted. - * - if u.Host is empty, host/ is omitted. - * - if u.Scheme and u.Host are empty and u.User is nil, - * the entire scheme://userinfo@host/ is omitted. - * - if u.Host is non-empty and u.Path begins with a /, - * the form host/path does not add its own /. - * - if u.RawQuery is empty, ?query is omitted. - * - if u.Fragment is empty, #fragment is omitted. - * ``` - */ - string(): string - } - interface URL { - /** - * Redacted is like [URL.String] but replaces any password with "xxxxx". - * Only the password in u.User is redacted. - */ - redacted(): string - } - /** - * Values maps a string key to a list of values. - * It is typically used for query parameters and form values. - * Unlike in the http.Header map, the keys in a Values map - * are case-sensitive. - */ - interface Values extends _TygojaDict{} - interface Values { - /** - * Get gets the first value associated with the given key. - * If there are no values associated with the key, Get returns - * the empty string. To access multiple values, use the map - * directly. 
- */ - get(key: string): string - } - interface Values { - /** - * Set sets the key to value. It replaces any existing - * values. - */ - set(key: string, value: string): void - } - interface Values { - /** - * Add adds the value to key. It appends to any existing - * values associated with key. - */ - add(key: string, value: string): void - } - interface Values { - /** - * Del deletes the values associated with key. - */ - del(key: string): void - } - interface Values { - /** - * Has checks whether a given key is set. - */ - has(key: string): boolean - } - interface Values { - /** - * Encode encodes the values into “URL encoded” form - * ("bar=baz&foo=quux") sorted by key. - */ - encode(): string - } - interface URL { - /** - * IsAbs reports whether the [URL] is absolute. - * Absolute means that it has a non-empty scheme. - */ - isAbs(): boolean - } - interface URL { - /** - * Parse parses a [URL] in the context of the receiver. The provided URL - * may be relative or absolute. Parse returns nil, err on parse - * failure, otherwise its return value is the same as [URL.ResolveReference]. - */ - parse(ref: string): (URL) - } - interface URL { - /** - * ResolveReference resolves a URI reference to an absolute URI from - * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference - * may be relative or absolute. ResolveReference always returns a new - * [URL] instance, even if the returned URL is identical to either the - * base or reference. If ref is an absolute URL, then ResolveReference - * ignores base and returns a copy of ref. - */ - resolveReference(ref: URL): (URL) - } - interface URL { - /** - * Query parses RawQuery and returns the corresponding values. - * It silently discards malformed value pairs. - * To check errors use [ParseQuery]. - */ - query(): Values - } - interface URL { - /** - * RequestURI returns the encoded path?query or opaque?query - * string that would be used in an HTTP request for u. 
- */ - requestURI(): string - } - interface URL { - /** - * Hostname returns u.Host, stripping any valid port number if present. - * - * If the result is enclosed in square brackets, as literal IPv6 addresses are, - * the square brackets are removed from the result. - */ - hostname(): string - } - interface URL { - /** - * Port returns the port part of u.Host, without the leading colon. - * - * If u.Host doesn't contain a valid numeric port, Port returns an empty string. - */ - port(): string - } - interface URL { - marshalBinary(): string|Array - } - interface URL { - unmarshalBinary(text: string|Array): void - } - interface URL { - /** - * JoinPath returns a new [URL] with the provided path elements joined to - * any existing path and the resulting path cleaned of any ./ or ../ elements. - * Any sequences of multiple / characters will be reduced to a single /. - */ - joinPath(...elem: string[]): (URL) - } -} - /** * Package context defines the Context type, which carries deadlines, * cancellation signals, and other request-scoped values across API boundaries @@ -21254,178 +20894,33 @@ namespace context { } /** - * Package net provides a portable interface for network I/O, including - * TCP/IP, UDP, domain name resolution, and Unix domain sockets. + * Package io provides basic interfaces to I/O primitives. + * Its primary job is to wrap existing implementations of such primitives, + * such as those in package os, into shared public interfaces that + * abstract the functionality, plus some other related primitives. * - * Although the package provides access to low-level networking - * primitives, most clients will need only the basic interface provided - * by the [Dial], [Listen], and Accept functions and the associated - * [Conn] and [Listener] interfaces. The crypto/tls package uses - * the same interfaces and similar Dial and Listen functions. 
- * - * The Dial function connects to a server: - * - * ``` - * conn, err := net.Dial("tcp", "golang.org:80") - * if err != nil { - * // handle error - * } - * fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n") - * status, err := bufio.NewReader(conn).ReadString('\n') - * // ... - * ``` - * - * The Listen function creates servers: - * - * ``` - * ln, err := net.Listen("tcp", ":8080") - * if err != nil { - * // handle error - * } - * for { - * conn, err := ln.Accept() - * if err != nil { - * // handle error - * } - * go handleConnection(conn) - * } - * ``` - * - * # Name Resolution - * - * The method for resolving domain names, whether indirectly with functions like Dial - * or directly with functions like [LookupHost] and [LookupAddr], varies by operating system. - * - * On Unix systems, the resolver has two options for resolving names. - * It can use a pure Go resolver that sends DNS requests directly to the servers - * listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C - * library routines such as getaddrinfo and getnameinfo. - * - * On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS - * request consumes only a goroutine, while a blocked C call consumes an operating system thread. - * When cgo is available, the cgo-based resolver is used instead under a variety of - * conditions: on systems that do not let programs make direct DNS requests (OS X), - * when the LOCALDOMAIN environment variable is present (even if empty), - * when the RES_OPTIONS or HOSTALIASES environment variable is non-empty, - * when the ASR_CONFIG environment variable is non-empty (OpenBSD only), - * when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the - * Go resolver does not implement. - * - * On all systems (except Plan 9), when the cgo resolver is being used - * this package applies a concurrent cgo lookup limit to prevent the system - * from running out of system threads. 
Currently, it is limited to 500 concurrent lookups. - * - * The resolver decision can be overridden by setting the netdns value of the - * GODEBUG environment variable (see package runtime) to go or cgo, as in: - * - * ``` - * export GODEBUG=netdns=go # force pure Go resolver - * export GODEBUG=netdns=cgo # force native resolver (cgo, win32) - * ``` - * - * The decision can also be forced while building the Go source tree - * by setting the netgo or netcgo build tag. - * - * A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver - * to print debugging information about its decisions. - * To force a particular resolver while also printing debugging information, - * join the two settings by a plus sign, as in GODEBUG=netdns=go+1. - * - * The Go resolver will send an EDNS0 additional header with a DNS request, - * to signal a willingness to accept a larger DNS packet size. - * This can reportedly cause sporadic failures with the DNS server run - * by some modems and routers. Setting GODEBUG=netedns0=0 will disable - * sending the additional header. - * - * On macOS, if Go code that uses the net package is built with - * -buildmode=c-archive, linking the resulting archive into a C program - * requires passing -lresolv when linking the C code. - * - * On Plan 9, the resolver always accesses /net/cs and /net/dns. - * - * On Windows, in Go 1.18.x and earlier, the resolver always used C - * library functions, such as GetAddrInfo and DnsQuery. + * Because these interfaces and primitives wrap lower-level operations with + * various implementations, unless otherwise informed clients should not + * assume they are safe for parallel execution. */ -namespace net { +namespace io { /** - * Addr represents a network end point address. - * - * The two methods [Addr.Network] and [Addr.String] conventionally return strings - * that can be passed as the arguments to [Dial], but the exact form - * and meaning of the strings is up to the implementation. 
+ * WriteCloser is the interface that groups the basic Write and Close methods. */ - interface Addr { + interface WriteCloser { [key:string]: any; - network(): string // name of the network (for example, "tcp", "udp") - string(): string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80") - } - /** - * A Listener is a generic network listener for stream-oriented protocols. - * - * Multiple goroutines may invoke methods on a Listener simultaneously. - */ - interface Listener { - [key:string]: any; - /** - * Accept waits for and returns the next connection to the listener. - */ - accept(): Conn - /** - * Close closes the listener. - * Any blocked Accept operations will be unblocked and return errors. - */ - close(): void - /** - * Addr returns the listener's network address. - */ - addr(): Addr } } /** - * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html + * Package fs defines basic interfaces to a file system. + * A file system can be provided by the host operating system + * but also by other packages. * - * See README.md for more info. + * See the [testing/fstest] package for support with testing + * implementations of file systems. */ -namespace jwt { - /** - * NumericDate represents a JSON numeric date value, as referenced at - * https://datatracker.ietf.org/doc/html/rfc7519#section-2. - */ - type _seNvycX = time.Time - interface NumericDate extends _seNvycX { - } - interface NumericDate { - /** - * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch - * represented in NumericDate to a byte array, using the precision specified in TimePrecision. - */ - marshalJSON(): string|Array - } - interface NumericDate { - /** - * UnmarshalJSON is an implementation of the json.RawMessage interface and - * deserializes a [NumericDate] from a JSON representation, i.e. a - * [json.Number]. 
This number represents an UNIX epoch with either integer or - * non-integer seconds. - */ - unmarshalJSON(b: string|Array): void - } - /** - * ClaimStrings is basically just a slice of strings, but it can be either - * serialized from a string array or just a string. This type is necessary, - * since the "aud" claim can either be a single string or an array. - */ - interface ClaimStrings extends Array{} - interface ClaimStrings { - unmarshalJSON(data: string|Array): void - } - interface ClaimStrings { - marshalJSON(): string|Array - } -} - -namespace subscriptions { +namespace fs { } /** @@ -21693,6 +21188,654 @@ namespace bufio { } } +/** + * Package sql provides a generic interface around SQL (or SQL-like) + * databases. + * + * The sql package must be used in conjunction with a database driver. + * See https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until + * after the query is completed. + * + * For usage examples, see the wiki page at + * https://golang.org/s/sqlwiki. + */ +namespace sql { + /** + * IsolationLevel is the transaction isolation level used in [TxOptions]. + */ + interface IsolationLevel extends Number{} + interface IsolationLevel { + /** + * String returns the name of the transaction isolation level. + */ + string(): string + } + /** + * DBStats contains database statistics. + */ + interface DBStats { + maxOpenConnections: number // Maximum number of open connections to the database. + /** + * Pool Status + */ + openConnections: number // The number of established connections both in use and idle. + inUse: number // The number of connections currently in use. + idle: number // The number of idle connections. + /** + * Counters + */ + waitCount: number // The total number of connections waited for. + waitDuration: time.Duration // The total time blocked waiting for a new connection. 
+ maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. + maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. + maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. + } + /** + * Conn represents a single database connection rather than a pool of database + * connections. Prefer running queries from [DB] unless there is a specific + * need for a continuous single database connection. + * + * A Conn must call [Conn.Close] to return the connection to the database pool + * and may do so concurrently with a running query. + * + * After a call to [Conn.Close], all operations on the + * connection fail with [ErrConnDone]. + */ + interface Conn { + } + interface Conn { + /** + * PingContext verifies the connection to the database is still alive. + */ + pingContext(ctx: context.Context): void + } + interface Conn { + /** + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Conn { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Conn { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * the [*Row.Scan] method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Conn { + /** + * PrepareContext creates a prepared statement for later queries or executions. 
+ * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Conn { + /** + * Raw executes f exposing the underlying driver connection for the + * duration of f. The driverConn must not be used outside of f. + * + * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable + * until [Conn.Close] is called. + */ + raw(f: (driverConn: any) => void): void + } + interface Conn { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface Conn { + /** + * Close returns the connection to the connection pool. + * All operations after a Close will return with [ErrConnDone]. + * Close is safe to call concurrently with other operations and will + * block until all other operations finish. It may be useful to first + * cancel any used context and then call close directly after. + */ + close(): void + } + /** + * ColumnType contains the name and type of a column. + */ + interface ColumnType { + } + interface ColumnType { + /** + * Name returns the name or alias of the column. 
+ */ + name(): string + } + interface ColumnType { + /** + * Length returns the column type length for variable length column types such + * as text and binary field types. If the type length is unbounded the value will + * be [math.MaxInt64] (any database limits will still apply). + * If the column type is not variable length, such as an int, or if not supported + * by the driver ok is false. + */ + length(): [number, boolean] + } + interface ColumnType { + /** + * DecimalSize returns the scale and precision of a decimal type. + * If not applicable or if not supported ok is false. + */ + decimalSize(): [number, number, boolean] + } + interface ColumnType { + /** + * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. + * If a driver does not support this property ScanType will return + * the type of an empty interface. + */ + scanType(): any + } + interface ColumnType { + /** + * Nullable reports whether the column may be null. + * If a driver does not support this property ok will be false. + */ + nullable(): [boolean, boolean] + } + interface ColumnType { + /** + * DatabaseTypeName returns the database system name of the column type. If an empty + * string is returned, then the driver type name is not supported. + * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers + * are not included. + * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", + * "INT", and "BIGINT". + */ + databaseTypeName(): string + } + /** + * Row is the result of calling [DB.QueryRow] to select a single row. + */ + interface Row { + } + interface Row { + /** + * Scan copies the columns from the matched row into the values + * pointed at by dest. See the documentation on [Rows.Scan] for details. + * If more than one row matches the query, + * Scan uses the first row and discards the rest. If no row matches + * the query, Scan returns [ErrNoRows]. 
+ */ + scan(...dest: any[]): void + } + interface Row { + /** + * Err provides a way for wrapping packages to check for + * query errors without calling [Row.Scan]. + * Err returns the error, if any, that was encountered while running the query. + * If this error is not nil, this error will also be returned from [Row.Scan]. + */ + err(): void + } +} + +namespace store { +} + +/** + * Package url parses URLs and implements query escaping. + */ +namespace url { + /** + * A URL represents a parsed URL (technically, a URI reference). + * + * The general form represented is: + * + * ``` + * [scheme:][//[userinfo@]host][/]path[?query][#fragment] + * ``` + * + * URLs that do not start with a slash after the scheme are interpreted as: + * + * ``` + * scheme:opaque[?query][#fragment] + * ``` + * + * The Host field contains the host and port subcomponents of the URL. + * When the port is present, it is separated from the host with a colon. + * When the host is an IPv6 address, it must be enclosed in square brackets: + * "[fe80::1]:80". The [net.JoinHostPort] function combines a host and port + * into a string suitable for the Host field, adding square brackets to + * the host when necessary. + * + * Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. + * A consequence is that it is impossible to tell which slashes in the Path were + * slashes in the raw URL and which were %2f. This distinction is rarely important, + * but when it is, the code should use the [URL.EscapedPath] method, which preserves + * the original encoding of Path. + * + * The RawPath field is an optional field which is only set when the default + * encoding of Path is different from the escaped path. See the EscapedPath method + * for more details. + * + * URL's String method uses the EscapedPath method to obtain the path. 
+ */ + interface URL { + scheme: string + opaque: string // encoded opaque data + user?: Userinfo // username and password information + host: string // host or host:port (see Hostname and Port methods) + path: string // path (relative paths may omit leading slash) + rawPath: string // encoded path hint (see EscapedPath method) + omitHost: boolean // do not emit empty host (authority) + forceQuery: boolean // append a query ('?') even if RawQuery is empty + rawQuery: string // encoded query values, without '?' + fragment: string // fragment for references, without '#' + rawFragment: string // encoded fragment hint (see EscapedFragment method) + } + interface URL { + /** + * EscapedPath returns the escaped form of u.Path. + * In general there are multiple possible escaped forms of any path. + * EscapedPath returns u.RawPath when it is a valid escaping of u.Path. + * Otherwise EscapedPath ignores u.RawPath and computes an escaped + * form on its own. + * The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct + * their results. + * In general, code should call EscapedPath instead of + * reading u.RawPath directly. + */ + escapedPath(): string + } + interface URL { + /** + * EscapedFragment returns the escaped form of u.Fragment. + * In general there are multiple possible escaped forms of any fragment. + * EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. + * Otherwise EscapedFragment ignores u.RawFragment and computes an escaped + * form on its own. + * The [URL.String] method uses EscapedFragment to construct its result. + * In general, code should call EscapedFragment instead of + * reading u.RawFragment directly. + */ + escapedFragment(): string + } + interface URL { + /** + * String reassembles the [URL] into a valid URL string. 
+ * The general form of the result is one of: + * + * ``` + * scheme:opaque?query#fragment + * scheme://userinfo@host/path?query#fragment + * ``` + * + * If u.Opaque is non-empty, String uses the first form; + * otherwise it uses the second form. + * Any non-ASCII characters in host are escaped. + * To obtain the path, String uses u.EscapedPath(). + * + * In the second form, the following rules apply: + * ``` + * - if u.Scheme is empty, scheme: is omitted. + * - if u.User is nil, userinfo@ is omitted. + * - if u.Host is empty, host/ is omitted. + * - if u.Scheme and u.Host are empty and u.User is nil, + * the entire scheme://userinfo@host/ is omitted. + * - if u.Host is non-empty and u.Path begins with a /, + * the form host/path does not add its own /. + * - if u.RawQuery is empty, ?query is omitted. + * - if u.Fragment is empty, #fragment is omitted. + * ``` + */ + string(): string + } + interface URL { + /** + * Redacted is like [URL.String] but replaces any password with "xxxxx". + * Only the password in u.User is redacted. + */ + redacted(): string + } + /** + * Values maps a string key to a list of values. + * It is typically used for query parameters and form values. + * Unlike in the http.Header map, the keys in a Values map + * are case-sensitive. + */ + interface Values extends _TygojaDict{} + interface Values { + /** + * Get gets the first value associated with the given key. + * If there are no values associated with the key, Get returns + * the empty string. To access multiple values, use the map + * directly. + */ + get(key: string): string + } + interface Values { + /** + * Set sets the key to value. It replaces any existing + * values. + */ + set(key: string, value: string): void + } + interface Values { + /** + * Add adds the value to key. It appends to any existing + * values associated with key. + */ + add(key: string, value: string): void + } + interface Values { + /** + * Del deletes the values associated with key. 
+ */ + del(key: string): void + } + interface Values { + /** + * Has checks whether a given key is set. + */ + has(key: string): boolean + } + interface Values { + /** + * Encode encodes the values into “URL encoded” form + * ("bar=baz&foo=quux") sorted by key. + */ + encode(): string + } + interface URL { + /** + * IsAbs reports whether the [URL] is absolute. + * Absolute means that it has a non-empty scheme. + */ + isAbs(): boolean + } + interface URL { + /** + * Parse parses a [URL] in the context of the receiver. The provided URL + * may be relative or absolute. Parse returns nil, err on parse + * failure, otherwise its return value is the same as [URL.ResolveReference]. + */ + parse(ref: string): (URL) + } + interface URL { + /** + * ResolveReference resolves a URI reference to an absolute URI from + * an absolute base URI u, per RFC 3986 Section 5.2. The URI reference + * may be relative or absolute. ResolveReference always returns a new + * [URL] instance, even if the returned URL is identical to either the + * base or reference. If ref is an absolute URL, then ResolveReference + * ignores base and returns a copy of ref. + */ + resolveReference(ref: URL): (URL) + } + interface URL { + /** + * Query parses RawQuery and returns the corresponding values. + * It silently discards malformed value pairs. + * To check errors use [ParseQuery]. + */ + query(): Values + } + interface URL { + /** + * RequestURI returns the encoded path?query or opaque?query + * string that would be used in an HTTP request for u. + */ + requestURI(): string + } + interface URL { + /** + * Hostname returns u.Host, stripping any valid port number if present. + * + * If the result is enclosed in square brackets, as literal IPv6 addresses are, + * the square brackets are removed from the result. + */ + hostname(): string + } + interface URL { + /** + * Port returns the port part of u.Host, without the leading colon. 
+ * + * If u.Host doesn't contain a valid numeric port, Port returns an empty string. + */ + port(): string + } + interface URL { + marshalBinary(): string|Array + } + interface URL { + unmarshalBinary(text: string|Array): void + } + interface URL { + /** + * JoinPath returns a new [URL] with the provided path elements joined to + * any existing path and the resulting path cleaned of any ./ or ../ elements. + * Any sequences of multiple / characters will be reduced to a single /. + */ + joinPath(...elem: string[]): (URL) + } +} + +/** + * Package net provides a portable interface for network I/O, including + * TCP/IP, UDP, domain name resolution, and Unix domain sockets. + * + * Although the package provides access to low-level networking + * primitives, most clients will need only the basic interface provided + * by the [Dial], [Listen], and Accept functions and the associated + * [Conn] and [Listener] interfaces. The crypto/tls package uses + * the same interfaces and similar Dial and Listen functions. + * + * The Dial function connects to a server: + * + * ``` + * conn, err := net.Dial("tcp", "golang.org:80") + * if err != nil { + * // handle error + * } + * fmt.Fprintf(conn, "GET / HTTP/1.0\r\n\r\n") + * status, err := bufio.NewReader(conn).ReadString('\n') + * // ... + * ``` + * + * The Listen function creates servers: + * + * ``` + * ln, err := net.Listen("tcp", ":8080") + * if err != nil { + * // handle error + * } + * for { + * conn, err := ln.Accept() + * if err != nil { + * // handle error + * } + * go handleConnection(conn) + * } + * ``` + * + * # Name Resolution + * + * The method for resolving domain names, whether indirectly with functions like Dial + * or directly with functions like [LookupHost] and [LookupAddr], varies by operating system. + * + * On Unix systems, the resolver has two options for resolving names. 
+ * It can use a pure Go resolver that sends DNS requests directly to the servers + * listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C + * library routines such as getaddrinfo and getnameinfo. + * + * On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS + * request consumes only a goroutine, while a blocked C call consumes an operating system thread. + * When cgo is available, the cgo-based resolver is used instead under a variety of + * conditions: on systems that do not let programs make direct DNS requests (OS X), + * when the LOCALDOMAIN environment variable is present (even if empty), + * when the RES_OPTIONS or HOSTALIASES environment variable is non-empty, + * when the ASR_CONFIG environment variable is non-empty (OpenBSD only), + * when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the + * Go resolver does not implement. + * + * On all systems (except Plan 9), when the cgo resolver is being used + * this package applies a concurrent cgo lookup limit to prevent the system + * from running out of system threads. Currently, it is limited to 500 concurrent lookups. + * + * The resolver decision can be overridden by setting the netdns value of the + * GODEBUG environment variable (see package runtime) to go or cgo, as in: + * + * ``` + * export GODEBUG=netdns=go # force pure Go resolver + * export GODEBUG=netdns=cgo # force native resolver (cgo, win32) + * ``` + * + * The decision can also be forced while building the Go source tree + * by setting the netgo or netcgo build tag. + * + * A numeric netdns setting, as in GODEBUG=netdns=1, causes the resolver + * to print debugging information about its decisions. + * To force a particular resolver while also printing debugging information, + * join the two settings by a plus sign, as in GODEBUG=netdns=go+1. 
+ * + * The Go resolver will send an EDNS0 additional header with a DNS request, + * to signal a willingness to accept a larger DNS packet size. + * This can reportedly cause sporadic failures with the DNS server run + * by some modems and routers. Setting GODEBUG=netedns0=0 will disable + * sending the additional header. + * + * On macOS, if Go code that uses the net package is built with + * -buildmode=c-archive, linking the resulting archive into a C program + * requires passing -lresolv when linking the C code. + * + * On Plan 9, the resolver always accesses /net/cs and /net/dns. + * + * On Windows, in Go 1.18.x and earlier, the resolver always used C + * library functions, such as GetAddrInfo and DnsQuery. + */ +namespace net { + /** + * Addr represents a network end point address. + * + * The two methods [Addr.Network] and [Addr.String] conventionally return strings + * that can be passed as the arguments to [Dial], but the exact form + * and meaning of the strings is up to the implementation. + */ + interface Addr { + [key:string]: any; + network(): string // name of the network (for example, "tcp", "udp") + string(): string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80") + } + /** + * A Listener is a generic network listener for stream-oriented protocols. + * + * Multiple goroutines may invoke methods on a Listener simultaneously. + */ + interface Listener { + [key:string]: any; + /** + * Accept waits for and returns the next connection to the listener. + */ + accept(): Conn + /** + * Close closes the listener. + * Any blocked Accept operations will be unblocked and return errors. + */ + close(): void + /** + * Addr returns the listener's network address. + */ + addr(): Addr + } +} + +/** + * Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html + * + * See README.md for more info. 
+ */ +namespace jwt { + /** + * NumericDate represents a JSON numeric date value, as referenced at + * https://datatracker.ietf.org/doc/html/rfc7519#section-2. + */ + type _sMCEJRz = time.Time + interface NumericDate extends _sMCEJRz { + } + interface NumericDate { + /** + * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch + * represented in NumericDate to a byte array, using the precision specified in TimePrecision. + */ + marshalJSON(): string|Array + } + interface NumericDate { + /** + * UnmarshalJSON is an implementation of the json.RawMessage interface and + * deserializes a [NumericDate] from a JSON representation, i.e. a + * [json.Number]. This number represents an UNIX epoch with either integer or + * non-integer seconds. + */ + unmarshalJSON(b: string|Array): void + } + /** + * ClaimStrings is basically just a slice of strings, but it can be either + * serialized from a string array or just a string. This type is necessary, + * since the "aud" claim can either be a single string or an array. + */ + interface ClaimStrings extends Array{} + interface ClaimStrings { + unmarshalJSON(data: string|Array): void + } + interface ClaimStrings { + marshalJSON(): string|Array + } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { +} + +namespace search { +} + +namespace hook { + /** + * wrapped local Hook embedded struct to limit the public API surface. + */ + type _spggaCU = Hook + interface mainHook extends _spggaCU { + } +} + /** * Package textproto implements generic support for text-based request/response * protocols in the style of HTTP, NNTP, and SMTP. @@ -21854,6 +21997,606 @@ namespace multipart { } } +/** + * Package http provides HTTP client and server implementations. + * + * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: + * + * ``` + * resp, err := http.Get("http://example.com/") + * ... 
+ * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + * ... + * resp, err := http.PostForm("http://example.com/form", + * url.Values{"key": {"Value"}, "id": {"123"}}) + * ``` + * + * The caller must close the response body when finished with it: + * + * ``` + * resp, err := http.Get("http://example.com/") + * if err != nil { + * // handle error + * } + * defer resp.Body.Close() + * body, err := io.ReadAll(resp.Body) + * // ... + * ``` + * + * # Clients and Transports + * + * For control over HTTP client headers, redirect policy, and other + * settings, create a [Client]: + * + * ``` + * client := &http.Client{ + * CheckRedirect: redirectPolicyFunc, + * } + * + * resp, err := client.Get("http://example.com") + * // ... + * + * req, err := http.NewRequest("GET", "http://example.com", nil) + * // ... + * req.Header.Add("If-None-Match", `W/"wyzzy"`) + * resp, err := client.Do(req) + * // ... + * ``` + * + * For control over proxies, TLS configuration, keep-alives, + * compression, and other settings, create a [Transport]: + * + * ``` + * tr := &http.Transport{ + * MaxIdleConns: 10, + * IdleConnTimeout: 30 * time.Second, + * DisableCompression: true, + * } + * client := &http.Client{Transport: tr} + * resp, err := client.Get("https://example.com") + * ``` + * + * Clients and Transports are safe for concurrent use by multiple + * goroutines and for efficiency should only be created once and re-used. + * + * # Servers + * + * ListenAndServe starts an HTTP server with a given address and handler. + * The handler is usually nil, which means to use [DefaultServeMux]. 
+ * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: + * + * ``` + * http.Handle("/foo", fooHandler) + * + * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + * }) + * + * log.Fatal(http.ListenAndServe(":8080", nil)) + * ``` + * + * More control over the server's behavior is available by creating a + * custom Server: + * + * ``` + * s := &http.Server{ + * Addr: ":8080", + * Handler: myHandler, + * ReadTimeout: 10 * time.Second, + * WriteTimeout: 10 * time.Second, + * MaxHeaderBytes: 1 << 20, + * } + * log.Fatal(s.ListenAndServe()) + * ``` + * + * # HTTP/2 + * + * Starting with Go 1.6, the http package has transparent support for the + * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 + * can do so by setting [Transport.TLSNextProto] (for clients) or + * [Server.TLSNextProto] (for servers) to a non-nil, empty + * map. Alternatively, the following GODEBUG settings are + * currently supported: + * + * ``` + * GODEBUG=http2client=0 # disable HTTP/2 client support + * GODEBUG=http2server=0 # disable HTTP/2 server support + * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs + * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps + * ``` + * + * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug + * + * The http package's [Transport] and [Server] both automatically enable + * HTTP/2 support for simple configurations. To enable HTTP/2 for more + * complex configurations, to use lower-level HTTP/2 features, or to use + * a newer version of Go's http2 package, import "golang.org/x/net/http2" + * directly and use its ConfigureTransport and/or ConfigureServer + * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 + * package takes precedence over the net/http package's built-in HTTP/2 + * support. 
+ */ +namespace http { + /** + * A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an + * HTTP response or the Cookie header of an HTTP request. + * + * See https://tools.ietf.org/html/rfc6265 for details. + */ + interface Cookie { + name: string + value: string + quoted: boolean // indicates whether the Value was originally quoted + path: string // optional + domain: string // optional + expires: time.Time // optional + rawExpires: string // for reading cookies only + /** + * MaxAge=0 means no 'Max-Age' attribute specified. + * MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + * MaxAge>0 means Max-Age attribute present and given in seconds + */ + maxAge: number + secure: boolean + httpOnly: boolean + sameSite: SameSite + partitioned: boolean + raw: string + unparsed: Array // Raw text of unparsed attribute-value pairs + } + interface Cookie { + /** + * String returns the serialization of the cookie for use in a [Cookie] + * header (if only Name and Value are set) or a Set-Cookie response + * header (if other fields are set). + * If c is nil or c.Name is invalid, the empty string is returned. + */ + string(): string + } + interface Cookie { + /** + * Valid reports whether the cookie is valid. + */ + valid(): void + } + // @ts-ignore + import mathrand = rand + /** + * A Header represents the key-value pairs in an HTTP header. + * + * The keys should be in canonical form, as returned by + * [CanonicalHeaderKey]. + */ + interface Header extends _TygojaDict{} + interface Header { + /** + * Add adds the key, value pair to the header. + * It appends to any existing values associated with key. + * The key is case insensitive; it is canonicalized by + * [CanonicalHeaderKey]. + */ + add(key: string, value: string): void + } + interface Header { + /** + * Set sets the header entries associated with key to the + * single element value. It replaces any existing values + * associated with key. 
The key is case insensitive; it is + * canonicalized by [textproto.CanonicalMIMEHeaderKey]. + * To use non-canonical keys, assign to the map directly. + */ + set(key: string, value: string): void + } + interface Header { + /** + * Get gets the first value associated with the given key. If + * there are no values associated with the key, Get returns "". + * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is + * used to canonicalize the provided key. Get assumes that all + * keys are stored in canonical form. To use non-canonical keys, + * access the map directly. + */ + get(key: string): string + } + interface Header { + /** + * Values returns all values associated with the given key. + * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is + * used to canonicalize the provided key. To use non-canonical + * keys, access the map directly. + * The returned slice is not a copy. + */ + values(key: string): Array + } + interface Header { + /** + * Del deletes the values associated with key. + * The key is case insensitive; it is canonicalized by + * [CanonicalHeaderKey]. + */ + del(key: string): void + } + interface Header { + /** + * Write writes a header in wire format. + */ + write(w: io.Writer): void + } + interface Header { + /** + * Clone returns a copy of h or nil if h is nil. + */ + clone(): Header + } + interface Header { + /** + * WriteSubset writes a header in wire format. + * If exclude is not nil, keys where exclude[key] == true are not written. + * Keys are not canonicalized before checking the exclude map. + */ + writeSubset(w: io.Writer, exclude: _TygojaDict): void + } + // @ts-ignore + import urlpkg = url + /** + * Response represents the response from an HTTP request. + * + * The [Client] and [Transport] return Responses from servers once + * the response headers have been received. The response body + * is streamed on demand as the Body field is read. + */ + interface Response { + status: string // e.g. 
"200 OK" + statusCode: number // e.g. 200 + proto: string // e.g. "HTTP/1.0" + protoMajor: number // e.g. 1 + protoMinor: number // e.g. 0 + /** + * Header maps header keys to values. If the response had multiple + * headers with the same key, they may be concatenated, with comma + * delimiters. (RFC 7230, section 3.2.2 requires that multiple headers + * be semantically equivalent to a comma-delimited sequence.) When + * Header values are duplicated by other fields in this struct (e.g., + * ContentLength, TransferEncoding, Trailer), the field values are + * authoritative. + * + * Keys in the map are canonicalized (see CanonicalHeaderKey). + */ + header: Header + /** + * Body represents the response body. + * + * The response body is streamed on demand as the Body field + * is read. If the network connection fails or the server + * terminates the response, Body.Read calls return an error. + * + * The http Client and Transport guarantee that Body is always + * non-nil, even on responses without a body or responses with + * a zero-length body. It is the caller's responsibility to + * close Body. The default HTTP client's Transport may not + * reuse HTTP/1.x "keep-alive" TCP connections if the Body is + * not read to completion and closed. + * + * The Body is automatically dechunked if the server replied + * with a "chunked" Transfer-Encoding. + * + * As of Go 1.12, the Body will also implement io.Writer + * on a successful "101 Switching Protocols" response, + * as used by WebSockets and HTTP/2's "h2c" mode. + */ + body: io.ReadCloser + /** + * ContentLength records the length of the associated content. The + * value -1 indicates that the length is unknown. Unless Request.Method + * is "HEAD", values >= 0 indicate that the given number of bytes may + * be read from Body. + */ + contentLength: number + /** + * Contains transfer encodings from outer-most to inner-most. Value is + * nil, means that "identity" encoding is used. 
+ */ + transferEncoding: Array + /** + * Close records whether the header directed that the connection be + * closed after reading Body. The value is advice for clients: neither + * ReadResponse nor Response.Write ever closes a connection. + */ + close: boolean + /** + * Uncompressed reports whether the response was sent compressed but + * was decompressed by the http package. When true, reading from + * Body yields the uncompressed content instead of the compressed + * content actually set from the server, ContentLength is set to -1, + * and the "Content-Length" and "Content-Encoding" fields are deleted + * from the responseHeader. To get the original response from + * the server, set Transport.DisableCompression to true. + */ + uncompressed: boolean + /** + * Trailer maps trailer keys to values in the same + * format as Header. + * + * The Trailer initially contains only nil values, one for + * each key specified in the server's "Trailer" header + * value. Those values are not added to Header. + * + * Trailer must not be accessed concurrently with Read calls + * on the Body. + * + * After Body.Read has returned io.EOF, Trailer will contain + * any trailer values sent by the server. + */ + trailer: Header + /** + * Request is the request that was sent to obtain this Response. + * Request's Body is nil (having already been consumed). + * This is only populated for Client requests. + */ + request?: Request + /** + * TLS contains information about the TLS connection on which the + * response was received. It is nil for unencrypted responses. + * The pointer is shared between responses and should not be + * modified. + */ + tls?: any + } + interface Response { + /** + * Cookies parses and returns the cookies set in the Set-Cookie headers. + */ + cookies(): Array<(Cookie | undefined)> + } + interface Response { + /** + * Location returns the URL of the response's "Location" header, + * if present. Relative redirects are resolved relative to + * [Response.Request]. 
[ErrNoLocation] is returned if no + * Location header is present. + */ + location(): (url.URL) + } + interface Response { + /** + * ProtoAtLeast reports whether the HTTP protocol used + * in the response is at least major.minor. + */ + protoAtLeast(major: number, minor: number): boolean + } + interface Response { + /** + * Write writes r to w in the HTTP/1.x server response format, + * including the status line, headers, body, and optional trailer. + * + * This method consults the following fields of the response r: + * + * ``` + * StatusCode + * ProtoMajor + * ProtoMinor + * Request.Method + * TransferEncoding + * Trailer + * Body + * ContentLength + * Header, values for non-canonical keys will have unpredictable behavior + * ``` + * + * The Response Body is closed after it is sent. + */ + write(w: io.Writer): void + } + /** + * A ConnState represents the state of a client connection to a server. + * It's used by the optional [Server.ConnState] hook. + */ + interface ConnState extends Number{} + interface ConnState { + string(): string + } +} + +/** + * Package oauth2 provides support for making + * OAuth2 authorized and authenticated HTTP requests, + * as specified in RFC 6749. + * It can additionally grant authorization with Bearer JWT. + */ +/** + * Copyright 2023 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file. + */ +namespace oauth2 { + /** + * An AuthCodeOption is passed to Config.AuthCodeURL. + */ + interface AuthCodeOption { + [key:string]: any; + } + /** + * Token represents the credentials used to authorize + * the requests to access protected resources on the OAuth 2.0 + * provider's backend. + * + * Most users of this package should not access fields of Token + * directly. They're exported mostly for use by related packages + * implementing derivative OAuth2 flows. 
+ */ + interface Token { + /** + * AccessToken is the token that authorizes and authenticates + * the requests. + */ + accessToken: string + /** + * TokenType is the type of token. + * The Type method returns either this or "Bearer", the default. + */ + tokenType: string + /** + * RefreshToken is a token that's used by the application + * (as opposed to the user) to refresh the access token + * if it expires. + */ + refreshToken: string + /** + * Expiry is the optional expiration time of the access token. + * + * If zero, TokenSource implementations will reuse the same + * token forever and RefreshToken or equivalent + * mechanisms for that TokenSource will not be used. + */ + expiry: time.Time + /** + * ExpiresIn is the OAuth2 wire format "expires_in" field, + * which specifies how many seconds later the token expires, + * relative to an unknown time base approximately around "now". + * It is the application's responsibility to populate + * `Expiry` from `ExpiresIn` when required. + */ + expiresIn: number + } + interface Token { + /** + * Type returns t.TokenType if non-empty, else "Bearer". + */ + type(): string + } + interface Token { + /** + * SetAuthHeader sets the Authorization header to r using the access + * token in t. + * + * This method is unnecessary when using Transport or an HTTP Client + * returned by this package. + */ + setAuthHeader(r: http.Request): void + } + interface Token { + /** + * WithExtra returns a new Token that's a clone of t, but using the + * provided raw extra map. This is only intended for use by packages + * implementing derivative OAuth2 flows. + */ + withExtra(extra: { + }): (Token) + } + interface Token { + /** + * Extra returns an extra field. + * Extra fields are key-value pairs returned by the server as a + * part of the token retrieval response. + */ + extra(key: string): { + } + } + interface Token { + /** + * Valid reports whether t is non-nil, has an AccessToken, and is not expired. 
+ */ + valid(): boolean + } +} + +/** + * Package cron implements a crontab-like service to execute and schedule + * repeative tasks/jobs. + * + * Example: + * + * ``` + * c := cron.New() + * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) + * c.Start() + * ``` + */ +namespace cron { + /** + * Job defines a single registered cron job. + */ + interface Job { + } + interface Job { + /** + * Id returns the cron job id. + */ + id(): string + } + interface Job { + /** + * Expression returns the plain cron job schedule expression. + */ + expression(): string + } + interface Job { + /** + * Run runs the cron job function. + */ + run(): void + } + interface Job { + /** + * MarshalJSON implements [json.Marshaler] and export the current + * jobs data into valid JSON. + */ + marshalJSON(): string|Array + } +} + +/** + * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. + * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. + */ +namespace cobra { + interface PositionalArgs {(cmd: Command, args: Array): void } + // @ts-ignore + import flag = pflag + /** + * FParseErrWhitelist configures Flag parse errors to be ignored + */ + interface FParseErrWhitelist extends _TygojaAny{} + /** + * Group Structure to manage groups for commands + */ + interface Group { + id: string + title: string + } + /** + * ShellCompDirective is a bit map representing the different behaviors the shell + * can be instructed to have once completions have been provided. 
+ */ + interface ShellCompDirective extends Number{} + /** + * CompletionOptions are the options to control shell completion + */ + interface CompletionOptions { + /** + * DisableDefaultCmd prevents Cobra from creating a default 'completion' command + */ + disableDefaultCmd: boolean + /** + * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + * for shells that support completion descriptions + */ + disableNoDescFlag: boolean + /** + * DisableDescriptions turns off all completion descriptions for shells + * that support them + */ + disableDescriptions: boolean + /** + * HiddenDefaultCmd makes the default 'completion' command hidden + */ + hiddenDefaultCmd: boolean + } +} + +namespace subscriptions { +} + /** * Package slog provides structured logging, * in which log records include a message, @@ -22393,838 +23136,6 @@ namespace slog { import loginternal = internal } -/** - * Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. - * In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. - */ -namespace cobra { - interface PositionalArgs {(cmd: Command, args: Array): void } - // @ts-ignore - import flag = pflag - /** - * FParseErrWhitelist configures Flag parse errors to be ignored - */ - interface FParseErrWhitelist extends _TygojaAny{} - /** - * Group Structure to manage groups for commands - */ - interface Group { - id: string - title: string - } - /** - * ShellCompDirective is a bit map representing the different behaviors the shell - * can be instructed to have once completions have been provided. 
- */ - interface ShellCompDirective extends Number{} - /** - * CompletionOptions are the options to control shell completion - */ - interface CompletionOptions { - /** - * DisableDefaultCmd prevents Cobra from creating a default 'completion' command - */ - disableDefaultCmd: boolean - /** - * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - * for shells that support completion descriptions - */ - disableNoDescFlag: boolean - /** - * DisableDescriptions turns off all completion descriptions for shells - * that support them - */ - disableDescriptions: boolean - /** - * HiddenDefaultCmd makes the default 'completion' command hidden - */ - hiddenDefaultCmd: boolean - } -} - -/** - * Package sql provides a generic interface around SQL (or SQL-like) - * databases. - * - * The sql package must be used in conjunction with a database driver. - * See https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until - * after the query is completed. - * - * For usage examples, see the wiki page at - * https://golang.org/s/sqlwiki. - */ -namespace sql { - /** - * IsolationLevel is the transaction isolation level used in [TxOptions]. - */ - interface IsolationLevel extends Number{} - interface IsolationLevel { - /** - * String returns the name of the transaction isolation level. - */ - string(): string - } - /** - * DBStats contains database statistics. - */ - interface DBStats { - maxOpenConnections: number // Maximum number of open connections to the database. - /** - * Pool Status - */ - openConnections: number // The number of established connections both in use and idle. - inUse: number // The number of connections currently in use. - idle: number // The number of idle connections. - /** - * Counters - */ - waitCount: number // The total number of connections waited for. - waitDuration: time.Duration // The total time blocked waiting for a new connection. 
- maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. - maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. - maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. - } - /** - * Conn represents a single database connection rather than a pool of database - * connections. Prefer running queries from [DB] unless there is a specific - * need for a continuous single database connection. - * - * A Conn must call [Conn.Close] to return the connection to the database pool - * and may do so concurrently with a running query. - * - * After a call to [Conn.Close], all operations on the - * connection fail with [ErrConnDone]. - */ - interface Conn { - } - interface Conn { - /** - * PingContext verifies the connection to the database is still alive. - */ - pingContext(ctx: context.Context): void - } - interface Conn { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Conn { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Conn { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * the [*Row.Scan] method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Conn { - /** - * PrepareContext creates a prepared statement for later queries or executions. 
- * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface Conn { - /** - * Raw executes f exposing the underlying driver connection for the - * duration of f. The driverConn must not be used outside of f. - * - * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable - * until [Conn.Close] is called. - */ - raw(f: (driverConn: any) => void): void - } - interface Conn { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. - */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) - } - interface Conn { - /** - * Close returns the connection to the connection pool. - * All operations after a Close will return with [ErrConnDone]. - * Close is safe to call concurrently with other operations and will - * block until all other operations finish. It may be useful to first - * cancel any used context and then call close directly after. - */ - close(): void - } - /** - * ColumnType contains the name and type of a column. - */ - interface ColumnType { - } - interface ColumnType { - /** - * Name returns the name or alias of the column. 
- */ - name(): string - } - interface ColumnType { - /** - * Length returns the column type length for variable length column types such - * as text and binary field types. If the type length is unbounded the value will - * be [math.MaxInt64] (any database limits will still apply). - * If the column type is not variable length, such as an int, or if not supported - * by the driver ok is false. - */ - length(): [number, boolean] - } - interface ColumnType { - /** - * DecimalSize returns the scale and precision of a decimal type. - * If not applicable or if not supported ok is false. - */ - decimalSize(): [number, number, boolean] - } - interface ColumnType { - /** - * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. - * If a driver does not support this property ScanType will return - * the type of an empty interface. - */ - scanType(): any - } - interface ColumnType { - /** - * Nullable reports whether the column may be null. - * If a driver does not support this property ok will be false. - */ - nullable(): [boolean, boolean] - } - interface ColumnType { - /** - * DatabaseTypeName returns the database system name of the column type. If an empty - * string is returned, then the driver type name is not supported. - * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers - * are not included. - * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", - * "INT", and "BIGINT". - */ - databaseTypeName(): string - } - /** - * Row is the result of calling [DB.QueryRow] to select a single row. - */ - interface Row { - } - interface Row { - /** - * Scan copies the columns from the matched row into the values - * pointed at by dest. See the documentation on [Rows.Scan] for details. - * If more than one row matches the query, - * Scan uses the first row and discards the rest. If no row matches - * the query, Scan returns [ErrNoRows]. 
- */ - scan(...dest: any[]): void - } - interface Row { - /** - * Err provides a way for wrapping packages to check for - * query errors without calling [Row.Scan]. - * Err returns the error, if any, that was encountered while running the query. - * If this error is not nil, this error will also be returned from [Row.Scan]. - */ - err(): void - } -} - -namespace hook { - /** - * wrapped local Hook embedded struct to limit the public API surface. - */ - type _szGRKZz = Hook - interface mainHook extends _szGRKZz { - } -} - -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { -} - -/** - * Package cron implements a crontab-like service to execute and schedule - * repeative tasks/jobs. - * - * Example: - * - * ``` - * c := cron.New() - * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) - * c.Start() - * ``` - */ -namespace cron { - /** - * Job defines a single registered cron job. - */ - interface Job { - } - interface Job { - /** - * Id returns the cron job id. - */ - id(): string - } - interface Job { - /** - * Expression returns the plain cron job schedule expression. - */ - expression(): string - } - interface Job { - /** - * Run runs the cron job function. - */ - run(): void - } - interface Job { - /** - * MarshalJSON implements [json.Marshaler] and export the current - * jobs data into valid JSON. - */ - marshalJSON(): string|Array - } -} - -namespace search { -} - -/** - * Package http provides HTTP client and server implementations. - * - * [Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: - * - * ``` - * resp, err := http.Get("http://example.com/") - * ... - * resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) - * ... 
- * resp, err := http.PostForm("http://example.com/form", - * url.Values{"key": {"Value"}, "id": {"123"}}) - * ``` - * - * The caller must close the response body when finished with it: - * - * ``` - * resp, err := http.Get("http://example.com/") - * if err != nil { - * // handle error - * } - * defer resp.Body.Close() - * body, err := io.ReadAll(resp.Body) - * // ... - * ``` - * - * # Clients and Transports - * - * For control over HTTP client headers, redirect policy, and other - * settings, create a [Client]: - * - * ``` - * client := &http.Client{ - * CheckRedirect: redirectPolicyFunc, - * } - * - * resp, err := client.Get("http://example.com") - * // ... - * - * req, err := http.NewRequest("GET", "http://example.com", nil) - * // ... - * req.Header.Add("If-None-Match", `W/"wyzzy"`) - * resp, err := client.Do(req) - * // ... - * ``` - * - * For control over proxies, TLS configuration, keep-alives, - * compression, and other settings, create a [Transport]: - * - * ``` - * tr := &http.Transport{ - * MaxIdleConns: 10, - * IdleConnTimeout: 30 * time.Second, - * DisableCompression: true, - * } - * client := &http.Client{Transport: tr} - * resp, err := client.Get("https://example.com") - * ``` - * - * Clients and Transports are safe for concurrent use by multiple - * goroutines and for efficiency should only be created once and re-used. - * - * # Servers - * - * ListenAndServe starts an HTTP server with a given address and handler. - * The handler is usually nil, which means to use [DefaultServeMux]. 
- * [Handle] and [HandleFunc] add handlers to [DefaultServeMux]: - * - * ``` - * http.Handle("/foo", fooHandler) - * - * http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { - * fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) - * }) - * - * log.Fatal(http.ListenAndServe(":8080", nil)) - * ``` - * - * More control over the server's behavior is available by creating a - * custom Server: - * - * ``` - * s := &http.Server{ - * Addr: ":8080", - * Handler: myHandler, - * ReadTimeout: 10 * time.Second, - * WriteTimeout: 10 * time.Second, - * MaxHeaderBytes: 1 << 20, - * } - * log.Fatal(s.ListenAndServe()) - * ``` - * - * # HTTP/2 - * - * Starting with Go 1.6, the http package has transparent support for the - * HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 - * can do so by setting [Transport.TLSNextProto] (for clients) or - * [Server.TLSNextProto] (for servers) to a non-nil, empty - * map. Alternatively, the following GODEBUG settings are - * currently supported: - * - * ``` - * GODEBUG=http2client=0 # disable HTTP/2 client support - * GODEBUG=http2server=0 # disable HTTP/2 server support - * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs - * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps - * ``` - * - * Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug - * - * The http package's [Transport] and [Server] both automatically enable - * HTTP/2 support for simple configurations. To enable HTTP/2 for more - * complex configurations, to use lower-level HTTP/2 features, or to use - * a newer version of Go's http2 package, import "golang.org/x/net/http2" - * directly and use its ConfigureTransport and/or ConfigureServer - * functions. Manually configuring HTTP/2 via the golang.org/x/net/http2 - * package takes precedence over the net/http package's built-in HTTP/2 - * support. 
- */ -namespace http { - /** - * A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an - * HTTP response or the Cookie header of an HTTP request. - * - * See https://tools.ietf.org/html/rfc6265 for details. - */ - interface Cookie { - name: string - value: string - quoted: boolean // indicates whether the Value was originally quoted - path: string // optional - domain: string // optional - expires: time.Time // optional - rawExpires: string // for reading cookies only - /** - * MaxAge=0 means no 'Max-Age' attribute specified. - * MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' - * MaxAge>0 means Max-Age attribute present and given in seconds - */ - maxAge: number - secure: boolean - httpOnly: boolean - sameSite: SameSite - partitioned: boolean - raw: string - unparsed: Array // Raw text of unparsed attribute-value pairs - } - interface Cookie { - /** - * String returns the serialization of the cookie for use in a [Cookie] - * header (if only Name and Value are set) or a Set-Cookie response - * header (if other fields are set). - * If c is nil or c.Name is invalid, the empty string is returned. - */ - string(): string - } - interface Cookie { - /** - * Valid reports whether the cookie is valid. - */ - valid(): void - } - // @ts-ignore - import mathrand = rand - /** - * A Header represents the key-value pairs in an HTTP header. - * - * The keys should be in canonical form, as returned by - * [CanonicalHeaderKey]. - */ - interface Header extends _TygojaDict{} - interface Header { - /** - * Add adds the key, value pair to the header. - * It appends to any existing values associated with key. - * The key is case insensitive; it is canonicalized by - * [CanonicalHeaderKey]. - */ - add(key: string, value: string): void - } - interface Header { - /** - * Set sets the header entries associated with key to the - * single element value. It replaces any existing values - * associated with key. 
The key is case insensitive; it is - * canonicalized by [textproto.CanonicalMIMEHeaderKey]. - * To use non-canonical keys, assign to the map directly. - */ - set(key: string, value: string): void - } - interface Header { - /** - * Get gets the first value associated with the given key. If - * there are no values associated with the key, Get returns "". - * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is - * used to canonicalize the provided key. Get assumes that all - * keys are stored in canonical form. To use non-canonical keys, - * access the map directly. - */ - get(key: string): string - } - interface Header { - /** - * Values returns all values associated with the given key. - * It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is - * used to canonicalize the provided key. To use non-canonical - * keys, access the map directly. - * The returned slice is not a copy. - */ - values(key: string): Array - } - interface Header { - /** - * Del deletes the values associated with key. - * The key is case insensitive; it is canonicalized by - * [CanonicalHeaderKey]. - */ - del(key: string): void - } - interface Header { - /** - * Write writes a header in wire format. - */ - write(w: io.Writer): void - } - interface Header { - /** - * Clone returns a copy of h or nil if h is nil. - */ - clone(): Header - } - interface Header { - /** - * WriteSubset writes a header in wire format. - * If exclude is not nil, keys where exclude[key] == true are not written. - * Keys are not canonicalized before checking the exclude map. - */ - writeSubset(w: io.Writer, exclude: _TygojaDict): void - } - // @ts-ignore - import urlpkg = url - /** - * Response represents the response from an HTTP request. - * - * The [Client] and [Transport] return Responses from servers once - * the response headers have been received. The response body - * is streamed on demand as the Body field is read. - */ - interface Response { - status: string // e.g. 
"200 OK" - statusCode: number // e.g. 200 - proto: string // e.g. "HTTP/1.0" - protoMajor: number // e.g. 1 - protoMinor: number // e.g. 0 - /** - * Header maps header keys to values. If the response had multiple - * headers with the same key, they may be concatenated, with comma - * delimiters. (RFC 7230, section 3.2.2 requires that multiple headers - * be semantically equivalent to a comma-delimited sequence.) When - * Header values are duplicated by other fields in this struct (e.g., - * ContentLength, TransferEncoding, Trailer), the field values are - * authoritative. - * - * Keys in the map are canonicalized (see CanonicalHeaderKey). - */ - header: Header - /** - * Body represents the response body. - * - * The response body is streamed on demand as the Body field - * is read. If the network connection fails or the server - * terminates the response, Body.Read calls return an error. - * - * The http Client and Transport guarantee that Body is always - * non-nil, even on responses without a body or responses with - * a zero-length body. It is the caller's responsibility to - * close Body. The default HTTP client's Transport may not - * reuse HTTP/1.x "keep-alive" TCP connections if the Body is - * not read to completion and closed. - * - * The Body is automatically dechunked if the server replied - * with a "chunked" Transfer-Encoding. - * - * As of Go 1.12, the Body will also implement io.Writer - * on a successful "101 Switching Protocols" response, - * as used by WebSockets and HTTP/2's "h2c" mode. - */ - body: io.ReadCloser - /** - * ContentLength records the length of the associated content. The - * value -1 indicates that the length is unknown. Unless Request.Method - * is "HEAD", values >= 0 indicate that the given number of bytes may - * be read from Body. - */ - contentLength: number - /** - * Contains transfer encodings from outer-most to inner-most. Value is - * nil, means that "identity" encoding is used. 
- */ - transferEncoding: Array - /** - * Close records whether the header directed that the connection be - * closed after reading Body. The value is advice for clients: neither - * ReadResponse nor Response.Write ever closes a connection. - */ - close: boolean - /** - * Uncompressed reports whether the response was sent compressed but - * was decompressed by the http package. When true, reading from - * Body yields the uncompressed content instead of the compressed - * content actually set from the server, ContentLength is set to -1, - * and the "Content-Length" and "Content-Encoding" fields are deleted - * from the responseHeader. To get the original response from - * the server, set Transport.DisableCompression to true. - */ - uncompressed: boolean - /** - * Trailer maps trailer keys to values in the same - * format as Header. - * - * The Trailer initially contains only nil values, one for - * each key specified in the server's "Trailer" header - * value. Those values are not added to Header. - * - * Trailer must not be accessed concurrently with Read calls - * on the Body. - * - * After Body.Read has returned io.EOF, Trailer will contain - * any trailer values sent by the server. - */ - trailer: Header - /** - * Request is the request that was sent to obtain this Response. - * Request's Body is nil (having already been consumed). - * This is only populated for Client requests. - */ - request?: Request - /** - * TLS contains information about the TLS connection on which the - * response was received. It is nil for unencrypted responses. - * The pointer is shared between responses and should not be - * modified. - */ - tls?: any - } - interface Response { - /** - * Cookies parses and returns the cookies set in the Set-Cookie headers. - */ - cookies(): Array<(Cookie | undefined)> - } - interface Response { - /** - * Location returns the URL of the response's "Location" header, - * if present. Relative redirects are resolved relative to - * [Response.Request]. 
[ErrNoLocation] is returned if no - * Location header is present. - */ - location(): (url.URL) - } - interface Response { - /** - * ProtoAtLeast reports whether the HTTP protocol used - * in the response is at least major.minor. - */ - protoAtLeast(major: number, minor: number): boolean - } - interface Response { - /** - * Write writes r to w in the HTTP/1.x server response format, - * including the status line, headers, body, and optional trailer. - * - * This method consults the following fields of the response r: - * - * ``` - * StatusCode - * ProtoMajor - * ProtoMinor - * Request.Method - * TransferEncoding - * Trailer - * Body - * ContentLength - * Header, values for non-canonical keys will have unpredictable behavior - * ``` - * - * The Response Body is closed after it is sent. - */ - write(w: io.Writer): void - } - /** - * A ConnState represents the state of a client connection to a server. - * It's used by the optional [Server.ConnState] hook. - */ - interface ConnState extends Number{} - interface ConnState { - string(): string - } -} - -/** - * Package oauth2 provides support for making - * OAuth2 authorized and authenticated HTTP requests, - * as specified in RFC 6749. - * It can additionally grant authorization with Bearer JWT. - */ -/** - * Copyright 2023 The Go Authors. All rights reserved. - * Use of this source code is governed by a BSD-style - * license that can be found in the LICENSE file. - */ -namespace oauth2 { - /** - * An AuthCodeOption is passed to Config.AuthCodeURL. - */ - interface AuthCodeOption { - [key:string]: any; - } - /** - * Token represents the credentials used to authorize - * the requests to access protected resources on the OAuth 2.0 - * provider's backend. - * - * Most users of this package should not access fields of Token - * directly. They're exported mostly for use by related packages - * implementing derivative OAuth2 flows. 
- */ - interface Token { - /** - * AccessToken is the token that authorizes and authenticates - * the requests. - */ - accessToken: string - /** - * TokenType is the type of token. - * The Type method returns either this or "Bearer", the default. - */ - tokenType: string - /** - * RefreshToken is a token that's used by the application - * (as opposed to the user) to refresh the access token - * if it expires. - */ - refreshToken: string - /** - * Expiry is the optional expiration time of the access token. - * - * If zero, TokenSource implementations will reuse the same - * token forever and RefreshToken or equivalent - * mechanisms for that TokenSource will not be used. - */ - expiry: time.Time - /** - * ExpiresIn is the OAuth2 wire format "expires_in" field, - * which specifies how many seconds later the token expires, - * relative to an unknown time base approximately around "now". - * It is the application's responsibility to populate - * `Expiry` from `ExpiresIn` when required. - */ - expiresIn: number - } - interface Token { - /** - * Type returns t.TokenType if non-empty, else "Bearer". - */ - type(): string - } - interface Token { - /** - * SetAuthHeader sets the Authorization header to r using the access - * token in t. - * - * This method is unnecessary when using Transport or an HTTP Client - * returned by this package. - */ - setAuthHeader(r: http.Request): void - } - interface Token { - /** - * WithExtra returns a new Token that's a clone of t, but using the - * provided raw extra map. This is only intended for use by packages - * implementing derivative OAuth2 flows. - */ - withExtra(extra: { - }): (Token) - } - interface Token { - /** - * Extra returns an extra field. - * Extra fields are key-value pairs returned by the server as a - * part of the token retrieval response. - */ - extra(key: string): { - } - } - interface Token { - /** - * Valid reports whether t is non-nil, has an AccessToken, and is not expired. 
- */ - valid(): boolean - } -} - namespace router { // @ts-ignore import validation = ozzo_validation @@ -23238,551 +23149,6 @@ namespace router { } } -/** - * Package slog provides structured logging, - * in which log records include a message, - * a severity level, and various other attributes - * expressed as key-value pairs. - * - * It defines a type, [Logger], - * which provides several methods (such as [Logger.Info] and [Logger.Error]) - * for reporting events of interest. - * - * Each Logger is associated with a [Handler]. - * A Logger output method creates a [Record] from the method arguments - * and passes it to the Handler, which decides how to handle it. - * There is a default Logger accessible through top-level functions - * (such as [Info] and [Error]) that call the corresponding Logger methods. - * - * A log record consists of a time, a level, a message, and a set of key-value - * pairs, where the keys are strings and the values may be of any type. - * As an example, - * - * ``` - * slog.Info("hello", "count", 3) - * ``` - * - * creates a record containing the time of the call, - * a level of Info, the message "hello", and a single - * pair with key "count" and value 3. - * - * The [Info] top-level function calls the [Logger.Info] method on the default Logger. - * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. - * Besides these convenience methods for common levels, - * there is also a [Logger.Log] method which takes the level as an argument. - * Each of these methods has a corresponding top-level function that uses the - * default logger. - * - * The default handler formats the log record's message, time, level, and attributes - * as a string and passes it to the [log] package. - * - * ``` - * 2022/11/08 15:28:26 INFO hello count=3 - * ``` - * - * For more control over the output format, create a logger with a different handler. 
- * This statement uses [New] to create a new logger with a [TextHandler] - * that writes structured records in text form to standard error: - * - * ``` - * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) - * ``` - * - * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously - * parsed by machine. This statement: - * - * ``` - * logger.Info("hello", "count", 3) - * ``` - * - * produces this output: - * - * ``` - * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 - * ``` - * - * The package also provides [JSONHandler], whose output is line-delimited JSON: - * - * ``` - * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - * logger.Info("hello", "count", 3) - * ``` - * - * produces this output: - * - * ``` - * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} - * ``` - * - * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. - * There are options for setting the minimum level (see Levels, below), - * displaying the source file and line of the log call, and - * modifying attributes before they are logged. - * - * Setting a logger as the default with - * - * ``` - * slog.SetDefault(logger) - * ``` - * - * will cause the top-level functions like [Info] to use it. - * [SetDefault] also updates the default logger used by the [log] package, - * so that existing applications that use [log.Printf] and related functions - * will send log records to the logger's handler without needing to be rewritten. - * - * Some attributes are common to many log calls. - * For example, you may wish to include the URL or trace identifier of a server request - * with all log events arising from the request. 
- * Rather than repeat the attribute with every log call, you can use [Logger.With] - * to construct a new Logger containing the attributes: - * - * ``` - * logger2 := logger.With("url", r.URL) - * ``` - * - * The arguments to With are the same key-value pairs used in [Logger.Info]. - * The result is a new Logger with the same handler as the original, but additional - * attributes that will appear in the output of every call. - * - * # Levels - * - * A [Level] is an integer representing the importance or severity of a log event. - * The higher the level, the more severe the event. - * This package defines constants for the most common levels, - * but any int can be used as a level. - * - * In an application, you may wish to log messages only at a certain level or greater. - * One common configuration is to log messages at Info or higher levels, - * suppressing debug logging until it is needed. - * The built-in handlers can be configured with the minimum level to output by - * setting [HandlerOptions.Level]. - * The program's `main` function typically does this. - * The default value is LevelInfo. - * - * Setting the [HandlerOptions.Level] field to a [Level] value - * fixes the handler's minimum level throughout its lifetime. - * Setting it to a [LevelVar] allows the level to be varied dynamically. - * A LevelVar holds a Level and is safe to read or write from multiple - * goroutines. 
- * To vary the level dynamically for an entire program, first initialize - * a global LevelVar: - * - * ``` - * var programLevel = new(slog.LevelVar) // Info by default - * ``` - * - * Then use the LevelVar to construct a handler, and make it the default: - * - * ``` - * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) - * slog.SetDefault(slog.New(h)) - * ``` - * - * Now the program can change its logging level with a single statement: - * - * ``` - * programLevel.Set(slog.LevelDebug) - * ``` - * - * # Groups - * - * Attributes can be collected into groups. - * A group has a name that is used to qualify the names of its attributes. - * How this qualification is displayed depends on the handler. - * [TextHandler] separates the group and attribute names with a dot. - * [JSONHandler] treats each group as a separate JSON object, with the group name as the key. - * - * Use [Group] to create a Group attribute from a name and a list of key-value pairs: - * - * ``` - * slog.Group("request", - * "method", r.Method, - * "url", r.URL) - * ``` - * - * TextHandler would display this group as - * - * ``` - * request.method=GET request.url=http://example.com - * ``` - * - * JSONHandler would display it as - * - * ``` - * "request":{"method":"GET","url":"http://example.com"} - * ``` - * - * Use [Logger.WithGroup] to qualify all of a Logger's output - * with a group name. Calling WithGroup on a Logger results in a - * new Logger with the same Handler as the original, but with all - * its attributes qualified by the group name. - * - * This can help prevent duplicate attribute keys in large systems, - * where subsystems might use the same keys. 
- * Pass each subsystem a different Logger with its own group name so that - * potential duplicates are qualified: - * - * ``` - * logger := slog.Default().With("id", systemID) - * parserLogger := logger.WithGroup("parser") - * parseInput(input, parserLogger) - * ``` - * - * When parseInput logs with parserLogger, its keys will be qualified with "parser", - * so even if it uses the common key "id", the log line will have distinct keys. - * - * # Contexts - * - * Some handlers may wish to include information from the [context.Context] that is - * available at the call site. One example of such information - * is the identifier for the current span when tracing is enabled. - * - * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first - * argument, as do their corresponding top-level functions. - * - * Although the convenience methods on Logger (Info and so on) and the - * corresponding top-level functions do not take a context, the alternatives ending - * in "Context" do. For example, - * - * ``` - * slog.InfoContext(ctx, "message") - * ``` - * - * It is recommended to pass a context to an output method if one is available. - * - * # Attrs and Values - * - * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as - * alternating keys and values. The statement - * - * ``` - * slog.Info("hello", slog.Int("count", 3)) - * ``` - * - * behaves the same as - * - * ``` - * slog.Info("hello", "count", 3) - * ``` - * - * There are convenience constructors for [Attr] such as [Int], [String], and [Bool] - * for common types, as well as the function [Any] for constructing Attrs of any - * type. - * - * The value part of an Attr is a type called [Value]. - * Like an [any], a Value can hold any Go value, - * but it can represent typical values, including all numbers and strings, - * without an allocation. - * - * For the most efficient log output, use [Logger.LogAttrs]. 
- * It is similar to [Logger.Log] but accepts only Attrs, not alternating - * keys and values; this allows it, too, to avoid allocation. - * - * The call - * - * ``` - * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3)) - * ``` - * - * is the most efficient way to achieve the same output as - * - * ``` - * slog.InfoContext(ctx, "hello", "count", 3) - * ``` - * - * # Customizing a type's logging behavior - * - * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue - * method is used for logging. You can use this to control how values of the type - * appear in logs. For example, you can redact secret information like passwords, - * or gather a struct's fields in a Group. See the examples under [LogValuer] for - * details. - * - * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] - * method handles these cases carefully, avoiding infinite loops and unbounded recursion. - * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly. - * - * # Wrapping output methods - * - * The logger functions use reflection over the call stack to find the file name - * and line number of the logging call within the application. This can produce - * incorrect source information for functions that wrap slog. For instance, if you - * define this function in file mylog.go: - * - * ``` - * func Infof(logger *slog.Logger, format string, args ...any) { - * logger.Info(fmt.Sprintf(format, args...)) - * } - * ``` - * - * and you call it like this in main.go: - * - * ``` - * Infof(slog.Default(), "hello, %s", "world") - * ``` - * - * then slog will report the source file as mylog.go, not main.go. - * - * A correct implementation of Infof will obtain the source location - * (pc) and pass it to NewRecord. - * The Infof function in the package-level example called "wrapping" - * demonstrates how to do this. 
- * - * # Working with Records - * - * Sometimes a Handler will need to modify a Record - * before passing it on to another Handler or backend. - * A Record contains a mixture of simple public fields (e.g. Time, Level, Message) - * and hidden fields that refer to state (such as attributes) indirectly. This - * means that modifying a simple copy of a Record (e.g. by calling - * [Record.Add] or [Record.AddAttrs] to add attributes) - * may have unexpected effects on the original. - * Before modifying a Record, use [Record.Clone] to - * create a copy that shares no state with the original, - * or create a new Record with [NewRecord] - * and build up its Attrs by traversing the old ones with [Record.Attrs]. - * - * # Performance considerations - * - * If profiling your application demonstrates that logging is taking significant time, - * the following suggestions may help. - * - * If many log lines have a common attribute, use [Logger.With] to create a Logger with - * that attribute. The built-in handlers will format that attribute only once, at the - * call to [Logger.With]. The [Handler] interface is designed to allow that optimization, - * and a well-written Handler should take advantage of it. - * - * The arguments to a log call are always evaluated, even if the log event is discarded. - * If possible, defer computation so that it happens only if the value is actually logged. - * For example, consider the call - * - * ``` - * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily - * ``` - * - * The URL.String method will be called even if the logger discards Info-level events. - * Instead, pass the URL directly: - * - * ``` - * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed - * ``` - * - * The built-in [TextHandler] will call its String method, but only - * if the log event is enabled. - * Avoiding the call to String also preserves the structure of the underlying value. 
- * For example [JSONHandler] emits the components of the parsed URL as a JSON object. - * If you want to avoid eagerly paying the cost of the String call - * without causing the handler to potentially inspect the structure of the value, - * wrap the value in a fmt.Stringer implementation that hides its Marshal methods. - * - * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log - * calls. Say you need to log some expensive value: - * - * ``` - * slog.Debug("frobbing", "value", computeExpensiveValue(arg)) - * ``` - * - * Even if this line is disabled, computeExpensiveValue will be called. - * To avoid that, define a type implementing LogValuer: - * - * ``` - * type expensive struct { arg int } - * - * func (e expensive) LogValue() slog.Value { - * return slog.AnyValue(computeExpensiveValue(e.arg)) - * } - * ``` - * - * Then use a value of that type in log calls: - * - * ``` - * slog.Debug("frobbing", "value", expensive{arg}) - * ``` - * - * Now computeExpensiveValue will only be called when the line is enabled. - * - * The built-in handlers acquire a lock before calling [io.Writer.Write] - * to ensure that exactly one [Record] is written at a time in its entirety. - * Although each log record has a timestamp, - * the built-in handlers do not use that time to sort the written records. - * User-defined handlers are responsible for their own locking and sorting. - * - * # Writing a handler - * - * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. - */ -namespace slog { - // @ts-ignore - import loginternal = internal - /** - * A Record holds information about a log event. - * Copies of a Record share state. - * Do not modify a Record after handing out a copy to it. - * Call [NewRecord] to create a new Record. - * Use [Record.Clone] to create a copy with no shared state. - */ - interface Record { - /** - * The time at which the output method (Log, Info, etc.) was called. 
- */ - time: time.Time - /** - * The log message. - */ - message: string - /** - * The level of the event. - */ - level: Level - /** - * The program counter at the time the record was constructed, as determined - * by runtime.Callers. If zero, no program counter is available. - * - * The only valid use for this value is as an argument to - * [runtime.CallersFrames]. In particular, it must not be passed to - * [runtime.FuncForPC]. - */ - pc: number - } - interface Record { - /** - * Clone returns a copy of the record with no shared state. - * The original record and the clone can both be modified - * without interfering with each other. - */ - clone(): Record - } - interface Record { - /** - * NumAttrs returns the number of attributes in the [Record]. - */ - numAttrs(): number - } - interface Record { - /** - * Attrs calls f on each Attr in the [Record]. - * Iteration stops if f returns false. - */ - attrs(f: (_arg0: Attr) => boolean): void - } - interface Record { - /** - * AddAttrs appends the given Attrs to the [Record]'s list of Attrs. - * It omits empty groups. - */ - addAttrs(...attrs: Attr[]): void - } - interface Record { - /** - * Add converts the args to Attrs as described in [Logger.Log], - * then appends the Attrs to the [Record]'s list of Attrs. - * It omits empty groups. - */ - add(...args: any[]): void - } - /** - * A Value can represent any Go value, but unlike type any, - * it can represent most small values without an allocation. - * The zero Value corresponds to nil. - */ - interface Value { - } - interface Value { - /** - * Kind returns v's Kind. - */ - kind(): Kind - } - interface Value { - /** - * Any returns v's value as an any. - */ - any(): any - } - interface Value { - /** - * String returns Value's value as a string, formatted like [fmt.Sprint]. Unlike - * the methods Int64, Float64, and so on, which panic if v is of the - * wrong kind, String never panics. 
- */ - string(): string - } - interface Value { - /** - * Int64 returns v's value as an int64. It panics - * if v is not a signed integer. - */ - int64(): number - } - interface Value { - /** - * Uint64 returns v's value as a uint64. It panics - * if v is not an unsigned integer. - */ - uint64(): number - } - interface Value { - /** - * Bool returns v's value as a bool. It panics - * if v is not a bool. - */ - bool(): boolean - } - interface Value { - /** - * Duration returns v's value as a [time.Duration]. It panics - * if v is not a time.Duration. - */ - duration(): time.Duration - } - interface Value { - /** - * Float64 returns v's value as a float64. It panics - * if v is not a float64. - */ - float64(): number - } - interface Value { - /** - * Time returns v's value as a [time.Time]. It panics - * if v is not a time.Time. - */ - time(): time.Time - } - interface Value { - /** - * LogValuer returns v's value as a LogValuer. It panics - * if v is not a LogValuer. - */ - logValuer(): LogValuer - } - interface Value { - /** - * Group returns v's value as a []Attr. - * It panics if v's [Kind] is not [KindGroup]. - */ - group(): Array - } - interface Value { - /** - * Equal reports whether v and w represent the same Go value. - */ - equal(w: Value): boolean - } - interface Value { - /** - * Resolve repeatedly calls LogValue on v while it implements [LogValuer], - * and returns the result. - * If v resolves to a group, the group's attributes' values are not recursively - * resolved. - * If the number of LogValue calls exceeds a threshold, a Value containing an - * error is returned. - * Resolve's return value is guaranteed not to be of Kind [KindLogValuer]. - */ - resolve(): Value - } -} - /** * Package url parses URLs and implements query escaping. */ @@ -24385,6 +23751,551 @@ namespace http { * * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. 
*/ +namespace slog { + // @ts-ignore + import loginternal = internal + /** + * A Record holds information about a log event. + * Copies of a Record share state. + * Do not modify a Record after handing out a copy to it. + * Call [NewRecord] to create a new Record. + * Use [Record.Clone] to create a copy with no shared state. + */ + interface Record { + /** + * The time at which the output method (Log, Info, etc.) was called. + */ + time: time.Time + /** + * The log message. + */ + message: string + /** + * The level of the event. + */ + level: Level + /** + * The program counter at the time the record was constructed, as determined + * by runtime.Callers. If zero, no program counter is available. + * + * The only valid use for this value is as an argument to + * [runtime.CallersFrames]. In particular, it must not be passed to + * [runtime.FuncForPC]. + */ + pc: number + } + interface Record { + /** + * Clone returns a copy of the record with no shared state. + * The original record and the clone can both be modified + * without interfering with each other. + */ + clone(): Record + } + interface Record { + /** + * NumAttrs returns the number of attributes in the [Record]. + */ + numAttrs(): number + } + interface Record { + /** + * Attrs calls f on each Attr in the [Record]. + * Iteration stops if f returns false. + */ + attrs(f: (_arg0: Attr) => boolean): void + } + interface Record { + /** + * AddAttrs appends the given Attrs to the [Record]'s list of Attrs. + * It omits empty groups. + */ + addAttrs(...attrs: Attr[]): void + } + interface Record { + /** + * Add converts the args to Attrs as described in [Logger.Log], + * then appends the Attrs to the [Record]'s list of Attrs. + * It omits empty groups. + */ + add(...args: any[]): void + } + /** + * A Value can represent any Go value, but unlike type any, + * it can represent most small values without an allocation. + * The zero Value corresponds to nil. 
+ */ + interface Value { + } + interface Value { + /** + * Kind returns v's Kind. + */ + kind(): Kind + } + interface Value { + /** + * Any returns v's value as an any. + */ + any(): any + } + interface Value { + /** + * String returns Value's value as a string, formatted like [fmt.Sprint]. Unlike + * the methods Int64, Float64, and so on, which panic if v is of the + * wrong kind, String never panics. + */ + string(): string + } + interface Value { + /** + * Int64 returns v's value as an int64. It panics + * if v is not a signed integer. + */ + int64(): number + } + interface Value { + /** + * Uint64 returns v's value as a uint64. It panics + * if v is not an unsigned integer. + */ + uint64(): number + } + interface Value { + /** + * Bool returns v's value as a bool. It panics + * if v is not a bool. + */ + bool(): boolean + } + interface Value { + /** + * Duration returns v's value as a [time.Duration]. It panics + * if v is not a time.Duration. + */ + duration(): time.Duration + } + interface Value { + /** + * Float64 returns v's value as a float64. It panics + * if v is not a float64. + */ + float64(): number + } + interface Value { + /** + * Time returns v's value as a [time.Time]. It panics + * if v is not a time.Time. + */ + time(): time.Time + } + interface Value { + /** + * LogValuer returns v's value as a LogValuer. It panics + * if v is not a LogValuer. + */ + logValuer(): LogValuer + } + interface Value { + /** + * Group returns v's value as a []Attr. + * It panics if v's [Kind] is not [KindGroup]. + */ + group(): Array + } + interface Value { + /** + * Equal reports whether v and w represent the same Go value. + */ + equal(w: Value): boolean + } + interface Value { + /** + * Resolve repeatedly calls LogValue on v while it implements [LogValuer], + * and returns the result. + * If v resolves to a group, the group's attributes' values are not recursively + * resolved. 
+ * If the number of LogValue calls exceeds a threshold, a Value containing an + * error is returned. + * Resolve's return value is guaranteed not to be of Kind [KindLogValuer]. + */ + resolve(): Value + } +} + +/** + * Package slog provides structured logging, + * in which log records include a message, + * a severity level, and various other attributes + * expressed as key-value pairs. + * + * It defines a type, [Logger], + * which provides several methods (such as [Logger.Info] and [Logger.Error]) + * for reporting events of interest. + * + * Each Logger is associated with a [Handler]. + * A Logger output method creates a [Record] from the method arguments + * and passes it to the Handler, which decides how to handle it. + * There is a default Logger accessible through top-level functions + * (such as [Info] and [Error]) that call the corresponding Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. + * As an example, + * + * ``` + * slog.Info("hello", "count", 3) + * ``` + * + * creates a record containing the time of the call, + * a level of Info, the message "hello", and a single + * pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default Logger. + * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. + * Besides these convenience methods for common levels, + * there is also a [Logger.Log] method which takes the level as an argument. + * Each of these methods has a corresponding top-level function that uses the + * default logger. + * + * The default handler formats the log record's message, time, level, and attributes + * as a string and passes it to the [log] package. + * + * ``` + * 2022/11/08 15:28:26 INFO hello count=3 + * ``` + * + * For more control over the output format, create a logger with a different handler. 
+ * This statement uses [New] to create a new logger with a [TextHandler] + * that writes structured records in text form to standard error: + * + * ``` + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * ``` + * + * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously + * parsed by machine. This statement: + * + * ``` + * logger.Info("hello", "count", 3) + * ``` + * + * produces this output: + * + * ``` + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * ``` + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * ``` + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * ``` + * + * produces this output: + * + * ``` + * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} + * ``` + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and + * modifying attributes before they are logged. + * + * Setting a logger as the default with + * + * ``` + * slog.SetDefault(logger) + * ``` + * + * will cause the top-level functions like [Info] to use it. + * [SetDefault] also updates the default logger used by the [log] package, + * so that existing applications that use [log.Printf] and related functions + * will send log records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. + * For example, you may wish to include the URL or trace identifier of a server request + * with all log events arising from the request. 
+ * Rather than repeat the attribute with every log call, you can use [Logger.With] + * to construct a new Logger containing the attributes: + * + * ``` + * logger2 := logger.With("url", r.URL) + * ``` + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. + * The result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log event. + * The higher the level, the more severe the event. + * This package defines constants for the most common levels, + * but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or greater. + * One common configuration is to log messages at Info or higher levels, + * suppressing debug logging until it is needed. + * The built-in handlers can be configured with the minimum level to output by + * setting [HandlerOptions.Level]. + * The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value + * fixes the handler's minimum level throughout its lifetime. + * Setting it to a [LevelVar] allows the level to be varied dynamically. + * A LevelVar holds a Level and is safe to read or write from multiple + * goroutines. 
+ * To vary the level dynamically for an entire program, first initialize + * a global LevelVar: + * + * ``` + * var programLevel = new(slog.LevelVar) // Info by default + * ``` + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * ``` + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * ``` + * + * Now the program can change its logging level with a single statement: + * + * ``` + * programLevel.Set(slog.LevelDebug) + * ``` + * + * # Groups + * + * Attributes can be collected into groups. + * A group has a name that is used to qualify the names of its attributes. + * How this qualification is displayed depends on the handler. + * [TextHandler] separates the group and attribute names with a dot. + * [JSONHandler] treats each group as a separate JSON object, with the group name as the key. + * + * Use [Group] to create a Group attribute from a name and a list of key-value pairs: + * + * ``` + * slog.Group("request", + * "method", r.Method, + * "url", r.URL) + * ``` + * + * TextHandler would display this group as + * + * ``` + * request.method=GET request.url=http://example.com + * ``` + * + * JSONHandler would display it as + * + * ``` + * "request":{"method":"GET","url":"http://example.com"} + * ``` + * + * Use [Logger.WithGroup] to qualify all of a Logger's output + * with a group name. Calling WithGroup on a Logger results in a + * new Logger with the same Handler as the original, but with all + * its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, + * where subsystems might use the same keys. 
+ * Pass each subsystem a different Logger with its own group name so that + * potential duplicates are qualified: + * + * ``` + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * ``` + * + * When parseInput logs with parserLogger, its keys will be qualified with "parser", + * so even if it uses the common key "id", the log line will have distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that is + * available at the call site. One example of such information + * is the identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives ending + * in "Context" do. For example, + * + * ``` + * slog.InfoContext(ctx, "message") + * ``` + * + * It is recommended to pass a context to an output method if one is available. + * + * # Attrs and Values + * + * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as + * alternating keys and values. The statement + * + * ``` + * slog.Info("hello", slog.Int("count", 3)) + * ``` + * + * behaves the same as + * + * ``` + * slog.Info("hello", "count", 3) + * ``` + * + * There are convenience constructors for [Attr] such as [Int], [String], and [Bool] + * for common types, as well as the function [Any] for constructing Attrs of any + * type. + * + * The value part of an Attr is a type called [Value]. + * Like an [any], a Value can hold any Go value, + * but it can represent typical values, including all numbers and strings, + * without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. 
+ * It is similar to [Logger.Log] but accepts only Attrs, not alternating + * keys and values; this allows it, too, to avoid allocation. + * + * The call + * + * ``` + * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3)) + * ``` + * + * is the most efficient way to achieve the same output as + * + * ``` + * slog.InfoContext(ctx, "hello", "count", 3) + * ``` + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue + * method is used for logging. You can use this to control how values of the type + * appear in logs. For example, you can redact secret information like passwords, + * or gather a struct's fields in a Group. See the examples under [LogValuer] for + * details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] + * method handles these cases carefully, avoiding infinite loops and unbounded recursion. + * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly. + * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. For instance, if you + * define this function in file mylog.go: + * + * ``` + * func Infof(logger *slog.Logger, format string, args ...any) { + * logger.Info(fmt.Sprintf(format, args...)) + * } + * ``` + * + * and you call it like this in main.go: + * + * ``` + * Infof(slog.Default(), "hello, %s", "world") + * ``` + * + * then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location + * (pc) and pass it to NewRecord. + * The Infof function in the package-level example called "wrapping" + * demonstrates how to do this. 
+ * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record + * before passing it on to another Handler or backend. + * A Record contains a mixture of simple public fields (e.g. Time, Level, Message) + * and hidden fields that refer to state (such as attributes) indirectly. This + * means that modifying a simple copy of a Record (e.g. by calling + * [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. + * Before modifying a Record, use [Record.Clone] to + * create a copy that shares no state with the original, + * or create a new Record with [NewRecord] + * and build up its Attrs by traversing the old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant time, + * the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a Logger with + * that attribute. The built-in handlers will format that attribute only once, at the + * call to [Logger.With]. The [Handler] interface is designed to allow that optimization, + * and a well-written Handler should take advantage of it. + * + * The arguments to a log call are always evaluated, even if the log event is discarded. + * If possible, defer computation so that it happens only if the value is actually logged. + * For example, consider the call + * + * ``` + * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily + * ``` + * + * The URL.String method will be called even if the logger discards Info-level events. + * Instead, pass the URL directly: + * + * ``` + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * ``` + * + * The built-in [TextHandler] will call its String method, but only + * if the log event is enabled. + * Avoiding the call to String also preserves the structure of the underlying value. 
+ * For example [JSONHandler] emits the components of the parsed URL as a JSON object. + * If you want to avoid eagerly paying the cost of the String call + * without causing the handler to potentially inspect the structure of the value, + * wrap the value in a fmt.Stringer implementation that hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log + * calls. Say you need to log some expensive value: + * + * ``` + * slog.Debug("frobbing", "value", computeExpensiveValue(arg)) + * ``` + * + * Even if this line is disabled, computeExpensiveValue will be called. + * To avoid that, define a type implementing LogValuer: + * + * ``` + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * ``` + * + * Then use a value of that type in log calls: + * + * ``` + * slog.Debug("frobbing", "value", expensive{arg}) + * ``` + * + * Now computeExpensiveValue will only be called when the line is enabled. + * + * The built-in handlers acquire a lock before calling [io.Writer.Write] + * to ensure that exactly one [Record] is written at a time in its entirety. + * Although each log record has a timestamp, + * the built-in handlers do not use that time to sort the written records. + * User-defined handlers are responsible for their own locking and sorting. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. 
+ */ namespace slog { // @ts-ignore import loginternal = internal diff --git a/plugins/jsvm/internal/types/types.go b/plugins/jsvm/internal/types/types.go index 0ab5e20f..09557b5f 100644 --- a/plugins/jsvm/internal/types/types.go +++ b/plugins/jsvm/internal/types/types.go @@ -1158,7 +1158,6 @@ func main() { "pflag.*": "any", "flag.*": "any", "log.*": "any", - "aws.*": "any", "http.Client": "any", "mail.Address": "{ address: string; name?: string; }", // prevents the LSP to complain in case no name is provided }, diff --git a/tools/filesystem/blob/bucket.go b/tools/filesystem/blob/bucket.go new file mode 100644 index 00000000..3ab3f251 --- /dev/null +++ b/tools/filesystem/blob/bucket.go @@ -0,0 +1,716 @@ +// Package blob defines a lightweight abstration for interacting with +// various storage services (local filesystem, S3, etc.). +// +// NB! +// For compatibility with earlier PocketBase versions and to prevent +// unnecessary breaking changes, this package is based and implemented +// as a minimal, stripped down version of the previously used gocloud.dev/blob. +// While there is no promise that it won't diverge in the future to accomodate +// better some PocketBase specific use cases, currently it copies and +// tries to follow as close as possible the same implementaitons, +// conventions and rules for the key escaping/unescaping, blob read/write +// interfaces and struct options as gocloud.dev/blob, therefore the +// credits goes to the original Go Cloud Development Kit Authors. +package blob + +import ( + "bytes" + "context" + "crypto/md5" + "errors" + "fmt" + "io" + "log" + "mime" + "runtime" + "strings" + "sync" + "time" + "unicode/utf8" +) + +var ( + ErrNotFound = errors.New("resource not found") + ErrClosed = errors.New("bucket or blob is closed") +) + +// Bucket provides an easy and portable way to interact with blobs +// within a "bucket", including read, write, and list operations. +// To create a Bucket, use constructors found in driver subpackages. 
+type Bucket struct { + drv Driver + + // mu protects the closed variable. + // Read locks are kept to allow holding a read lock for long-running calls, + // and thereby prevent closing until a call finishes. + mu sync.RWMutex + closed bool +} + +// NewBucket creates a new *Bucket based on a specific driver implementation. +func NewBucket(drv Driver) *Bucket { + return &Bucket{drv: drv} +} + +// ListOptions sets options for listing blobs via Bucket.List. +type ListOptions struct { + // Prefix indicates that only blobs with a key starting with this prefix + // should be returned. + Prefix string + + // Delimiter sets the delimiter used to define a hierarchical namespace, + // like a filesystem with "directories". It is highly recommended that you + // use "" or "/" as the Delimiter. Other values should work through this API, + // but service UIs generally assume "/". + // + // An empty delimiter means that the bucket is treated as a single flat + // namespace. + // + // A non-empty delimiter means that any result with the delimiter in its key + // after Prefix is stripped will be returned with ListObject.IsDir = true, + // ListObject.Key truncated after the delimiter, and zero values for other + // ListObject fields. These results represent "directories". Multiple results + // in a "directory" are returned as a single result. + Delimiter string + + // PageSize sets the maximum number of objects to be returned. + // 0 means no maximum; driver implementations should choose a reasonable + // max. It is guaranteed to be >= 0. + PageSize int + + // PageToken may be filled in with the NextPageToken from a previous + // ListPaged call. + PageToken []byte +} + +// ListPage represents a page of results return from ListPaged. +type ListPage struct { + // Objects is the slice of objects found. If ListOptions.PageSize > 0, + // it should have at most ListOptions.PageSize entries. 
+ // + // Objects should be returned in lexicographical order of UTF-8 encoded keys, + // including across pages. I.e., all objects returned from a ListPage request + // made using a PageToken from a previous ListPage request's NextPageToken + // should have Key >= the Key for all objects from the previous request. + Objects []*ListObject + // NextPageToken should be left empty unless there are more objects + // to return. The value may be returned as ListOptions.PageToken on a + // subsequent ListPaged call, to fetch the next page of results. + // It can be an arbitrary []byte; it need not be a valid key. + NextPageToken []byte +} + +// ListIterator iterates over List results. +type ListIterator struct { + b *Bucket + opts *ListOptions + page *ListPage + nextIdx int +} + +// Next returns a *ListObject for the next blob. +// It returns (nil, io.EOF) if there are no more. +func (i *ListIterator) Next(ctx context.Context) (*ListObject, error) { + if i.page != nil { + // We've already got a page of results. + if i.nextIdx < len(i.page.Objects) { + // Next object is in the page; return it. + dobj := i.page.Objects[i.nextIdx] + i.nextIdx++ + return &ListObject{ + Key: dobj.Key, + ModTime: dobj.ModTime, + Size: dobj.Size, + MD5: dobj.MD5, + IsDir: dobj.IsDir, + }, nil + } + + if len(i.page.NextPageToken) == 0 { + // Done with current page, and there are no more; return io.EOF. + return nil, io.EOF + } + + // We need to load the next page. + i.opts.PageToken = i.page.NextPageToken + } + + i.b.mu.RLock() + defer i.b.mu.RUnlock() + + if i.b.closed { + return nil, ErrClosed + } + + // Loading a new page. + p, err := i.b.drv.ListPaged(ctx, i.opts) + if err != nil { + return nil, wrapError(i.b.drv, err, "") + } + + i.page = p + i.nextIdx = 0 + + return i.Next(ctx) +} + +// ListObject represents a single blob returned from List. +type ListObject struct { + // Key is the key for this blob. + Key string + + // ModTime is the time the blob was last modified. 
+ ModTime time.Time + + // Size is the size of the blob's content in bytes. + Size int64 + + // MD5 is an MD5 hash of the blob contents or nil if not available. + MD5 []byte + + // IsDir indicates that this result represents a "directory" in the + // hierarchical namespace, ending in ListOptions.Delimiter. Key can be + // passed as ListOptions.Prefix to list items in the "directory". + // Fields other than Key and IsDir will not be set if IsDir is true. + IsDir bool +} + +// List returns a ListIterator that can be used to iterate over blobs in a +// bucket, in lexicographical order of UTF-8 encoded keys. The underlying +// implementation fetches results in pages. +// +// A nil ListOptions is treated the same as the zero value. +// +// List is not guaranteed to include all recently-written blobs; +// some services are only eventually consistent. +func (b *Bucket) List(opts *ListOptions) *ListIterator { + if opts == nil { + opts = &ListOptions{} + } + + dopts := &ListOptions{ + Prefix: opts.Prefix, + Delimiter: opts.Delimiter, + } + + return &ListIterator{b: b, opts: dopts} +} + +// FirstPageToken is the pageToken to pass to ListPage to retrieve the first page of results. +var FirstPageToken = []byte("first page") + +// ListPage returns a page of ListObject results for blobs in a bucket, in lexicographical +// order of UTF-8 encoded keys. +// +// To fetch the first page, pass FirstPageToken as the pageToken. For subsequent pages, pass +// the pageToken returned from a previous call to ListPage. +// It is not possible to "skip ahead" pages. +// +// Each call will return pageSize results, unless there are not enough blobs to fill the +// page, in which case it will return fewer results (possibly 0). +// +// If there are no more blobs available, ListPage will return an empty pageToken. 
Note that
+// this may happen regardless of the number of returned results -- the last page might have
+// 0 results (i.e., if the last item was deleted), pageSize results, or anything in between.
+//
+// Calling ListPage with an empty pageToken will immediately return io.EOF. When looping
+// over pages, callers can either check for an empty pageToken, or they can make one more
+// call and check for io.EOF.
+//
+// The underlying implementation fetches results in pages, but one call to ListPage may
+// require multiple page fetches (and therefore, multiple calls to the BeforeList callback).
+//
+// A nil ListOptions is treated the same as the zero value.
+//
+// ListPage is not guaranteed to include all recently-written blobs;
+// some services are only eventually consistent.
+func (b *Bucket) ListPage(ctx context.Context, pageToken []byte, pageSize int, opts *ListOptions) (retval []*ListObject, nextPageToken []byte, err error) {
+	if opts == nil {
+		opts = &ListOptions{}
+	}
+	if pageSize <= 0 {
+		return nil, nil, fmt.Errorf("pageSize must be > 0 (%d)", pageSize)
+	}
+
+	// Nil pageToken means no more results.
+	if len(pageToken) == 0 {
+		return nil, nil, io.EOF
+	}
+
+	// FirstPageToken fetches the first page. Drivers use nil.
+	// The public API doesn't use nil for the first page because it would be too easy to
+	// keep fetching forever (since the last page returns nil for the next pageToken).
+ if bytes.Equal(pageToken, FirstPageToken) { + pageToken = nil + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return nil, nil, ErrClosed + } + + dopts := &ListOptions{ + Prefix: opts.Prefix, + Delimiter: opts.Delimiter, + PageToken: pageToken, + PageSize: pageSize, + } + + retval = make([]*ListObject, 0, pageSize) + for len(retval) < pageSize { + p, err := b.drv.ListPaged(ctx, dopts) + if err != nil { + return nil, nil, wrapError(b.drv, err, "") + } + + for _, dobj := range p.Objects { + retval = append(retval, &ListObject{ + Key: dobj.Key, + ModTime: dobj.ModTime, + Size: dobj.Size, + MD5: dobj.MD5, + IsDir: dobj.IsDir, + }) + } + + // ListPaged may return fewer results than pageSize. If there are more results + // available, signalled by non-empty p.NextPageToken, try to fetch the remainder + // of the page. + // It does not work to ask for more results than we need, because then we'd have + // a NextPageToken on a non-page boundary. + dopts.PageSize = pageSize - len(retval) + dopts.PageToken = p.NextPageToken + if len(dopts.PageToken) == 0 { + dopts.PageToken = nil + break + } + } + + return retval, dopts.PageToken, nil +} + +// Attributes contains attributes about a blob. +type Attributes struct { + // CacheControl specifies caching attributes that services may use + // when serving the blob. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + CacheControl string + // ContentDisposition specifies whether the blob content is expected to be + // displayed inline or as an attachment. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + ContentDisposition string + // ContentEncoding specifies the encoding used for the blob's content, if any. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + ContentEncoding string + // ContentLanguage specifies the language used in the blob's content, if any. 
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language + ContentLanguage string + // ContentType is the MIME type of the blob. It will not be empty. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + ContentType string + // Metadata holds key/value pairs associated with the blob. + // Keys are guaranteed to be in lowercase, even if the backend service + // has case-sensitive keys (although note that Metadata written via + // this package will always be lowercased). If there are duplicate + // case-insensitive keys (e.g., "foo" and "FOO"), only one value + // will be kept, and it is undefined which one. + Metadata map[string]string + // CreateTime is the time the blob was created, if available. If not available, + // CreateTime will be the zero time. + CreateTime time.Time + // ModTime is the time the blob was last modified. + ModTime time.Time + // Size is the size of the blob's content in bytes. + Size int64 + // MD5 is an MD5 hash of the blob contents or nil if not available. + MD5 []byte + // ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. + ETag string +} + +// Attributes returns attributes for the blob stored at key. +// +// If the blob does not exist, Attributes returns an error for which +// gcerrors.Code will return gcerrors.NotFound. +func (b *Bucket) Attributes(ctx context.Context, key string) (_ *Attributes, err error) { + if !utf8.ValidString(key) { + return nil, fmt.Errorf("Attributes key must be a valid UTF-8 string: %q", key) + } + + b.mu.RLock() + defer b.mu.RUnlock() + if b.closed { + return nil, ErrClosed + } + + a, err := b.drv.Attributes(ctx, key) + if err != nil { + return nil, wrapError(b.drv, err, key) + } + + var md map[string]string + if len(a.Metadata) > 0 { + // Services are inconsistent, but at least some treat keys + // as case-insensitive. To make the behavior consistent, we + // force-lowercase them when writing and reading. 
+ md = make(map[string]string, len(a.Metadata)) + for k, v := range a.Metadata { + md[strings.ToLower(k)] = v + } + } + + return &Attributes{ + CacheControl: a.CacheControl, + ContentDisposition: a.ContentDisposition, + ContentEncoding: a.ContentEncoding, + ContentLanguage: a.ContentLanguage, + ContentType: a.ContentType, + Metadata: md, + CreateTime: a.CreateTime, + ModTime: a.ModTime, + Size: a.Size, + MD5: a.MD5, + ETag: a.ETag, + }, nil +} + +// Exists returns true if a blob exists at key, false if it does not exist, or +// an error. +// +// It is a shortcut for calling Attributes and checking if it returns an error +// with code ErrNotFound. +func (b *Bucket) Exists(ctx context.Context, key string) (bool, error) { + _, err := b.Attributes(ctx, key) + if err == nil { + return true, nil + } + + if errors.Is(err, ErrNotFound) { + return false, nil + } + + return false, err +} + +// NewReader is a shortcut for NewRangeReader with offset=0 and length=-1. +func (b *Bucket) NewReader(ctx context.Context, key string) (*Reader, error) { + return b.newRangeReader(ctx, key, 0, -1) +} + +// NewRangeReader returns a Reader to read content from the blob stored at key. +// It reads at most length bytes starting at offset (>= 0). +// If length is negative, it will read till the end of the blob. +// +// For the purposes of Seek, the returned Reader will start at offset and +// end at the minimum of the actual end of the blob or (if length > 0) offset + length. +// +// Note that ctx is used for all reads performed during the lifetime of the reader. +// +// If the blob does not exist, NewRangeReader returns an error for which +// gcerrors.Code will return gcerrors.NotFound. Exists is a lighter-weight way +// to check for existence. +// +// A nil ReaderOptions is treated the same as the zero value. +// +// The caller must call Close on the returned Reader when done reading. 
+func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (_ *Reader, err error) { + return b.newRangeReader(ctx, key, offset, length) +} + +func (b *Bucket) newRangeReader(ctx context.Context, key string, offset, length int64) (_ *Reader, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b.closed { + return nil, ErrClosed + } + + if offset < 0 { + return nil, fmt.Errorf("NewRangeReader offset must be non-negative (%d)", offset) + } + + if !utf8.ValidString(key) { + return nil, fmt.Errorf("NewRangeReader key must be a valid UTF-8 string: %q", key) + } + + var dr DriverReader + dr, err = b.drv.NewRangeReader(ctx, key, offset, length) + if err != nil { + return nil, wrapError(b.drv, err, key) + } + + r := &Reader{ + drv: b.drv, + r: dr, + key: key, + ctx: ctx, + baseOffset: offset, + baseLength: length, + savedOffset: -1, + } + + _, file, lineno, ok := runtime.Caller(2) + runtime.SetFinalizer(r, func(r *Reader) { + if !r.closed { + var caller string + if ok { + caller = fmt.Sprintf(" (%s:%d)", file, lineno) + } + log.Printf("A blob.Reader reading from %q was never closed%s", key, caller) + } + }) + + return r, nil +} + +// WriterOptions sets options for NewWriter. +type WriterOptions struct { + // BufferSize changes the default size in bytes of the chunks that + // Writer will upload in a single request; larger blobs will be split into + // multiple requests. + // + // This option may be ignored by some drivers. + // + // If 0, the driver will choose a reasonable default. + // + // If the Writer is used to do many small writes concurrently, using a + // smaller BufferSize may reduce memory usage. + BufferSize int + + // MaxConcurrency changes the default concurrency for parts of an upload. + // + // This option may be ignored by some drivers. + // + // If 0, the driver will choose a reasonable default. + MaxConcurrency int + + // CacheControl specifies caching attributes that services may use + // when serving the blob. 
+ // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + CacheControl string + + // ContentDisposition specifies whether the blob content is expected to be + // displayed inline or as an attachment. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + ContentDisposition string + + // ContentEncoding specifies the encoding used for the blob's content, if any. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + ContentEncoding string + + // ContentLanguage specifies the language used in the blob's content, if any. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language + ContentLanguage string + + // ContentType specifies the MIME type of the blob being written. If not set, + // it will be inferred from the content using the algorithm described at + // http://mimesniff.spec.whatwg.org/. + // Set DisableContentTypeDetection to true to disable the above and force + // the ContentType to stay empty. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + ContentType string + + // When true, if ContentType is the empty string, it will stay the empty + // string rather than being inferred from the content. + // Note that while the blob will be written with an empty string ContentType, + // most providers will fill one in during reads, so don't expect an empty + // ContentType if you read the blob back. + DisableContentTypeDetection bool + + // ContentMD5 is used as a message integrity check. + // If len(ContentMD5) > 0, the MD5 hash of the bytes written must match + // ContentMD5, or Close will return an error without completing the write. + // https://tools.ietf.org/html/rfc1864 + ContentMD5 []byte + + // Metadata holds key/value strings to be associated with the blob, or nil. + // Keys may not be empty, and are lowercased before being written. + // Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in + // an error. 
+ Metadata map[string]string +} + +// NewWriter returns a Writer that writes to the blob stored at key. +// A nil WriterOptions is treated the same as the zero value. +// +// If a blob with this key already exists, it will be replaced. +// The blob being written is not guaranteed to be readable until Close +// has been called; until then, any previous blob will still be readable. +// Even after Close is called, newly written blobs are not guaranteed to be +// returned from List; some services are only eventually consistent. +// +// The returned Writer will store ctx for later use in Write and/or Close. +// To abort a write, cancel ctx; otherwise, it must remain open until +// Close is called. +// +// The caller must call Close on the returned Writer, even if the write is +// aborted. +func (b *Bucket) NewWriter(ctx context.Context, key string, opts *WriterOptions) (_ *Writer, err error) { + if !utf8.ValidString(key) { + return nil, fmt.Errorf("NewWriter key must be a valid UTF-8 string: %q", key) + } + if opts == nil { + opts = &WriterOptions{} + } + dopts := &WriterOptions{ + CacheControl: opts.CacheControl, + ContentDisposition: opts.ContentDisposition, + ContentEncoding: opts.ContentEncoding, + ContentLanguage: opts.ContentLanguage, + ContentMD5: opts.ContentMD5, + BufferSize: opts.BufferSize, + MaxConcurrency: opts.MaxConcurrency, + DisableContentTypeDetection: opts.DisableContentTypeDetection, + } + + if len(opts.Metadata) > 0 { + // Services are inconsistent, but at least some treat keys + // as case-insensitive. To make the behavior consistent, we + // force-lowercase them when writing and reading. 
+		md := make(map[string]string, len(opts.Metadata))
+		for k, v := range opts.Metadata {
+			if k == "" {
+				return nil, errors.New("WriterOptions.Metadata keys may not be empty strings")
+			}
+			if !utf8.ValidString(k) {
+				return nil, fmt.Errorf("WriterOptions.Metadata keys must be valid UTF-8 strings: %q", k)
+			}
+			if !utf8.ValidString(v) {
+				return nil, fmt.Errorf("WriterOptions.Metadata values must be valid UTF-8 strings: %q", v)
+			}
+			lowerK := strings.ToLower(k)
+			if _, found := md[lowerK]; found {
+				return nil, fmt.Errorf("WriterOptions.Metadata has a duplicate case-insensitive metadata key: %q", lowerK)
+			}
+			md[lowerK] = v
+		}
+		dopts.Metadata = md
+	}
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	if b.closed {
+		return nil, ErrClosed
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+
+	w := &Writer{
+		drv:        b.drv,
+		cancel:     cancel,
+		key:        key,
+		contentMD5: opts.ContentMD5,
+		md5hash:    md5.New(),
+	}
+
+	if opts.ContentType != "" || opts.DisableContentTypeDetection {
+		var ct string
+		if opts.ContentType != "" {
+			t, p, err := mime.ParseMediaType(opts.ContentType)
+			if err != nil {
+				cancel()
+				return nil, err
+			}
+			ct = mime.FormatMediaType(t, p)
+		}
+		dw, err := b.drv.NewTypedWriter(ctx, key, ct, dopts)
+		if err != nil {
+			cancel()
+			return nil, wrapError(b.drv, err, key)
+		}
+		w.w = dw
+	} else {
+		// Save the fields needed to call NewTypedWriter later, once we've gotten
+		// sniffLen bytes; see the comment on Writer.
+		w.ctx = ctx
+		w.opts = dopts
+		w.buf = bytes.NewBuffer([]byte{})
+	}
+
+	_, file, lineno, ok := runtime.Caller(1)
+	runtime.SetFinalizer(w, func(w *Writer) {
+		if !w.closed {
+			var caller string
+			if ok {
+				caller = fmt.Sprintf(" (%s:%d)", file, lineno)
+			}
+			log.Printf("A blob.Writer writing to %q was never closed%s", key, caller)
+		}
+	})
+
+	return w, nil
+}
+
+// Copy the blob stored at srcKey to dstKey.
+// A nil CopyOptions is treated the same as the zero value.
+// +// If the source blob does not exist, Copy returns an error for which +// gcerrors.Code will return gcerrors.NotFound. +// +// If the destination blob already exists, it is overwritten. +func (b *Bucket) Copy(ctx context.Context, dstKey, srcKey string) (err error) { + if !utf8.ValidString(srcKey) { + return fmt.Errorf("Copy srcKey must be a valid UTF-8 string: %q", srcKey) + } + + if !utf8.ValidString(dstKey) { + return fmt.Errorf("Copy dstKey must be a valid UTF-8 string: %q", dstKey) + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return ErrClosed + } + + return wrapError(b.drv, b.drv.Copy(ctx, dstKey, srcKey), fmt.Sprintf("%s -> %s", srcKey, dstKey)) +} + +// Delete deletes the blob stored at key. +// +// If the blob does not exist, Delete returns an error for which +// gcerrors.Code will return gcerrors.NotFound. +func (b *Bucket) Delete(ctx context.Context, key string) (err error) { + if !utf8.ValidString(key) { + return fmt.Errorf("Delete key must be a valid UTF-8 string: %q", key) + } + + b.mu.RLock() + defer b.mu.RUnlock() + + if b.closed { + return ErrClosed + } + + return wrapError(b.drv, b.drv.Delete(ctx, key), key) +} + +// Close releases any resources used for the bucket. +func (b *Bucket) Close() error { + b.mu.Lock() + prev := b.closed + b.closed = true + b.mu.Unlock() + + if prev { + return ErrClosed + } + + return wrapError(b.drv, b.drv.Close(), "") +} + +func wrapError(b Driver, err error, key string) error { + if err == nil { + return nil + } + + err = b.NormalizeError(err) + + if key != "" { + err = fmt.Errorf("[key: %s] %w", key, err) + } + + return err +} diff --git a/tools/filesystem/blob/driver.go b/tools/filesystem/blob/driver.go new file mode 100644 index 00000000..6cafefa0 --- /dev/null +++ b/tools/filesystem/blob/driver.go @@ -0,0 +1,108 @@ +package blob + +import ( + "context" + "io" + "time" +) + +// ReaderAttributes contains a subset of attributes about a blob that are +// accessible from Reader. 
+type ReaderAttributes struct { + // ContentType is the MIME type of the blob object. It must not be empty. + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + ContentType string + // ModTime is the time the blob object was last modified. + ModTime time.Time + // Size is the size of the object in bytes. + Size int64 +} + +// DriverReader reads an object from the blob. +type DriverReader interface { + io.ReadCloser + + // Attributes returns a subset of attributes about the blob. + // The portable type will not modify the returned ReaderAttributes. + Attributes() *ReaderAttributes +} + +// DriverWriter writes an object to the blob. +type DriverWriter interface { + io.WriteCloser +} + +// Driver provides read, write and delete operations on objects within it on the +// blob service. +type Driver interface { + NormalizeError(err error) error + + // Attributes returns attributes for the blob. If the specified object does + // not exist, Attributes must return an error for which ErrorCode returns + // gcerrors.NotFound. + // The portable type will not modify the returned Attributes. + Attributes(ctx context.Context, key string) (*Attributes, error) + + // ListPaged lists objects in the bucket, in lexicographical order by + // UTF-8-encoded key, returning pages of objects at a time. + // Services are only required to be eventually consistent with respect + // to recently written or deleted objects. That is to say, there is no + // guarantee that an object that's been written will immediately be returned + // from ListPaged. + // opts is guaranteed to be non-nil. + ListPaged(ctx context.Context, opts *ListOptions) (*ListPage, error) + + // NewRangeReader returns a Reader that reads part of an object, reading at + // most length bytes starting at the given offset. If length is negative, it + // will read until the end of the object. 
If the specified object does not
+	// exist, NewRangeReader must return an error for which ErrorCode returns
+	// gcerrors.NotFound.
+	// opts is guaranteed to be non-nil.
+	//
+	// The returned Reader *may* also implement Downloader if the underlying
+	// implementation can take advantage of that. The Download call is guaranteed
+	// to be the only call to the Reader. For such readers, offset will always
+	// be 0 and length will always be -1.
+	NewRangeReader(ctx context.Context, key string, offset, length int64) (DriverReader, error)
+
+	// NewTypedWriter returns Writer that writes to an object associated with key.
+	//
+	// A new object will be created unless an object with this key already exists.
+	// Otherwise any previous object with the same key will be replaced.
+	// The object may not be available (and any previous object will remain)
+	// until Close has been called.
+	//
+	// contentType sets the MIME type of the object to be written.
+	// opts is guaranteed to be non-nil.
+	//
+	// The caller must call Close on the returned Writer when done writing.
+	//
+	// Implementations should abort an ongoing write if ctx is later canceled,
+	// and do any necessary cleanup in Close. Close should then return ctx.Err().
+	//
+	// The returned Writer *may* also implement Uploader if the underlying
+	// implementation can take advantage of that. The Upload call is guaranteed
+	// to be the only non-Close call to the Writer.
+	NewTypedWriter(ctx context.Context, key, contentType string, opts *WriterOptions) (DriverWriter, error)
+
+	// Copy copies the object associated with srcKey to dstKey.
+	//
+	// If the source object does not exist, Copy must return an error for which
+	// ErrorCode returns gcerrors.NotFound.
+	//
+	// If the destination object already exists, it should be overwritten.
+	//
+	// opts is guaranteed to be non-nil.
+	Copy(ctx context.Context, dstKey, srcKey string) error
+
+	// Delete deletes the object associated with key.
If the specified object does + // not exist, Delete must return an error for which ErrorCode returns + // gcerrors.NotFound. + Delete(ctx context.Context, key string) error + + // Close cleans up any resources used by the Bucket. Once Close is called, + // there will be no method calls to the Bucket other than As, ErrorAs, and + // ErrorCode. There may be open readers or writers that will receive calls. + // It is up to the driver as to how these will be handled. + Close() error +} diff --git a/tools/filesystem/blob/hex.go b/tools/filesystem/blob/hex.go new file mode 100644 index 00000000..e830324f --- /dev/null +++ b/tools/filesystem/blob/hex.go @@ -0,0 +1,153 @@ +package blob + +// Copied from gocloud.dev/blob to avoid nuances around the specific +// HEX escaping/unescaping rules. +// +// ------------------------------------------------------------------- +// Copyright 2019 The Go Cloud Development Kit Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ------------------------------------------------------------------- + +import ( + "fmt" + "strconv" +) + +// HexEscape returns s, with all runes for which shouldEscape returns true +// escaped to "__0xXXX__", where XXX is the hex representation of the rune +// value. For example, " " would escape to "__0x20__". +// +// Non-UTF-8 strings will have their non-UTF-8 characters escaped to +// unicode.ReplacementChar; the original value is lost. Please file an +// issue if you need non-UTF8 support. 
+// +// Note: shouldEscape takes the whole string as a slice of runes and an +// index. Passing it a single byte or a single rune doesn't provide +// enough context for some escape decisions; for example, the caller might +// want to escape the second "/" in "//" but not the first one. +// We pass a slice of runes instead of the string or a slice of bytes +// because some decisions will be made on a rune basis (e.g., encode +// all non-ASCII runes). +func HexEscape(s string, shouldEscape func(s []rune, i int) bool) string { + // Do a first pass to see which runes (if any) need escaping. + runes := []rune(s) + var toEscape []int + for i := range runes { + if shouldEscape(runes, i) { + toEscape = append(toEscape, i) + } + } + if len(toEscape) == 0 { + return s + } + + // Each escaped rune turns into at most 14 runes ("__0x7fffffff__"), + // so allocate an extra 13 for each. We'll reslice at the end + // if we didn't end up using them. + escaped := make([]rune, len(runes)+13*len(toEscape)) + n := 0 // current index into toEscape + j := 0 // current index into escaped + for i, r := range runes { + if n < len(toEscape) && i == toEscape[n] { + // We were asked to escape this rune. + for _, x := range fmt.Sprintf("__%#x__", r) { + escaped[j] = x + j++ + } + n++ + } else { + escaped[j] = r + j++ + } + } + + return string(escaped[0:j]) +} + +// unescape tries to unescape starting at r[i]. +// It returns a boolean indicating whether the unescaping was successful, +// and (if true) the unescaped rune and the last index of r that was used +// during unescaping. +func unescape(r []rune, i int) (bool, rune, int) { + // Look for "__0x". + if r[i] != '_' { + return false, 0, 0 + } + i++ + if i >= len(r) || r[i] != '_' { + return false, 0, 0 + } + i++ + if i >= len(r) || r[i] != '0' { + return false, 0, 0 + } + i++ + if i >= len(r) || r[i] != 'x' { + return false, 0, 0 + } + i++ + + // Capture the digits until the next "_" (if any). 
+ var hexdigits []rune + for ; i < len(r) && r[i] != '_'; i++ { + hexdigits = append(hexdigits, r[i]) + } + + // Look for the trailing "__". + if i >= len(r) || r[i] != '_' { + return false, 0, 0 + } + i++ + if i >= len(r) || r[i] != '_' { + return false, 0, 0 + } + + // Parse the hex digits into an int32. + retval, err := strconv.ParseInt(string(hexdigits), 16, 32) + if err != nil { + return false, 0, 0 + } + + return true, rune(retval), i +} + +// HexUnescape reverses HexEscape. +func HexUnescape(s string) string { + var unescaped []rune + + runes := []rune(s) + for i := 0; i < len(runes); i++ { + if ok, newR, newI := unescape(runes, i); ok { + // We unescaped some runes starting at i, resulting in the + // unescaped rune newR. The last rune used was newI. + if unescaped == nil { + // This is the first rune we've encountered that + // needed unescaping. Allocate a buffer and copy any + // previous runes. + unescaped = make([]rune, i) + copy(unescaped, runes) + } + unescaped = append(unescaped, newR) + i = newI + } else if unescaped != nil { + unescaped = append(unescaped, runes[i]) + } + } + + if unescaped == nil { + return s + } + + return string(unescaped) +} diff --git a/tools/filesystem/blob/reader.go b/tools/filesystem/blob/reader.go new file mode 100644 index 00000000..40d7b82d --- /dev/null +++ b/tools/filesystem/blob/reader.go @@ -0,0 +1,178 @@ +package blob + +import ( + "context" + "fmt" + "io" + "log" + "time" +) + +var _ io.ReadSeekCloser = (*Reader)(nil) + +// Reader reads bytes from a blob. +// It implements io.ReadSeekCloser, and must be closed after reads are finished. +type Reader struct { + ctx context.Context // Used to recreate r after Seeks + r DriverReader + drv Driver + key string + baseOffset int64 // The base offset provided to NewRangeReader. + baseLength int64 // The length provided to NewRangeReader (may be negative). + relativeOffset int64 // Current offset (relative to baseOffset). 
+ savedOffset int64 // Last relativeOffset for r, saved after relativeOffset is changed in Seek, or -1 if no Seek. + closed bool +} + +// Read implements io.Reader (https://golang.org/pkg/io/#Reader). +func (r *Reader) Read(p []byte) (int, error) { + if r.savedOffset != -1 { + // We've done one or more Seeks since the last read. We may have + // to recreate the Reader. + // + // Note that remembering the savedOffset and lazily resetting the + // reader like this allows the caller to Seek, then Seek again back, + // to the original offset, without having to recreate the reader. + // We only have to recreate the reader if we actually read after a Seek. + // This is an important optimization because it's common to Seek + // to (SeekEnd, 0) and use the return value to determine the size + // of the data, then Seek back to (SeekStart, 0). + saved := r.savedOffset + if r.relativeOffset == saved { + // Nope! We're at the same place we left off. + r.savedOffset = -1 + } else { + // Yep! We've changed the offset. Recreate the reader. + length := r.baseLength + if length >= 0 { + length -= r.relativeOffset + if length < 0 { + // Shouldn't happen based on checks in Seek. + return 0, fmt.Errorf("invalid Seek (base length %d, relative offset %d)", r.baseLength, r.relativeOffset) + } + } + newR, err := r.drv.NewRangeReader(r.ctx, r.key, r.baseOffset+r.relativeOffset, length) + if err != nil { + return 0, wrapError(r.drv, err, r.key) + } + _ = r.r.Close() + r.savedOffset = -1 + r.r = newR + } + } + n, err := r.r.Read(p) + r.relativeOffset += int64(n) + return n, wrapError(r.drv, err, r.key) +} + +// Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). +func (r *Reader) Seek(offset int64, whence int) (int64, error) { + if r.savedOffset == -1 { + // Save the current offset for our reader. If the Seek changes the + // offset, and then we try to read, we'll need to recreate the reader. + // See comment above in Read for why we do it lazily. 
+ r.savedOffset = r.relativeOffset + } + // The maximum relative offset is the minimum of: + // 1. The actual size of the blob, minus our initial baseOffset. + // 2. The length provided to NewRangeReader (if it was non-negative). + maxRelativeOffset := r.Size() - r.baseOffset + if r.baseLength >= 0 && r.baseLength < maxRelativeOffset { + maxRelativeOffset = r.baseLength + } + switch whence { + case io.SeekStart: + r.relativeOffset = offset + case io.SeekCurrent: + r.relativeOffset += offset + case io.SeekEnd: + r.relativeOffset = maxRelativeOffset + offset + } + if r.relativeOffset < 0 { + // "Seeking to an offset before the start of the file is an error." + invalidOffset := r.relativeOffset + r.relativeOffset = 0 + return 0, fmt.Errorf("Seek resulted in invalid offset %d, using 0", invalidOffset) + } + if r.relativeOffset > maxRelativeOffset { + // "Seeking to any positive offset is legal, but the behavior of subsequent + // I/O operations on the underlying object is implementation-dependent." + // We'll choose to set the offset to the EOF. + log.Printf("blob.Reader.Seek set an offset after EOF (base offset/length from NewRangeReader %d, %d; actual blob size %d; relative offset %d -> absolute offset %d).", r.baseOffset, r.baseLength, r.Size(), r.relativeOffset, r.baseOffset+r.relativeOffset) + r.relativeOffset = maxRelativeOffset + } + return r.relativeOffset, nil +} + +// Close implements io.Closer (https://golang.org/pkg/io/#Closer). +func (r *Reader) Close() error { + r.closed = true + err := wrapError(r.drv, r.r.Close(), r.key) + return err +} + +// ContentType returns the MIME type of the blob. +func (r *Reader) ContentType() string { + return r.r.Attributes().ContentType +} + +// ModTime returns the time the blob was last modified. +func (r *Reader) ModTime() time.Time { + return r.r.Attributes().ModTime +} + +// Size returns the size of the blob content in bytes. 
+func (r *Reader) Size() int64 { + return r.r.Attributes().Size +} + +// WriteTo reads from r and writes to w until there's no more data or +// an error occurs. +// The return value is the number of bytes written to w. +// +// It implements the io.WriterTo interface. +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + // If the writer has a ReaderFrom method, use it to do the copy. + // Don't do this for our own *Writer to avoid infinite recursion. + // Avoids an allocation and a copy. + switch w.(type) { + case *Writer: + default: + if rf, ok := w.(io.ReaderFrom); ok { + n, err := rf.ReadFrom(r) + return n, err + } + } + + _, nw, err := readFromWriteTo(r, w) + return nw, err +} + +// readFromWriteTo is a helper for ReadFrom and WriteTo. +// It reads data from r and writes to w, until EOF or a read/write error. +// It returns the number of bytes read from r and the number of bytes +// written to w. +func readFromWriteTo(r io.Reader, w io.Writer) (int64, int64, error) { + // Note: can't use io.Copy because it will try to use r.WriteTo + // or w.WriteTo, which is recursive in this context. + buf := make([]byte, 1024) + var totalRead, totalWritten int64 + for { + numRead, rerr := r.Read(buf) + if numRead > 0 { + totalRead += int64(numRead) + numWritten, werr := w.Write(buf[0:numRead]) + totalWritten += int64(numWritten) + if werr != nil { + return totalRead, totalWritten, werr + } + } + if rerr == io.EOF { + // Done! + return totalRead, totalWritten, nil + } + if rerr != nil { + return totalRead, totalWritten, rerr + } + } +} diff --git a/tools/filesystem/blob/writer.go b/tools/filesystem/blob/writer.go new file mode 100644 index 00000000..ab0dd528 --- /dev/null +++ b/tools/filesystem/blob/writer.go @@ -0,0 +1,166 @@ +package blob + +import ( + "bytes" + "context" + "fmt" + "hash" + "io" + "net/http" +) + +var _ io.WriteCloser = (*Writer)(nil) + +// Writer writes bytes to a blob. 
+// +// It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be +// closed after all writes are done. +type Writer struct { + drv Driver + w DriverWriter + key string + cancel func() // cancels the ctx provided to NewTypedWriter if contentMD5 verification fails + contentMD5 []byte + md5hash hash.Hash + bytesWritten int + closed bool + + // These fields are non-zero values only when w is nil (not yet created). + // + // A ctx is stored in the Writer since we need to pass it into NewTypedWriter + // when we finish detecting the content type of the blob and create the + // underlying driver.Writer. This step happens inside Write or Close and + // neither of them take a context.Context as an argument. + // + // All 3 fields are only initialized when we create the Writer without + // setting the w field, and are reset to zero values after w is created. + ctx context.Context + opts *WriterOptions + buf *bytes.Buffer +} + +// sniffLen is the byte size of Writer.buf used to detect content-type. +const sniffLen = 512 + +// Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer). +// +// Writes may happen asynchronously, so the returned error can be nil +// even if the actual write eventually fails. The write is only guaranteed to +// have succeeded if Close returns no error. +func (w *Writer) Write(p []byte) (int, error) { + if len(w.contentMD5) > 0 { + if _, err := w.md5hash.Write(p); err != nil { + return 0, err + } + } + + if w.w != nil { + return w.write(p) + } + + // If w is not yet created due to no content-type being passed in, try to sniff + // the MIME type based on at most 512 bytes of the blob content of p. + + // Detect the content-type directly if the first chunk is at least 512 bytes. + if w.buf.Len() == 0 && len(p) >= sniffLen { + return w.open(p) + } + + // Store p in w.buf and detect the content-type when the size of content in + // w.buf is at least 512 bytes. 
+ n, err := w.buf.Write(p) + if err != nil { + return 0, err + } + + if w.buf.Len() >= sniffLen { + // Note that w.open will return the full length of the buffer; we don't want + // to return that as the length of this write since some of them were written in + // previous writes. Instead, we return the n from this write, above. + _, err := w.open(w.buf.Bytes()) + return n, err + } + + return n, nil +} + +// Close closes the blob writer. The write operation is not guaranteed +// to have succeeded until Close returns with no error. +// +// Close may return an error if the context provided to create the +// Writer is canceled or reaches its deadline. +func (w *Writer) Close() (err error) { + w.closed = true + + // Verify the MD5 hash of what was written matches the ContentMD5 provided by the user. + if len(w.contentMD5) > 0 { + md5sum := w.md5hash.Sum(nil) + if !bytes.Equal(md5sum, w.contentMD5) { + // No match! Return an error, but first cancel the context and call the + // driver's Close function to ensure the write is aborted. + w.cancel() + if w.w != nil { + _ = w.w.Close() + } + return fmt.Errorf("the WriterOptions.ContentMD5 you specified (%X) did not match what was written (%X)", w.contentMD5, md5sum) + } + } + + defer w.cancel() + + if w.w != nil { + return wrapError(w.drv, w.w.Close(), w.key) + } + + if _, err := w.open(w.buf.Bytes()); err != nil { + return err + } + + return wrapError(w.drv, w.w.Close(), w.key) +} + +// open tries to detect the MIME type of p and write it to the blob. +// The error it returns is wrapped. +func (w *Writer) open(p []byte) (int, error) { + ct := http.DetectContentType(p) + + var err error + w.w, err = w.drv.NewTypedWriter(w.ctx, w.key, ct, w.opts) + if err != nil { + return 0, wrapError(w.drv, err, w.key) + } + + // Set the 3 fields needed for lazy NewTypedWriter back to zero values + // (see the comment on Writer). 
+ w.buf = nil + w.ctx = nil + w.opts = nil + + return w.write(p) +} + +func (w *Writer) write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.bytesWritten += n + return n, wrapError(w.drv, err, w.key) +} + +// ReadFrom reads from r and writes to w until EOF or error. +// The return value is the number of bytes read from r. +// +// It implements the io.ReaderFrom interface. +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // If the reader has a WriteTo method, use it to do the copy. + // Don't do this for our own *Reader to avoid infinite recursion. + // Avoids an allocation and a copy. + switch r.(type) { + case *Reader: + default: + if wt, ok := r.(io.WriterTo); ok { + return wt.WriteTo(w) + } + } + + nr, _, err := readFromWriteTo(r, w) + return nr, err +} diff --git a/tools/filesystem/filesystem.go b/tools/filesystem/filesystem.go index 144301eb..cc44fee6 100644 --- a/tools/filesystem/filesystem.go +++ b/tools/filesystem/filesystem.go @@ -14,50 +14,23 @@ import ( "strconv" "strings" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/disintegration/imaging" "github.com/gabriel-vasile/mimetype" - "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3lite" + "github.com/pocketbase/pocketbase/tools/filesystem/blob" + "github.com/pocketbase/pocketbase/tools/filesystem/internal/fileblob" + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob" + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" "github.com/pocketbase/pocketbase/tools/list" - "gocloud.dev/blob" - "gocloud.dev/blob/fileblob" - "gocloud.dev/gcerrors" ) -var gcpIgnoreHeaders = []string{"Accept-Encoding"} - -var ErrNotFound = errors.New("blob not found") +// note: the same as blob.ErrNotFound for backward compatibility with earlier versions +var ErrNotFound = blob.ErrNotFound type System struct { ctx context.Context bucket 
*blob.Bucket } -// ------------------------------------------------------------------- - -// @todo delete after replacing the aws-sdk-go-v2 dependency -// -// enforce WHEN_REQUIRED by default in case the user has updated AWS SDK dependency -// https://github.com/aws/aws-sdk-go-v2/discussions/2960 -// https://github.com/pocketbase/pocketbase/discussions/6440 -// https://github.com/pocketbase/pocketbase/discussions/6313 -func init() { - reqEnv := os.Getenv("AWS_REQUEST_CHECKSUM_CALCULATION") - if reqEnv == "" { - os.Setenv("AWS_REQUEST_CHECKSUM_CALCULATION", "WHEN_REQUIRED") - } - - resEnv := os.Getenv("AWS_RESPONSE_CHECKSUM_VALIDATION") - if resEnv == "" { - os.Setenv("AWS_RESPONSE_CHECKSUM_VALIDATION", "WHEN_REQUIRED") - } -} - -// ------------------------------------------------------------------- - // NewS3 initializes an S3 filesystem instance. // // NB! Make sure to call `Close()` after you are done working with it. @@ -71,41 +44,21 @@ func NewS3( ) (*System, error) { ctx := context.Background() // default context - cred := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "") + client := &s3.S3{ + Bucket: bucketName, + Region: region, + Endpoint: endpoint, + AccessKey: accessKey, + SecretKey: secretKey, + UsePathStyle: s3ForcePathStyle, + } - cfg, err := config.LoadDefaultConfig( - ctx, - config.WithCredentialsProvider(cred), - config.WithRegion(region), - ) + drv, err := s3blob.New(client) if err != nil { return nil, err } - client := s3.NewFromConfig(cfg, func(o *s3.Options) { - // ensure that the endpoint has url scheme for - // backward compatibility with v1 of the aws sdk - if !strings.Contains(endpoint, "://") { - endpoint = "https://" + endpoint - } - o.BaseEndpoint = aws.String(endpoint) - - o.UsePathStyle = s3ForcePathStyle - - // Google Cloud Storage alters the Accept-Encoding header, - // which breaks the v2 request signature - // (https://github.com/aws/aws-sdk-go-v2/issues/1816) - if strings.Contains(endpoint, 
"storage.googleapis.com") { - ignoreSigningHeaders(o, gcpIgnoreHeaders) - } - }) - - bucket, err := s3lite.OpenBucketV2(ctx, client, bucketName, nil) - if err != nil { - return nil, err - } - - return &System{ctx: ctx, bucket: bucket}, nil + return &System{ctx: ctx, bucket: blob.NewBucket(drv)}, nil } // NewLocal initializes a new local filesystem instance. @@ -119,14 +72,14 @@ func NewLocal(dirPath string) (*System, error) { return nil, err } - bucket, err := fileblob.OpenBucket(dirPath, &fileblob.Options{ + drv, err := fileblob.New(dirPath, &fileblob.Options{ NoTempDir: true, }) if err != nil { return nil, err } - return &System{ctx: ctx, bucket: bucket}, nil + return &System{ctx: ctx, bucket: blob.NewBucket(drv)}, nil } // SetContext assigns the specified context to the current filesystem. @@ -140,29 +93,15 @@ func (s *System) Close() error { } // Exists checks if file with fileKey path exists or not. -// -// If the file doesn't exist returns false and ErrNotFound. func (s *System) Exists(fileKey string) (bool, error) { - exists, err := s.bucket.Exists(s.ctx, fileKey) - - if gcerrors.Code(err) == gcerrors.NotFound { - err = ErrNotFound - } - - return exists, err + return s.bucket.Exists(s.ctx, fileKey) } // Attributes returns the attributes for the file with fileKey path. // // If the file doesn't exist it returns ErrNotFound. func (s *System) Attributes(fileKey string) (*blob.Attributes, error) { - attrs, err := s.bucket.Attributes(s.ctx, fileKey) - - if gcerrors.Code(err) == gcerrors.NotFound { - err = ErrNotFound - } - - return attrs, err + return s.bucket.Attributes(s.ctx, fileKey) } // GetFile returns a file content reader for the given fileKey. @@ -171,13 +110,7 @@ func (s *System) Attributes(fileKey string) (*blob.Attributes, error) { // // If the file doesn't exist returns ErrNotFound. 
func (s *System) GetFile(fileKey string) (*blob.Reader, error) { - br, err := s.bucket.NewReader(s.ctx, fileKey, nil) - - if gcerrors.Code(err) == gcerrors.NotFound { - err = ErrNotFound - } - - return br, err + return s.bucket.NewReader(s.ctx, fileKey) } // Copy copies the file stored at srcKey to dstKey. @@ -186,13 +119,7 @@ func (s *System) GetFile(fileKey string) (*blob.Reader, error) { // // If dstKey file already exists, it is overwritten. func (s *System) Copy(srcKey, dstKey string) error { - err := s.bucket.Copy(s.ctx, dstKey, srcKey, nil) - - if gcerrors.Code(err) == gcerrors.NotFound { - err = ErrNotFound - } - - return err + return s.bucket.Copy(s.ctx, dstKey, srcKey) } // List returns a flat list with info for all files under the specified prefix. @@ -206,7 +133,7 @@ func (s *System) List(prefix string) ([]*blob.ListObject, error) { for { obj, err := iter.Next(s.ctx) if err != nil { - if err != io.EOF { + if !errors.Is(err, io.EOF) { return nil, err } break @@ -323,13 +250,7 @@ func (s *System) UploadMultipart(fh *multipart.FileHeader, fileKey string) error // // If the file doesn't exist returns ErrNotFound. func (s *System) Delete(fileKey string) error { - err := s.bucket.Delete(s.ctx, fileKey) - - if gcerrors.Code(err) == gcerrors.NotFound { - return ErrNotFound - } - - return err + return s.bucket.Delete(s.ctx, fileKey) } // DeletePrefix deletes everything starting with the specified prefix. 
@@ -361,7 +282,7 @@ func (s *System) DeletePrefix(prefix string) []error { for { obj, err := iter.Next(s.ctx) if err != nil { - if err != io.EOF { + if !errors.Is(err, io.EOF) { failed = append(failed, err) } break @@ -420,7 +341,7 @@ func (s *System) IsEmptyDir(dir string) bool { _, err := iter.Next(s.ctx) - return err == io.EOF + return err != nil && errors.Is(err, io.EOF) } var inlineServeContentTypes = []string{ diff --git a/tools/filesystem/ignore_signing_headers.go b/tools/filesystem/ignore_signing_headers.go deleted file mode 100644 index 52e39e4d..00000000 --- a/tools/filesystem/ignore_signing_headers.go +++ /dev/null @@ -1,72 +0,0 @@ -package filesystem - -import ( - "context" - "fmt" - - v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" -) - -// ignoreSigningHeaders excludes the listed headers -// from the request signing because some providers may alter them. -// -// See https://github.com/aws/aws-sdk-go-v2/issues/1816. 
-func ignoreSigningHeaders(o *s3.Options, headers []string) { - o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { - if err := stack.Finalize.Insert(ignoreHeaders(headers), "Signing", middleware.Before); err != nil { - return err - } - - if err := stack.Finalize.Insert(restoreIgnored(), "Signing", middleware.After); err != nil { - return err - } - - return nil - }) -} - -type ignoredHeadersKey struct{} - -func ignoreHeaders(headers []string) middleware.FinalizeMiddleware { - return middleware.FinalizeMiddlewareFunc( - "IgnoreHeaders", - func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &v4.SigningError{Err: fmt.Errorf("(ignoreHeaders) unexpected request middleware type %T", in.Request)} - } - - ignored := make(map[string]string, len(headers)) - for _, h := range headers { - ignored[h] = req.Header.Get(h) - req.Header.Del(h) - } - - ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored) - - return next.HandleFinalize(ctx, in) - }, - ) -} - -func restoreIgnored() middleware.FinalizeMiddleware { - return middleware.FinalizeMiddlewareFunc( - "RestoreIgnored", - func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) { - req, ok := in.Request.(*smithyhttp.Request) - if !ok { - return out, metadata, &v4.SigningError{Err: fmt.Errorf("(restoreIgnored) unexpected request middleware type %T", in.Request)} - } - - ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string) - for k, v := range ignored { - req.Header.Set(k, v) - } - - return next.HandleFinalize(ctx, in) - }, - ) -} diff --git a/tools/filesystem/internal/fileblob/attrs.go b/tools/filesystem/internal/fileblob/attrs.go new file mode 100644 index 
00000000..bc51ae05 --- /dev/null +++ b/tools/filesystem/internal/fileblob/attrs.go @@ -0,0 +1,79 @@ +// Copyright 2018 The Go Cloud Development Kit Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileblob + +import ( + "encoding/json" + "fmt" + "os" +) + +const attrsExt = ".attrs" + +var errAttrsExt = fmt.Errorf("file extension %q is reserved", attrsExt) + +// xattrs stores extended attributes for an object. The format is like +// filesystem extended attributes, see +// https://www.freedesktop.org/wiki/CommonExtendedAttributes. +type xattrs struct { + CacheControl string `json:"user.cache_control"` + ContentDisposition string `json:"user.content_disposition"` + ContentEncoding string `json:"user.content_encoding"` + ContentLanguage string `json:"user.content_language"` + ContentType string `json:"user.content_type"` + Metadata map[string]string `json:"user.metadata"` + MD5 []byte `json:"md5"` +} + +// setAttrs creates a "path.attrs" file along with blob to store the attributes, +// it uses JSON format. +func setAttrs(path string, xa xattrs) error { + f, err := os.Create(path + attrsExt) + if err != nil { + return err + } + + if err := json.NewEncoder(f).Encode(xa); err != nil { + f.Close() + os.Remove(f.Name()) + return err + } + + return f.Close() +} + +// getAttrs looks at the "path.attrs" file to retrieve the attributes and +// decodes them into a xattrs struct. It doesn't return error when there is no +// such .attrs file. 
+func getAttrs(path string) (xattrs, error) {
+	f, err := os.Open(path + attrsExt)
+	if err != nil {
+		if os.IsNotExist(err) {
+			// Handle gracefully for non-existent .attrs files.
+			return xattrs{
+				ContentType: "application/octet-stream",
+			}, nil
+		}
+		return xattrs{}, err
+	}
+
+	xa := new(xattrs)
+	if err := json.NewDecoder(f).Decode(xa); err != nil {
+		f.Close()
+		return xattrs{}, err
+	}
+
+	return *xa, f.Close()
+}
diff --git a/tools/filesystem/internal/fileblob/fileblob.go b/tools/filesystem/internal/fileblob/fileblob.go
new file mode 100644
index 00000000..e057db5d
--- /dev/null
+++ b/tools/filesystem/internal/fileblob/fileblob.go
@@ -0,0 +1,713 @@
+// Package fileblob provides a blob.Bucket driver implementation.
+//
+// NB! To minimize breaking changes with older PocketBase releases,
+// the driver is a stripped down and adapted version of the previously
+// used gocloud.dev/blob/fileblob, hence many of the below doc comments,
+// struct options and interface implementations are the same.
+//
+// To avoid partial writes, fileblob writes to a temporary file and then renames
+// the temporary file to the final path on Close. By default, it creates these
+// temporary files in `os.TempDir`. If `os.TempDir` is on a different mount than
+// your base bucket path, the `os.Rename` will fail with `invalid cross-device link`.
+// To avoid this, either configure the temp dir to use by setting the environment
+// variable `TMPDIR`, or set `Options.NoTempDir` to `true` (fileblob will create
+// the temporary files next to the actual files instead of in a temporary directory).
+//
+// By default fileblob stores blob metadata in "sidecar" files under the original
+// filename with an additional ".attrs" suffix.
+// This behaviour can be changed via `Options.Metadata`;
+// writing of those metadata files can be suppressed by setting it to
+// `MetadataDontWrite` or its equivalent "metadata=skip" in the URL for the opener.
+// In either case, absent any stored metadata many `blob.Attributes` fields
+// will be set to default values.
+//
+// The blob abstraction supports all UTF-8 strings; to make this work with services lacking
+// full UTF-8 support, strings must be escaped (during writes) and unescaped
+// (during reads). The following escapes are performed for fileblob:
+//   - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
+//     If os.PathSeparator != "/", it is also escaped.
+//     Additionally, the "/" in "../", the trailing "/" in "//", and a trailing
+//     "/" in key names are escaped in the same way.
+//     On Windows, the characters "<>:"|?*" are also escaped.
+//
+// Example:
+//
+//	drv, _ := fileblob.New("/path/to/dir", nil)
+//	bucket := blob.NewBucket(drv)
+package fileblob
+
+import (
+	"context"
+	"crypto/md5"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pocketbase/pocketbase/tools/filesystem/blob"
+)
+
+const defaultPageSize = 1000
+
+type metadataOption string // Not exported as subject to change.
+
+// Settings for Options.Metadata.
+const (
+	// Metadata gets written to a separate file.
+	MetadataInSidecar metadataOption = ""
+
+	// Writes won't carry metadata, as per the package docstring.
+	MetadataDontWrite metadataOption = "skip"
+)
+
+// Options sets options for constructing a *blob.Bucket backed by fileblob.
+type Options struct {
+	// Refers to the strategy for how to deal with metadata (such as blob.Attributes).
+	// For supported values please see the Metadata* constants.
+	// If left unchanged, 'MetadataInSidecar' will be used.
+	Metadata metadataOption
+
+	// The FileMode to use when creating directories for the top-level directory
+	// backing the bucket (when CreateDir is true), and for subdirectories for keys.
+	// Defaults to 0777.
+	DirFileMode os.FileMode
+
+	// If true, create the directory backing the Bucket if it does not exist
+	// (using os.MkdirAll).
+ CreateDir bool + + // If true, don't use os.TempDir for temporary files, but instead place them + // next to the actual files. This may result in "stranded" temporary files + // (e.g., if the application is killed before the file cleanup runs). + // + // If your bucket directory is on a different mount than os.TempDir, you will + // need to set this to true, as os.Rename will fail across mount points. + NoTempDir bool +} + +// New creates a new instance of the fileblob driver backed by the +// filesystem and rooted at dir, which must exist. +func New(dir string, opts *Options) (blob.Driver, error) { + if opts == nil { + opts = &Options{} + } + if opts.DirFileMode == 0 { + opts.DirFileMode = os.FileMode(0o777) + } + + absdir, err := filepath.Abs(dir) + if err != nil { + return nil, fmt.Errorf("failed to convert %s into an absolute path: %v", dir, err) + } + + // Optionally, create the directory if it does not already exist. + info, err := os.Stat(absdir) + if err != nil && opts.CreateDir && os.IsNotExist(err) { + err = os.MkdirAll(absdir, opts.DirFileMode) + if err != nil { + return nil, fmt.Errorf("tried to create directory but failed: %v", err) + } + info, err = os.Stat(absdir) + } + if err != nil { + return nil, err + } + + if !info.IsDir() { + return nil, fmt.Errorf("%s is not a directory", absdir) + } + + return &driver{dir: absdir, opts: opts}, nil +} + +type driver struct { + opts *Options + dir string +} + +// Close implements [blob/Driver.Close]. +func (drv *driver) Close() error { + return nil +} + +// NormalizeError implements [blob/Driver.NormalizeError]. +func (drv *driver) NormalizeError(err error) error { + if os.IsNotExist(err) { + return errors.Join(err, blob.ErrNotFound) + } + + return err +} + +// path returns the full path for a key. 
func (drv *driver) path(key string) (string, error) {
	path := filepath.Join(drv.dir, escapeKey(key))

	// Refuse keys that collide with the metadata sidecar files
	// (attrsExt) that this driver writes next to each blob.
	if strings.HasSuffix(path, attrsExt) {
		return "", errAttrsExt
	}

	return path, nil
}

// forKey returns the full path, os.FileInfo, and attributes for key.
func (drv *driver) forKey(key string) (string, os.FileInfo, *xattrs, error) {
	path, err := drv.path(key)
	if err != nil {
		return "", nil, nil, err
	}

	info, err := os.Stat(path)
	if err != nil {
		return "", nil, nil, err
	}

	// Directories are an implementation detail of the key escaping scheme;
	// they are never valid blob keys themselves.
	if info.IsDir() {
		return "", nil, nil, os.ErrNotExist
	}

	xa, err := getAttrs(path)
	if err != nil {
		return "", nil, nil, err
	}

	return path, info, &xa, nil
}

// ListPaged implements [blob/Driver.ListPaged].
func (drv *driver) ListPaged(ctx context.Context, opts *blob.ListOptions) (*blob.ListPage, error) {
	var pageToken string
	if len(opts.PageToken) > 0 {
		pageToken = string(opts.PageToken)
	}

	pageSize := opts.PageSize
	if pageSize == 0 {
		pageSize = defaultPageSize
	}

	// If opts.Delimiter != "", lastPrefix contains the last "directory" key we
	// added. It is used to avoid adding it again; all files in this "directory"
	// are collapsed to the single directory entry.
	var lastPrefix string
	var lastKeyAdded string

	// If the Prefix contains a "/", we can set the root of the Walk
	// to the path specified by the Prefix as any files below the path will not
	// match the Prefix.
	// Note that we use "/" explicitly and not os.PathSeparator, as the opts.Prefix
	// is in the unescaped form.
	root := drv.dir
	if i := strings.LastIndex(opts.Prefix, "/"); i > -1 {
		root = filepath.Join(root, opts.Prefix[:i])
	}

	var result blob.ListPage

	// Do a full recursive scan of the root directory.
	err := filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error {
		if err != nil {
			// Couldn't read this file/directory for some reason; just skip it.
			return nil
		}

		// Skip the self-generated attribute files.
		if strings.HasSuffix(path, attrsExt) {
			return nil
		}

		// os.Walk returns the root directory; skip it.
		if path == drv.dir {
			return nil
		}

		// Strip the prefix from path.
		prefixLen := len(drv.dir)
		// Include the separator for non-root.
		if drv.dir != "/" {
			prefixLen++
		}
		path = path[prefixLen:]

		// Unescape the path to get the key.
		key := unescapeKey(path)

		// Skip all directories. If opts.Delimiter is set, we'll create
		// pseudo-directories later.
		// Note that returning nil means that we'll still recurse into it;
		// we're just not adding a result for the directory itself.
		if info.IsDir() {
			key += "/"
			// Avoid recursing into subdirectories if the directory name already
			// doesn't match the prefix; any files in it are guaranteed not to match.
			if len(key) > len(opts.Prefix) && !strings.HasPrefix(key, opts.Prefix) {
				return filepath.SkipDir
			}
			// Similarly, avoid recursing into subdirectories if we're making
			// "directories" and all of the files in this subdirectory are guaranteed
			// to collapse to a "directory" that we've already added.
			if lastPrefix != "" && strings.HasPrefix(key, lastPrefix) {
				return filepath.SkipDir
			}
			return nil
		}

		// Skip files/directories that don't match the Prefix.
		if !strings.HasPrefix(key, opts.Prefix) {
			return nil
		}

		// NOTE(review): this local shadows the crypto/md5 package import within
		// the closure (legal, but consider renaming to md5sum for clarity).
		var md5 []byte
		if xa, err := getAttrs(path); err == nil {
			// Note: we only have the MD5 hash for blobs that we wrote.
			// For other blobs, md5 will remain nil.
			md5 = xa.MD5
		}

		fi, err := info.Info()
		if err != nil {
			return err
		}

		obj := &blob.ListObject{
			Key:     key,
			ModTime: fi.ModTime(),
			Size:    fi.Size(),
			MD5:     md5,
		}

		// If using Delimiter, collapse "directories".
		if opts.Delimiter != "" {
			// Strip the prefix, which may contain Delimiter.
			keyWithoutPrefix := key[len(opts.Prefix):]
			// See if the key still contains Delimiter.
			// If no, it's a file and we just include it.
			// If yes, it's a file in a "sub-directory" and we want to collapse
			// all files in that "sub-directory" into a single "directory" result.
			if idx := strings.Index(keyWithoutPrefix, opts.Delimiter); idx != -1 {
				prefix := opts.Prefix + keyWithoutPrefix[0:idx+len(opts.Delimiter)]
				// We've already included this "directory"; don't add it.
				if prefix == lastPrefix {
					return nil
				}
				// Update the object to be a "directory".
				obj = &blob.ListObject{
					Key:   prefix,
					IsDir: true,
				}
				lastPrefix = prefix
			}
		}

		// If there's a pageToken, skip anything before it.
		if pageToken != "" && obj.Key <= pageToken {
			return nil
		}

		// If we've already got a full page of results, set NextPageToken and stop.
		// Unless the current object is a directory, in which case there may
		// still be objects coming that are alphabetically before it (since
		// we appended the delimiter). In that case, keep going; we'll trim the
		// extra entries (if any) before returning.
		if len(result.Objects) == pageSize && !obj.IsDir {
			result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
			return io.EOF
		}

		result.Objects = append(result.Objects, obj)

		// Normally, objects are added in the correct order (by Key).
		// However, sometimes adding the file delimiter messes that up
		// (e.g., if the file delimiter is later in the alphabet than the last character of a key).
		// Detect if this happens and swap if needed.
		if len(result.Objects) > 1 && obj.Key < lastKeyAdded {
			i := len(result.Objects) - 1
			result.Objects[i-1], result.Objects[i] = result.Objects[i], result.Objects[i-1]
			lastKeyAdded = result.Objects[i].Key
		} else {
			lastKeyAdded = obj.Key
		}

		return nil
	})
	// io.EOF is our own "page is full" sentinel from the walk above, not a real error.
	if err != nil && err != io.EOF {
		return nil, err
	}

	if len(result.Objects) > pageSize {
		result.Objects = result.Objects[0:pageSize]
		result.NextPageToken = []byte(result.Objects[pageSize-1].Key)
	}

	return &result, nil
}

// Attributes implements [blob/Driver.Attributes].
func (drv *driver) Attributes(ctx context.Context, key string) (*blob.Attributes, error) {
	_, info, xa, err := drv.forKey(key)
	if err != nil {
		return nil, err
	}

	return &blob.Attributes{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		ContentType:        xa.ContentType,
		Metadata:           xa.Metadata,
		// CreateTime left as the zero time.
		ModTime: info.ModTime(),
		Size:    info.Size(),
		MD5:     xa.MD5,
		// The filesystem has no native ETag; synthesize one from mtime+size.
		ETag: fmt.Sprintf("\"%x-%x\"", info.ModTime().UnixNano(), info.Size()),
	}, nil
}

// NewRangeReader implements [blob/Driver.NewRangeReader].
+func (drv *driver) NewRangeReader(ctx context.Context, key string, offset, length int64) (blob.DriverReader, error) { + path, info, xa, err := drv.forKey(key) + if err != nil { + return nil, err + } + + f, err := os.Open(path) + if err != nil { + return nil, err + } + + if offset > 0 { + if _, err := f.Seek(offset, io.SeekStart); err != nil { + return nil, err + } + } + + r := io.Reader(f) + if length >= 0 { + r = io.LimitReader(r, length) + } + + return &reader{ + r: r, + c: f, + attrs: &blob.ReaderAttributes{ + ContentType: xa.ContentType, + ModTime: info.ModTime(), + Size: info.Size(), + }, + }, nil +} + +func createTemp(path string, noTempDir bool) (*os.File, error) { + // Use a custom createTemp function rather than os.CreateTemp() as + // os.CreateTemp() sets the permissions of the tempfile to 0600, rather than + // 0666, making it inconsistent with the directories and attribute files. + try := 0 + for { + // Append the current time with nanosecond precision and .tmp to the + // base path. If the file already exists try again. Nanosecond changes enough + // between each iteration to make a conflict unlikely. Using the full + // time lowers the chance of a collision with a file using a similar + // pattern, but has undefined behavior after the year 2262. + var name string + if noTempDir { + name = path + } else { + name = filepath.Join(os.TempDir(), filepath.Base(path)) + } + name += "." + strconv.FormatInt(time.Now().UnixNano(), 16) + ".tmp" + + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o666) + if os.IsExist(err) { + if try++; try < 10000 { + continue + } + return nil, &os.PathError{Op: "createtemp", Path: path + ".*.tmp", Err: os.ErrExist} + } + + return f, err + } +} + +// NewTypedWriter implements [blob/Driver.NewTypedWriter]. 
func (drv *driver) NewTypedWriter(ctx context.Context, key, contentType string, opts *blob.WriterOptions) (blob.DriverWriter, error) {
	path, err := drv.path(key)
	if err != nil {
		return nil, err
	}

	// Make sure the parent directories exist before creating the temp file.
	err = os.MkdirAll(filepath.Dir(path), drv.opts.DirFileMode)
	if err != nil {
		return nil, err
	}

	f, err := createTemp(path, drv.opts.NoTempDir)
	if err != nil {
		return nil, err
	}

	// Without metadata sidecar files the plain writer is enough
	// (note: no MD5 is computed in this mode).
	if drv.opts.Metadata == MetadataDontWrite {
		w := &writer{
			ctx:  ctx,
			File: f,
			path: path,
		}
		return w, nil
	}

	var metadata map[string]string
	if len(opts.Metadata) > 0 {
		metadata = opts.Metadata
	}

	return &writerWithSidecar{
		ctx:        ctx,
		f:          f,
		path:       path,
		contentMD5: opts.ContentMD5,
		md5hash:    md5.New(),
		attrs: xattrs{
			CacheControl:       opts.CacheControl,
			ContentDisposition: opts.ContentDisposition,
			ContentEncoding:    opts.ContentEncoding,
			ContentLanguage:    opts.ContentLanguage,
			ContentType:        contentType,
			Metadata:           metadata,
		},
	}, nil
}

// Copy implements [blob/Driver.Copy].
func (drv *driver) Copy(ctx context.Context, dstKey, srcKey string) error {
	// Note: we could use NewRangeReader here, but since we need to copy all of
	// the metadata (from xa), it's more efficient to do it directly.
	srcPath, _, xa, err := drv.forKey(srcKey)
	if err != nil {
		return err
	}

	f, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer f.Close()

	// We'll write the copy using Writer, to avoid re-implementing making of a
	// temp file, cleaning up after partial failures, etc.
	wopts := blob.WriterOptions{
		CacheControl:       xa.CacheControl,
		ContentDisposition: xa.ContentDisposition,
		ContentEncoding:    xa.ContentEncoding,
		ContentLanguage:    xa.ContentLanguage,
		Metadata:           xa.Metadata,
	}

	// Create a cancelable context so we can cancel the write if there are problems.
+ writeCtx, cancel := context.WithCancel(ctx) + defer cancel() + + w, err := drv.NewTypedWriter(writeCtx, dstKey, xa.ContentType, &wopts) + if err != nil { + return err + } + + _, err = io.Copy(w, f) + if err != nil { + cancel() // cancel before Close cancels the write + w.Close() + return err + } + + return w.Close() +} + +// Delete implements [blob/Driver.Delete]. +func (b *driver) Delete(ctx context.Context, key string) error { + path, err := b.path(key) + if err != nil { + return err + } + + err = os.Remove(path) + if err != nil { + return err + } + + err = os.Remove(path + attrsExt) + if err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +// ------------------------------------------------------------------- + +type reader struct { + r io.Reader + c io.Closer + attrs *blob.ReaderAttributes +} + +func (r *reader) Read(p []byte) (int, error) { + if r.r == nil { + return 0, io.EOF + } + return r.r.Read(p) +} + +func (r *reader) Close() error { + if r.c == nil { + return nil + } + return r.c.Close() +} + +// Attributes implements [blob/DriverReader.Attributes]. +func (r *reader) Attributes() *blob.ReaderAttributes { + return r.attrs +} + +// ------------------------------------------------------------------- + +// writerWithSidecar implements the strategy of storing metadata in a distinct file. +type writerWithSidecar struct { + ctx context.Context + md5hash hash.Hash + f *os.File + path string + attrs xattrs + contentMD5 []byte +} + +func (w *writerWithSidecar) Write(p []byte) (n int, err error) { + n, err = w.f.Write(p) + if err != nil { + // Don't hash the unwritten tail twice when writing is resumed. + w.md5hash.Write(p[:n]) + return n, err + } + + if _, err := w.md5hash.Write(p); err != nil { + return n, err + } + + return n, nil +} + +func (w *writerWithSidecar) Close() error { + err := w.f.Close() + if err != nil { + return err + } + + // Always delete the temp file. On success, it will have been + // renamed so the Remove will fail. 
	defer func() {
		_ = os.Remove(w.f.Name())
	}()

	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}

	// NOTE(review): w.contentMD5 (the caller-provided Content-MD5) is stored
	// but never compared against the computed hash here — confirm whether that
	// validation was intentionally dropped.
	md5sum := w.md5hash.Sum(nil)
	w.attrs.MD5 = md5sum

	// Write the attributes file.
	if err := setAttrs(w.path, w.attrs); err != nil {
		return err
	}

	// Rename the temp file to path.
	if err := os.Rename(w.f.Name(), w.path); err != nil {
		_ = os.Remove(w.path + attrsExt)
		return err
	}

	return nil
}

// writer is a file with a temporary name until closed.
//
// Embedding os.File allows the likes of io.Copy to use optimizations,
// which is why it is not folded into writerWithSidecar.
type writer struct {
	*os.File
	ctx  context.Context
	path string
}

func (w *writer) Close() error {
	err := w.File.Close()
	if err != nil {
		return err
	}

	// Always delete the temp file. On success, it will have been renamed so
	// the Remove will fail.
	tempname := w.File.Name()
	defer os.Remove(tempname)

	// Check if the write was cancelled.
	if err := w.ctx.Err(); err != nil {
		return err
	}

	// Rename the temp file to path.
	return os.Rename(tempname, w.path)
}

// -------------------------------------------------------------------

// escapeKey does all required escaping for UTF-8 strings to work the filesystem.
func escapeKey(s string) string {
	s = blob.HexEscape(s, func(r []rune, i int) bool {
		c := r[i]
		switch {
		case c < 32:
			return true
		// We're going to replace '/' with os.PathSeparator below. In order for this
		// to be reversible, we need to escape raw os.PathSeparators.
		case os.PathSeparator != '/' && c == os.PathSeparator:
			return true
		// For "../", escape the trailing slash.
		case i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.':
			return true
		// For "//", escape the trailing slash.
		case i > 0 && c == '/' && r[i-1] == '/':
			return true
		// Escape the trailing slash in a key.
		case c == '/' && i == len(r)-1:
			return true
		// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
		case os.PathSeparator == '\\' && (c == '>' || c == '<' || c == ':' || c == '"' || c == '|' || c == '?' || c == '*'):
			return true
		}
		return false
	})

	// Replace "/" with os.PathSeparator if needed, so that the local filesystem
	// can use subdirectories.
	if os.PathSeparator != '/' {
		s = strings.ReplaceAll(s, "/", string(os.PathSeparator))
	}

	return s
}

// unescapeKey reverses escapeKey.
func unescapeKey(s string) string {
	if os.PathSeparator != '/' {
		s = strings.ReplaceAll(s, string(os.PathSeparator), "/")
	}

	return blob.HexUnescape(s)
}
diff --git a/tools/filesystem/internal/s3blob/driver.go b/tools/filesystem/internal/s3blob/driver.go
new file mode 100644
index 00000000..25746b77
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/driver.go
@@ -0,0 +1,482 @@
// Package s3blob provides a blob.Bucket S3 driver implementation.
//
// NB! To minimize breaking changes with older PocketBase releases,
// the driver is based on the previously used gocloud.dev/blob/s3blob,
// hence many of the below doc comments, struct options and interface
// implementations are the same.
//
// The blob abstraction supports all UTF-8 strings; to make this work with services lacking
// full UTF-8 support, strings must be escaped (during writes) and unescaped
// (during reads). The following escapes are performed for s3blob:
//   - Blob keys: ASCII characters 0-31 are escaped to "__0x<hex>__".
//     Additionally, the "/" in "../" is escaped in the same way.
//   - Metadata keys: Escaped using URL encoding, then additionally "@:=" are
//     escaped using "__0x<hex>__". These characters were determined by
//     experimentation.
//   - Metadata values: Escaped using URL encoding.
//
// Example:
//
//	drv, _ := s3blob.New(&s3.S3{
//		Bucket:    "bucketName",
//		Region:    "region",
//		Endpoint:  "endpoint",
//		AccessKey: "accessKey",
//		SecretKey: "secretKey",
//	})
//	bucket := blob.NewBucket(drv)
package s3blob

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"

	"github.com/pocketbase/pocketbase/tools/filesystem/blob"
	"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)

const defaultPageSize = 1000

// New creates a new instance of the S3 driver backed by the internal S3 client.
func New(s3Client *s3.S3) (blob.Driver, error) {
	if s3Client.Bucket == "" {
		return nil, errors.New("s3blob.New: missing bucket name")
	}

	if s3Client.Endpoint == "" {
		return nil, errors.New("s3blob.New: missing endpoint")
	}

	if s3Client.Region == "" {
		return nil, errors.New("s3blob.New: missing region")
	}

	return &driver{s3: s3Client}, nil
}

type driver struct {
	s3 *s3.S3
}

// Close implements [blob/Driver.Close].
func (drv *driver) Close() error {
	return nil // nothing to release
}

// NormalizeError implements [blob/Driver.NormalizeError].
func (drv *driver) NormalizeError(err error) error {
	var ae s3.ResponseError
	if errors.As(err, &ae) {
		switch ae.Code {
		case "NoSuchBucket", "NoSuchKey", "NotFound":
			// Surface a driver-agnostic "not found" so callers can match blob.ErrNotFound.
			return errors.Join(err, blob.ErrNotFound)
		}
	}

	return err
}

// ListPaged implements [blob/Driver.ListPaged].
+func (drv *driver) ListPaged(ctx context.Context, opts *blob.ListOptions) (*blob.ListPage, error) { + pageSize := opts.PageSize + if pageSize == 0 { + pageSize = defaultPageSize + } + + in := s3.ListParams{ + MaxKeys: pageSize, + } + if len(opts.PageToken) > 0 { + in.ContinuationToken = string(opts.PageToken) + } + if opts.Prefix != "" { + in.Prefix = escapeKey(opts.Prefix) + } + if opts.Delimiter != "" { + in.Delimiter = escapeKey(opts.Delimiter) + } + + var reqOptions []func(*http.Request) + + resp, err := drv.s3.ListObjects(ctx, in, reqOptions...) + if err != nil { + return nil, err + } + + page := blob.ListPage{} + if resp.NextContinuationToken != "" { + page.NextPageToken = []byte(resp.NextContinuationToken) + } + + if n := len(resp.Contents) + len(resp.CommonPrefixes); n > 0 { + page.Objects = make([]*blob.ListObject, n) + for i, obj := range resp.Contents { + page.Objects[i] = &blob.ListObject{ + Key: unescapeKey(obj.Key), + ModTime: obj.LastModified, + Size: obj.Size, + MD5: eTagToMD5(obj.ETag), + } + } + + for i, prefix := range resp.CommonPrefixes { + page.Objects[i+len(resp.Contents)] = &blob.ListObject{ + Key: unescapeKey(prefix.Prefix), + IsDir: true, + } + } + + if len(resp.Contents) > 0 && len(resp.CommonPrefixes) > 0 { + // S3 gives us blobs and "directories" in separate lists; sort them. + sort.Slice(page.Objects, func(i, j int) bool { + return page.Objects[i].Key < page.Objects[j].Key + }) + } + } + + return &page, nil +} + +// Attributes implements [blob/Driver.Attributes]. +func (drv *driver) Attributes(ctx context.Context, key string) (*blob.Attributes, error) { + key = escapeKey(key) + + resp, err := drv.s3.HeadObject(ctx, key) + if err != nil { + return nil, err + } + + md := make(map[string]string, len(resp.Metadata)) + for k, v := range resp.Metadata { + // See the package comments for more details on escaping of metadata + // keys & values. 
+ md[blob.HexUnescape(urlUnescape(k))] = urlUnescape(v) + } + + return &blob.Attributes{ + CacheControl: resp.CacheControl, + ContentDisposition: resp.ContentDisposition, + ContentEncoding: resp.ContentEncoding, + ContentLanguage: resp.ContentLanguage, + ContentType: resp.ContentType, + Metadata: md, + // CreateTime not supported; left as the zero time. + ModTime: resp.LastModified, + Size: resp.ContentLength, + MD5: eTagToMD5(resp.ETag), + ETag: resp.ETag, + }, nil +} + +// NewRangeReader implements [blob/Driver.NewRangeReader]. +func (drv *driver) NewRangeReader(ctx context.Context, key string, offset, length int64) (blob.DriverReader, error) { + key = escapeKey(key) + + var byteRange string + if offset > 0 && length < 0 { + byteRange = fmt.Sprintf("bytes=%d-", offset) + } else if length == 0 { + // AWS doesn't support a zero-length read; we'll read 1 byte and then + // ignore it in favor of http.NoBody below. + byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset) + } else if length >= 0 { + byteRange = fmt.Sprintf("bytes=%d-%d", offset, offset+length-1) + } + + reqOptions := []func(*http.Request){ + func(req *http.Request) { + req.Header.Set("Range", byteRange) + }, + } + + resp, err := drv.s3.GetObject(ctx, key, reqOptions...) + if err != nil { + return nil, err + } + + body := resp.Body + if length == 0 { + body = http.NoBody + } + + return &reader{ + body: body, + attrs: &blob.ReaderAttributes{ + ContentType: resp.ContentType, + ModTime: resp.LastModified, + Size: getSize(resp.ContentLength, resp.ContentRange), + }, + }, nil +} + +// NewTypedWriter implements [blob/Driver.NewTypedWriter]. 
func (drv *driver) NewTypedWriter(ctx context.Context, key string, contentType string, opts *blob.WriterOptions) (blob.DriverWriter, error) {
	key = escapeKey(key)

	u := &s3.Uploader{
		S3:  drv.s3,
		Key: key,
	}

	if opts.BufferSize != 0 {
		u.MinPartSize = opts.BufferSize
	}

	if opts.MaxConcurrency != 0 {
		u.MaxConcurrency = opts.MaxConcurrency
	}

	md := make(map[string]string, len(opts.Metadata))
	for k, v := range opts.Metadata {
		// See the package comments for more details on escaping of metadata keys & values.
		k = blob.HexEscape(url.PathEscape(k), func(runes []rune, i int) bool {
			c := runes[i]
			return c == '@' || c == ':' || c == '='
		})
		md[k] = url.PathEscape(v)
	}
	u.Metadata = md

	// All content-related headers are applied per-request by the uploader.
	var reqOptions []func(*http.Request)
	reqOptions = append(reqOptions, func(r *http.Request) {
		r.Header.Set("Content-Type", contentType)

		if opts.CacheControl != "" {
			r.Header.Set("Cache-Control", opts.CacheControl)
		}
		if opts.ContentDisposition != "" {
			r.Header.Set("Content-Disposition", opts.ContentDisposition)
		}
		if opts.ContentEncoding != "" {
			r.Header.Set("Content-Encoding", opts.ContentEncoding)
		}
		if opts.ContentLanguage != "" {
			r.Header.Set("Content-Language", opts.ContentLanguage)
		}
		if len(opts.ContentMD5) > 0 {
			r.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(opts.ContentMD5))
		}
	})

	return &writer{
		ctx:        ctx,
		uploader:   u,
		donec:      make(chan struct{}),
		reqOptions: reqOptions,
	}, nil
}

// Copy implements [blob/Driver.Copy].
func (drv *driver) Copy(ctx context.Context, dstKey, srcKey string) error {
	dstKey = escapeKey(dstKey)
	srcKey = escapeKey(srcKey)
	_, err := drv.s3.CopyObject(ctx, srcKey, dstKey)
	return err
}

// Delete implements [blob/Driver.Delete].
func (drv *driver) Delete(ctx context.Context, key string) error {
	key = escapeKey(key)
	return drv.s3.DeleteObject(ctx, key)
}

// -------------------------------------------------------------------

// reader reads an S3 object. It implements io.ReadCloser.
type reader struct {
	attrs *blob.ReaderAttributes
	body  io.ReadCloser
}

// Read implements [io/ReadCloser.Read].
func (r *reader) Read(p []byte) (int, error) {
	return r.body.Read(p)
}

// Close closes the reader itself. It must be called when done reading.
func (r *reader) Close() error {
	return r.body.Close()
}

// Attributes implements [blob/DriverReader.Attributes].
func (r *reader) Attributes() *blob.ReaderAttributes {
	return r.attrs
}

// -------------------------------------------------------------------

// writer writes an S3 object, it implements io.WriteCloser.
type writer struct {
	ctx      context.Context
	err      error // written before donec closes
	uploader *s3.Uploader

	// Ends of an io.Pipe, created when the first byte is written.
	pw *io.PipeWriter
	pr *io.PipeReader

	donec chan struct{} // closed when done writing

	reqOptions []func(*http.Request)
}

// Write appends p to w.pw. User must call Close to close the w after done writing.
// The first non-empty Write lazily creates the pipe and starts the background
// upload goroutine.
func (w *writer) Write(p []byte) (int, error) {
	// Avoid opening the pipe for a zero-length write;
	// the concrete can do these for empty blobs.
	if len(p) == 0 {
		return 0, nil
	}

	if w.pw == nil {
		// We'll write into pw and use pr as an io.Reader for the
		// Upload call to S3.
		w.pr, w.pw = io.Pipe()
		w.open(w.pr, true)
	}

	return w.pw.Write(p)
}

// r may be nil if we're Closing and no data was written.
// If closePipeOnError is true, w.pr will be closed if there's an
// error uploading to S3.
func (w *writer) open(r io.Reader, closePipeOnError bool) {
	// This goroutine will keep running until Close, unless there's an error.
	go func() {
		defer func() {
			close(w.donec)
		}()

		if r == nil {
			// AWS doesn't like a nil Body.
			r = http.NoBody
		}
		var err error

		w.uploader.Payload = r

		err = w.uploader.Upload(w.ctx, w.reqOptions...)

		if err != nil {
			if closePipeOnError {
				w.pr.CloseWithError(err)
			}
			// w.err is assigned before the deferred close(w.donec) runs,
			// so it is safe to read after <-w.donec (see Close).
			w.err = err
		}
	}()
}

// Close completes the writer and closes it. Any error occurring during write
// will be returned. If a writer is closed before any Write is called, Close
// will create an empty file at the given key.
func (w *writer) Close() error {
	if w.pr != nil {
		defer w.pr.Close()
	}

	if w.pw == nil {
		// We never got any bytes written. We'll write an http.NoBody.
		w.open(nil, false)
	} else if err := w.pw.Close(); err != nil {
		return err
	}

	<-w.donec

	return w.err
}

// -------------------------------------------------------------------

// etagToMD5 processes an ETag header and returns an MD5 hash if possible.
// S3's ETag header is sometimes a quoted hexstring of the MD5. Other times,
// notably when the object was uploaded in multiple parts, it is not.
// We do the best we can.
// Some links about ETag:
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
// https://github.com/aws/aws-sdk-net/issues/815
// https://teppen.io/2018/06/23/aws_s3_etags/
func eTagToMD5(etag string) []byte {
	// No header at all.
	if etag == "" {
		return nil
	}

	// Strip the expected leading and trailing quotes.
	if len(etag) < 2 || etag[0] != '"' || etag[len(etag)-1] != '"' {
		return nil
	}
	unquoted := etag[1 : len(etag)-1]

	// Un-hex; we return nil on error. In particular, we'll get an error here
	// for multi-part uploaded blobs, whose ETag will contain a "-" and so will
	// never be a legal hex encoding.
	md5, err := hex.DecodeString(unquoted)
	if err != nil {
		return nil
	}

	return md5
}

// getSize resolves the full blob size from the response headers.
func getSize(contentLength int64, contentRange string) int64 {
	// Default size to ContentLength, but that's incorrect for partial-length reads,
	// where ContentLength refers to the size of the returned Body, not the entire
	// size of the blob. ContentRange has the full size.
	size := contentLength
	if contentRange != "" {
		// Sample: bytes 10-14/27 (where 27 is the full size).
		parts := strings.Split(contentRange, "/")
		if len(parts) == 2 {
			if i, err := strconv.ParseInt(parts[1], 10, 64); err == nil {
				size = i
			}
		}
	}

	return size
}

// escapeKey does all required escaping for UTF-8 strings to work with S3.
func escapeKey(key string) string {
	return blob.HexEscape(key, func(r []rune, i int) bool {
		c := r[i]

		// S3 doesn't handle these characters (determined via experimentation).
		if c < 32 {
			return true
		}

		// For "../", escape the trailing slash.
		if i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.' {
			return true
		}

		return false
	})
}

// unescapeKey reverses escapeKey.
func unescapeKey(key string) string {
	return blob.HexUnescape(key)
}

// urlUnescape reverses URLEscape using url.PathUnescape. If the unescape
// returns an error, it returns s.
func urlUnescape(s string) string {
	if u, err := url.PathUnescape(s); err == nil {
		return u
	}

	return s
}
diff --git a/tools/filesystem/internal/s3blob/s3/client_test.go b/tools/filesystem/internal/s3blob/s3/client_test.go
new file mode 100644
index 00000000..ebdf321e
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/s3/client_test.go
@@ -0,0 +1,128 @@
package s3_test

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"slices"
	"strings"
	"sync"
)

// checkHeaders reports whether every expectation matches the corresponding
// request header (plain values match literally; values wrapped in "^...$"
// are treated as regular expressions).
func checkHeaders(headers http.Header, expectations map[string]string) bool {
	for h, expected := range expectations {
		v := headers.Get(h)

		pattern := expected
		if !strings.HasPrefix(pattern, "^") && !strings.HasSuffix(pattern, "$") {
			pattern = "^" + regexp.QuoteMeta(pattern) + "$"
		}

		expectedRegex, err := regexp.Compile(pattern)
		if err != nil {
			return false
		}

		if !expectedRegex.MatchString(v) {
			return false
		}
	}

	return true
}

type RequestStub struct {
	Method   string
	URL      string // plain string or regex pattern wrapped in "^pattern$"
	Match    func(req *http.Request) bool
	Response *http.Response
}

func NewTestClient(stubs ...*RequestStub) *TestClient {
	return &TestClient{stubs: stubs}
}

type TestClient struct {
	stubs []*RequestStub
	mu    sync.Mutex
}

func (c *TestClient) AssertNoRemaining() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if len(c.stubs) == 0 {
		return nil
	}

	msgParts := make([]string, 0, len(c.stubs)+1)
	msgParts = append(msgParts, "not all stub requests were processed:")
	for _, stub := range c.stubs {
		msgParts = append(msgParts, "- "+stub.Method+" "+stub.URL)
	}

	return errors.New(strings.Join(msgParts, "\n"))
}

// Do dispatches req against the registered stubs; each stub is consumed
// by at most one request.
func (c *TestClient) Do(req *http.Request) (*http.Response, error) {
	c.mu.Lock()
	defer c.mu.Unlock()

	for i, stub := range c.stubs {
		if req.Method != stub.Method {
			continue
		}

		urlPattern := stub.URL
		if !strings.HasPrefix(urlPattern, "^") && !strings.HasSuffix(urlPattern, "$") {
			urlPattern = "^" + regexp.QuoteMeta(urlPattern) + "$"
		}

		urlRegex, err := regexp.Compile(urlPattern)
		if err != nil {
			return nil, err
		}

		if !urlRegex.MatchString(req.URL.String()) {
			continue
		}

		if stub.Match != nil && !stub.Match(req) {
			continue
		}

		// remove from the remaining stubs
		c.stubs = slices.Delete(c.stubs, i, i+1)

		response := stub.Response
		if response == nil {
			response = &http.Response{}
		}
		if response.Header == nil {
			response.Header = http.Header{}
		}
		if response.Body == nil {
			response.Body = http.NoBody
		}

		response.Request = req

		return response, nil
	}

	var body []byte
	if req.Body != nil {
		defer req.Body.Close()
		body, _ = io.ReadAll(req.Body)
	}

	return nil, fmt.Errorf(
		"the below request doesn't have a corresponding stub:\n%s %s\nHeaders: %v\nBody: %q",
		req.Method,
		req.URL.String(),
		req.Header,
		body,
	)
}
diff --git a/tools/filesystem/internal/s3blob/s3/copy_object.go b/tools/filesystem/internal/s3blob/s3/copy_object.go
new file mode 100644
index 00000000..de26bade
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/s3/copy_object.go
@@ -0,0 +1,59 @@
package s3

import (
	"context"
	"encoding/xml"
	"net/http"
	"net/url"
	"strings"
	"time"
)

// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html#API_CopyObject_ResponseSyntax
type CopyObjectResponse struct {
	CopyObjectResult  xml.Name  `json:"copyObjectResult" xml:"CopyObjectResult"`
	ETag              string    `json:"etag" xml:"ETag"`
	LastModified      time.Time `json:"lastModified" xml:"LastModified"`
	ChecksumType      string    `json:"checksumType" xml:"ChecksumType"`
	ChecksumCRC32     string    `json:"checksumCRC32" xml:"ChecksumCRC32"`
	ChecksumCRC32C    string    `json:"checksumCRC32C" xml:"ChecksumCRC32C"`
	ChecksumCRC64NVME string    `json:"checksumCRC64NVME" xml:"ChecksumCRC64NVME"`
	ChecksumSHA1      string    `json:"checksumSHA1" xml:"ChecksumSHA1"`
	ChecksumSHA256    string    `json:"checksumSHA256" xml:"ChecksumSHA256"`
}

// CopyObject copies a single object from srcKey to dstKey destination.
// (both keys are expected to be operating within the same bucket).
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
func (s3 *S3) CopyObject(ctx context.Context, srcKey string, dstKey string, optReqFuncs ...func(*http.Request)) (*CopyObjectResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, s3.URL(dstKey), nil)
	if err != nil {
		return nil, err
	}

	// per the doc the header value must be URL-encoded
	// (url.PathEscape also encodes the bucket/key separator as %2F, which S3 accepts)
	req.Header.Set("x-amz-copy-source", url.PathEscape(s3.Bucket+"/"+strings.TrimLeft(srcKey, "/")))

	// apply optional request funcs
	for _, fn := range optReqFuncs {
		if fn != nil {
			fn(req)
		}
	}

	// NOTE(review): assumes SignAndSend returns a non-nil error for non-2xx
	// responses, otherwise the decode below could parse an error payload — confirm in s3.go.
	resp, err := s3.SignAndSend(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	result := &CopyObjectResponse{}

	err = xml.NewDecoder(resp.Body).Decode(result)
	if err != nil {
		return nil, err
	}

	return result, nil
}
diff --git a/tools/filesystem/internal/s3blob/s3/copy_object_test.go b/tools/filesystem/internal/s3blob/s3/copy_object_test.go
new file mode 100644
index 00000000..ce2d7abf
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/s3/copy_object_test.go
@@ -0,0 +1,66 @@
package s3_test

import (
	"context"
	"io"
	"net/http"
	"strings"
	"testing"

	"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)

func TestS3CopyObject(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodPut,
			URL:    "http://test_bucket.example.com/@dst_test",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":       "test",
					"x-amz-copy-source": "test_bucket%2F@src_test",
					"Authorization":     "^.+Credential=123/.+$",
				})
			},
			Response: &http.Response{
				// NOTE(review): XML sample reconstructed — the tags were
				// stripped by the patch extraction; verify against upstream.
				Body: io.NopCloser(strings.NewReader(`
					<CopyObjectResult>
						<LastModified>2025-01-01T01:02:03.456Z</LastModified>
						<ETag>test_etag</ETag>
					</CopyObjectResult>
				`)),
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	copyResp, err := s3Client.CopyObject(context.Background(), "@src_test", "@dst_test", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}

	if copyResp.ETag != "test_etag" {
		t.Fatalf("Expected ETag %q, got %q", "test_etag", copyResp.ETag)
	}

	if date := copyResp.LastModified.Format("2006-01-02T15:04:05.000Z"); date != "2025-01-01T01:02:03.456Z" {
		t.Fatalf("Expected LastModified %q, got %q", "2025-01-01T01:02:03.456Z", date)
	}
}
diff --git a/tools/filesystem/internal/s3blob/s3/delete_object.go b/tools/filesystem/internal/s3blob/s3/delete_object.go
new file mode 100644
index 00000000..a46c8ab2
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/s3/delete_object.go
@@ -0,0 +1,31 @@
package s3

import (
	"context"
	"net/http"
)

// DeleteObject deletes a single object by its key.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
func (s3 *S3) DeleteObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) error {
	req, err := http.NewRequestWithContext(ctx, http.MethodDelete, s3.URL(key), nil)
	if err != nil {
		return err
	}

	// apply optional request funcs
	for _, fn := range optFuncs {
		if fn != nil {
			fn(req)
		}
	}

	resp, err := s3.SignAndSend(req)
	if err != nil {
		return err
	}
	// the response payload is not used; just make sure it gets closed
	defer resp.Body.Close()

	return nil
}
diff --git a/tools/filesystem/internal/s3blob/s3/delete_object_test.go b/tools/filesystem/internal/s3blob/s3/delete_object_test.go
new file mode 100644
index 00000000..7db6c572
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/s3/delete_object_test.go
@@ -0,0 +1,47 @@
package s3_test

import (
	"context"
	"net/http"
	"testing"

	"github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3"
)

func TestS3DeleteObject(t *testing.T) {
	t.Parallel()

	httpClient := NewTestClient(
		&RequestStub{
			Method: http.MethodDelete,
			URL:    "http://test_bucket.example.com/test_key",
			Match: func(req *http.Request) bool {
				return checkHeaders(req.Header, map[string]string{
					"test_header":   "test",
					"Authorization": "^.+Credential=123/.+$",
				})
			},
		},
	)

	s3Client := &s3.S3{
		Client:    httpClient,
		Region:    "test_region",
		Bucket:    "test_bucket",
		Endpoint:  "http://example.com",
		AccessKey: "123",
		SecretKey: "abc",
	}

	err := s3Client.DeleteObject(context.Background(), "test_key", func(r *http.Request) {
		r.Header.Set("test_header", "test")
	})
	if err != nil {
		t.Fatal(err)
	}

	err = httpClient.AssertNoRemaining()
	if err != nil {
		t.Fatal(err)
	}
}
diff --git a/tools/filesystem/internal/s3blob/s3/error.go b/tools/filesystem/internal/s3blob/s3/error.go
new file mode 100644
index 00000000..e98ef7c6
--- /dev/null
+++ b/tools/filesystem/internal/s3blob/s3/error.go
@@ -0,0 +1,47 @@
package s3

import (
	"encoding/xml"
+ "strconv" + "strings" +) + +// ResponseError defines a general S3 response error. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +type ResponseError struct { + XMLName xml.Name `json:"-" xml:"Error"` + Code string `json:"code" xml:"Code"` + Message string `json:"message" xml:"Message"` + RequestId string `json:"requestId" xml:"RequestId"` + Resource string `json:"resource" xml:"Resource"` + Raw []byte `json:"-" xml:"-"` + Status int `json:"status" xml:"Status"` +} + +// Error implements the std error interface. +func (err ResponseError) Error() string { + var strBuilder strings.Builder + + strBuilder.WriteString(strconv.Itoa(err.Status)) + strBuilder.WriteString(" ") + + if err.Code != "" { + strBuilder.WriteString(err.Code) + } else { + strBuilder.WriteString("S3ResponseError") + } + + if err.Message != "" { + strBuilder.WriteString(": ") + strBuilder.WriteString(err.Message) + } + + if len(err.Raw) > 0 { + strBuilder.WriteString("\n(RAW: ") + strBuilder.Write(err.Raw) + strBuilder.WriteString(")") + } + + return strBuilder.String() +} diff --git a/tools/filesystem/internal/s3blob/s3/error_test.go b/tools/filesystem/internal/s3blob/s3/error_test.go new file mode 100644 index 00000000..b4020060 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/error_test.go @@ -0,0 +1,86 @@ +package s3_test + +import ( + "encoding/json" + "encoding/xml" + "testing" + + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" +) + +func TestResponseErrorSerialization(t *testing.T) { + raw := ` + + + test_code + test_message + test_request_id + test_resource + + ` + + respErr := s3.ResponseError{ + Status: 123, + Raw: []byte("test"), + } + + err := xml.Unmarshal([]byte(raw), &respErr) + if err != nil { + t.Fatal(err) + } + + jsonRaw, err := json.Marshal(respErr) + if err != nil { + t.Fatal(err) + } + jsonStr := string(jsonRaw) + + expected := 
`{"code":"test_code","message":"test_message","requestId":"test_request_id","resource":"test_resource","status":123}` + + if expected != jsonStr { + t.Fatalf("Expected JSON\n%s\ngot\n%s", expected, jsonStr) + } +} + +func TestResponseErrorErrorInterface(t *testing.T) { + scenarios := []struct { + name string + err s3.ResponseError + expected string + }{ + { + "empty", + s3.ResponseError{}, + "0 S3ResponseError", + }, + { + "with code and message (nil raw)", + s3.ResponseError{ + Status: 123, + Code: "test_code", + Message: "test_message", + }, + "123 test_code: test_message", + }, + { + "with code and message (non-nil raw)", + s3.ResponseError{ + Status: 123, + Code: "test_code", + Message: "test_message", + Raw: []byte("test_raw"), + }, + "123 test_code: test_message\n(RAW: test_raw)", + }, + } + + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + result := s.err.Error() + + if result != s.expected { + t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result) + } + }) + } +} diff --git a/tools/filesystem/internal/s3blob/s3/get_object.go b/tools/filesystem/internal/s3blob/s3/get_object.go new file mode 100644 index 00000000..e83f38fa --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/get_object.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "context" + "io" + "net/http" +) + +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_ResponseElements +type GetObjectResponse struct { + Body io.ReadCloser `json:"-" xml:"-"` + + HeadObjectResponse +} + +// GetObject retrieves a single object by its key. +// +// NB! Make sure to call GetObjectResponse.Body.Close() after done working with the result. 
+// +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +func (s3 *S3) GetObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*GetObjectResponse, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL(key), nil) + if err != nil { + return nil, err + } + + // apply optional request funcs + for _, fn := range optFuncs { + if fn != nil { + fn(req) + } + } + + resp, err := s3.SignAndSend(req) + if err != nil { + return nil, err + } + + result := &GetObjectResponse{Body: resp.Body} + result.load(resp.Header) + + return result, nil +} diff --git a/tools/filesystem/internal/s3blob/s3/get_object_test.go b/tools/filesystem/internal/s3blob/s3/get_object_test.go new file mode 100644 index 00000000..802b14ab --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/get_object_test.go @@ -0,0 +1,91 @@ +package s3_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "strings" + "testing" + + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" +) + +func TestS3GetObject(t *testing.T) { + t.Parallel() + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodGet, + URL: "http://test_bucket.example.com/test_key", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{ + "Last-Modified": []string{"Mon, 01 Feb 2025 03:04:05 GMT"}, + "Cache-Control": []string{"test_cache"}, + "Content-Disposition": []string{"test_disposition"}, + "Content-Encoding": []string{"test_encoding"}, + "Content-Language": []string{"test_language"}, + "Content-Type": []string{"test_type"}, + "Content-Range": []string{"test_range"}, + "Etag": []string{"test_etag"}, + "Content-Length": []string{"100"}, + "x-amz-meta-AbC": []string{"test_meta_a"}, + "x-amz-meta-Def": []string{"test_meta_b"}, + }, + Body: 
io.NopCloser(strings.NewReader("test")), + }, + }, + ) + + s3Client := &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + } + + resp, err := s3Client.GetObject(context.Background(), "test_key", func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } + + // check body + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + bodyStr := string(body) + + if bodyStr != "test" { + t.Fatalf("Expected body\n%q\ngot\n%q", "test", bodyStr) + } + + // check serialized attributes + raw, err := json.Marshal(resp) + if err != nil { + t.Fatal(err) + } + rawStr := string(raw) + + expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}` + + if rawStr != expected { + t.Fatalf("Expected attributes\n%s\ngot\n%s", expected, rawStr) + } +} diff --git a/tools/filesystem/internal/s3blob/s3/head_object.go b/tools/filesystem/internal/s3blob/s3/head_object.go new file mode 100644 index 00000000..ff5d7ab6 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/head_object.go @@ -0,0 +1,89 @@ +package s3 + +import ( + "context" + "net/http" + "strconv" + "time" +) + +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseElements +type HeadObjectResponse struct { + // Metadata is the extra data that is stored with the S3 object (aka. the "x-amz-meta-*" header values). + // + // The map keys are normalized to lower-case. 
+	Metadata map[string]string `json:"metadata"`
+
+	// LastModified date and time when the object was last modified.
+	LastModified time.Time `json:"lastModified"`
+
+	// CacheControl specifies caching behavior along the request/reply chain.
+	CacheControl string `json:"cacheControl"`
+
+	// ContentDisposition specifies presentational information for the object.
+	ContentDisposition string `json:"contentDisposition"`
+
+	// ContentEncoding indicates what content encodings have been applied to the object
+	// and thus what decoding mechanisms must be applied to obtain the
+	// media-type referenced by the Content-Type header field.
+	ContentEncoding string `json:"contentEncoding"`
+
+	// ContentLanguage indicates the language the content is in.
+	ContentLanguage string `json:"contentLanguage"`
+
+	// ContentType is a standard MIME type describing the format of the object data.
+	ContentType string `json:"contentType"`
+
+	// ContentRange is the portion of the object usually returned in the response for a GET request.
+	ContentRange string `json:"contentRange"`
+
+	// ETag is an opaque identifier assigned by a web
+	// server to a specific version of a resource found at a URL.
+	ETag string `json:"etag"`
+
+	// ContentLength is the size of the body in bytes.
+	ContentLength int64 `json:"contentLength"`
+}
+
+// load parses and loads the header values into the current HeadObjectResponse fields.
+func (o *HeadObjectResponse) load(headers http.Header) { + o.LastModified, _ = time.Parse(time.RFC1123, headers.Get("Last-Modified")) + o.CacheControl = headers.Get("Cache-Control") + o.ContentDisposition = headers.Get("Content-Disposition") + o.ContentEncoding = headers.Get("Content-Encoding") + o.ContentLanguage = headers.Get("Content-Language") + o.ContentType = headers.Get("Content-Type") + o.ContentRange = headers.Get("Content-Range") + o.ETag = headers.Get("ETag") + o.ContentLength, _ = strconv.ParseInt(headers.Get("Content-Length"), 10, 0) + o.Metadata = extractMetadata(headers) +} + +// HeadObject sends a HEAD request for a single object to check its +// existence and to retrieve its metadata. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html +func (s3 *S3) HeadObject(ctx context.Context, key string, optFuncs ...func(*http.Request)) (*HeadObjectResponse, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodHead, s3.URL(key), nil) + if err != nil { + return nil, err + } + + // apply optional request funcs + for _, fn := range optFuncs { + if fn != nil { + fn(req) + } + } + + resp, err := s3.SignAndSend(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + result := &HeadObjectResponse{} + result.load(resp.Header) + + return result, nil +} diff --git a/tools/filesystem/internal/s3blob/s3/head_object_test.go b/tools/filesystem/internal/s3blob/s3/head_object_test.go new file mode 100644 index 00000000..d7a8c965 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/head_object_test.go @@ -0,0 +1,76 @@ +package s3_test + +import ( + "context" + "encoding/json" + "net/http" + "testing" + + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" +) + +func TestS3HeadObject(t *testing.T) { + t.Parallel() + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodHead, + URL: "http://test_bucket.example.com/test_key", + Match: func(req *http.Request) bool { + return 
checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{ + "Last-Modified": []string{"Mon, 01 Feb 2025 03:04:05 GMT"}, + "Cache-Control": []string{"test_cache"}, + "Content-Disposition": []string{"test_disposition"}, + "Content-Encoding": []string{"test_encoding"}, + "Content-Language": []string{"test_language"}, + "Content-Type": []string{"test_type"}, + "Content-Range": []string{"test_range"}, + "Etag": []string{"test_etag"}, + "Content-Length": []string{"100"}, + "x-amz-meta-AbC": []string{"test_meta_a"}, + "x-amz-meta-Def": []string{"test_meta_b"}, + }, + Body: http.NoBody, + }, + }, + ) + + s3Client := &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + } + + resp, err := s3Client.HeadObject(context.Background(), "test_key", func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err != nil { + t.Fatal(err) + } + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } + + raw, err := json.Marshal(resp) + if err != nil { + t.Fatal(err) + } + rawStr := string(raw) + + expected := `{"metadata":{"abc":"test_meta_a","def":"test_meta_b"},"lastModified":"2025-02-01T03:04:05Z","cacheControl":"test_cache","contentDisposition":"test_disposition","contentEncoding":"test_encoding","contentLanguage":"test_language","contentType":"test_type","contentRange":"test_range","etag":"test_etag","contentLength":100}` + + if rawStr != expected { + t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr) + } +} diff --git a/tools/filesystem/internal/s3blob/s3/list_objects.go b/tools/filesystem/internal/s3blob/s3/list_objects.go new file mode 100644 index 00000000..77c2794a --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/list_objects.go @@ -0,0 +1,165 @@ +package s3 + +import ( + "context" + "encoding/xml" + "net/http" + 
"net/url" + "strconv" + "time" +) + +// ListParams defines optional parameters for the ListObject request. +type ListParams struct { + // ContinuationToken indicates that the list is being continued on this bucket with a token. + // ContinuationToken is obfuscated and is not a real key. + // You can use this ContinuationToken for pagination of the list results. + ContinuationToken string `json:"continuationToken"` + + // Delimiter is a character that you use to group keys. + // + // For directory buckets, "/" is the only supported delimiter. + Delimiter string `json:"delimiter"` + + // Prefix limits the response to keys that begin with the specified prefix. + Prefix string `json:"prefix"` + + // Encoding type is used to encode the object keys in the response. + // Responses are encoded only in UTF-8. + // An object key can contain any Unicode character. + // However, the XML 1.0 parser can't parse certain characters, + // such as characters with an ASCII value from 0 to 10. + // For characters that aren't supported in XML 1.0, you can add + // this parameter to request that S3 encode the keys in the response. + // + // Valid Values: url + EncodingType string `json:"encodingType"` + + // StartAfter is where you want S3 to start listing from. + // S3 starts listing after this specified key. + // StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. + StartAfter string `json:"startAfter"` + + // MaxKeys Sets the maximum number of keys returned in the response. + // By default, the action returns up to 1,000 key names. + // The response might contain fewer keys but will never contain more. + MaxKeys int `json:"maxKeys"` + + // FetchOwner returns the owner field with each key in the result. + FetchOwner bool `json:"fetchOwner"` +} + +// Encode encodes the parameters in a properly formatted query string. 
+func (l *ListParams) Encode() string { + query := url.Values{} + + query.Add("list-type", "2") + + if l.ContinuationToken != "" { + query.Add("continuation-token", l.ContinuationToken) + } + + if l.Delimiter != "" { + query.Add("delimiter", l.Delimiter) + } + + if l.Prefix != "" { + query.Add("prefix", l.Prefix) + } + + if l.EncodingType != "" { + query.Add("encoding-type", l.EncodingType) + } + + if l.FetchOwner { + query.Add("fetch-owner", "true") + } + + if l.MaxKeys > 0 { + query.Add("max-keys", strconv.Itoa(l.MaxKeys)) + } + + if l.StartAfter != "" { + query.Add("start-after", l.StartAfter) + } + + return query.Encode() +} + +// ListObjects retrieves paginated objects list. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html +func (s3 *S3) ListObjects(ctx context.Context, params ListParams, optReqFuncs ...func(*http.Request)) (*ListObjectsResponse, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, s3.URL("?"+params.Encode()), nil) + if err != nil { + return nil, err + } + + // apply optional request funcs + for _, fn := range optReqFuncs { + if fn != nil { + fn(req) + } + } + + resp, err := s3.SignAndSend(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + result := &ListObjectsResponse{} + + err = xml.NewDecoder(resp.Body).Decode(result) + if err != nil { + return nil, err + } + + return result, nil +} + +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_ResponseSyntax +type ListObjectsResponse struct { + XMLName xml.Name `json:"-" xml:"ListBucketResult"` + EncodingType string `json:"encodingType" xml:"EncodingType"` + Name string `json:"name" xml:"Name"` + Prefix string `json:"prefix" xml:"Prefix"` + Delimiter string `json:"delimiter" xml:"Delimiter"` + ContinuationToken string `json:"continuationToken" xml:"ContinuationToken"` + NextContinuationToken string `json:"nextContinuationToken" xml:"NextContinuationToken"` + StartAfter string 
`json:"startAfter" xml:"StartAfter"` + + CommonPrefixes []*ListObjectCommonPrefix `json:"commonPrefixes" xml:"CommonPrefixes"` + + Contents []*ListObjectContent `json:"contents" xml:"Contents"` + + KeyCount int `json:"keyCount" xml:"KeyCount"` + MaxKeys int `json:"maxKeys" xml:"MaxKeys"` + IsTruncated bool `json:"isTruncated" xml:"IsTruncated"` +} + +type ListObjectCommonPrefix struct { + Prefix string `json:"prefix" xml:"Prefix"` +} + +type ListObjectContent struct { + Owner struct { + DisplayName string `json:"displayName" xml:"DisplayName"` + ID string `json:"id" xml:"ID"` + } `json:"owner" xml:"Owner"` + + ChecksumAlgorithm string `json:"checksumAlgorithm" xml:"ChecksumAlgorithm"` + ETag string `json:"etag" xml:"ETag"` + Key string `json:"key" xml:"Key"` + StorageClass string `json:"storageClass" xml:"StorageClass"` + LastModified time.Time `json:"lastModified" xml:"LastModified"` + + RestoreStatus struct { + RestoreExpiryDate time.Time `json:"restoreExpiryDate" xml:"RestoreExpiryDate"` + IsRestoreInProgress bool `json:"isRestoreInProgress" xml:"IsRestoreInProgress"` + } `json:"restoreStatus" xml:"RestoreStatus"` + + Size int64 `json:"size" xml:"Size"` +} diff --git a/tools/filesystem/internal/s3blob/s3/list_objects_test.go b/tools/filesystem/internal/s3blob/s3/list_objects_test.go new file mode 100644 index 00000000..e6d9b728 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/list_objects_test.go @@ -0,0 +1,156 @@ +package s3_test + +import ( + "context" + "encoding/json" + "io" + "net/http" + "strings" + "testing" + + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" +) + +func TestS3ListParamsEncode(t *testing.T) { + t.Parallel() + + scenarios := []struct { + name string + params s3.ListParams + expected string + }{ + { + "blank", + s3.ListParams{}, + "list-type=2", + }, + { + "filled", + s3.ListParams{ + ContinuationToken: "test_ct", + Delimiter: "test_delimiter", + Prefix: "test_prefix", + EncodingType: "test_et", + StartAfter: 
"test_sa", + MaxKeys: 1, + FetchOwner: true, + }, + "continuation-token=test_ct&delimiter=test_delimiter&encoding-type=test_et&fetch-owner=true&list-type=2&max-keys=1&prefix=test_prefix&start-after=test_sa", + }, + } + + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + result := s.params.Encode() + if result != s.expected { + t.Fatalf("Expected\n%s\ngot\n%s", s.expected, result) + } + }) + } +} + +func TestS3ListObjects(t *testing.T) { + t.Parallel() + + listParams := s3.ListParams{ + ContinuationToken: "test_ct", + Delimiter: "test_delimiter", + Prefix: "test_prefix", + EncodingType: "test_et", + StartAfter: "test_sa", + MaxKeys: 10, + FetchOwner: true, + } + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodGet, + URL: "http://test_bucket.example.com/?" + listParams.Encode(), + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Body: io.NopCloser(strings.NewReader(` + + + example + test_encoding + a/ + / + ct + nct + example0.txt + 1 + 3 + true + + example1.txt + 2025-01-01T01:02:03.123Z + test_ca + test_etag1 + 123 + STANDARD + + owner_dn + owner_id + + + 2025-01-02T01:02:03.123Z + true + + + + example2.txt + 2025-01-02T01:02:03.123Z + test_etag2 + 456 + STANDARD + + + a/b/ + + + a/c/ + + + `)), + }, + }, + ) + + s3Client := &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + } + + resp, err := s3Client.ListObjects(context.Background(), listParams, func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err != nil { + t.Fatal(err) + } + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } + + raw, err := json.Marshal(resp) + if err != nil { + t.Fatal(err) + } + rawStr := string(raw) + + expected := 
`{"encodingType":"test_encoding","name":"example","prefix":"a/","delimiter":"/","continuationToken":"ct","nextContinuationToken":"nct","startAfter":"example0.txt","commonPrefixes":[{"prefix":"a/b/"},{"prefix":"a/c/"}],"contents":[{"owner":{"displayName":"owner_dn","id":"owner_id"},"checksumAlgorithm":"test_ca","etag":"test_etag1","key":"example1.txt","storageClass":"STANDARD","lastModified":"2025-01-01T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"2025-01-02T01:02:03.123Z","isRestoreInProgress":true},"size":123},{"owner":{"displayName":"","id":""},"checksumAlgorithm":"","etag":"test_etag2","key":"example2.txt","storageClass":"STANDARD","lastModified":"2025-01-02T01:02:03.123Z","restoreStatus":{"restoreExpiryDate":"0001-01-01T00:00:00Z","isRestoreInProgress":false},"size":456}],"keyCount":1,"maxKeys":3,"isTruncated":true}` + + if rawStr != expected { + t.Fatalf("Expected response\n%s\ngot\n%s", expected, rawStr) + } +} diff --git a/tools/filesystem/internal/s3blob/s3/s3.go b/tools/filesystem/internal/s3blob/s3/s3.go new file mode 100644 index 00000000..9d62a318 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/s3.go @@ -0,0 +1,262 @@ +package s3 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "slices" + "strings" + "time" +) + +const ( + awsS3ServiceCode = "s3" + awsSignAlgorithm = "AWS4-HMAC-SHA256" + awsTerminationString = "aws4_request" + metadataPrefix = "x-amz-meta-" + dateTimeFormat = "20060102T150405Z" +) + +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type S3 struct { + // Client specifies the HTTP client to send the request with. + // + // If not explicitly set, fallbacks to http.DefaultClient. + Client HTTPClient + + Bucket string + Region string + Endpoint string // can be with or without the schema + AccessKey string + SecretKey string + UsePathStyle bool +} + +// URL constructs an S3 request URL based on the current configuration. 
+func (s3 *S3) URL(key string) string { + scheme := "https" + endpoint := strings.TrimRight(s3.Endpoint, "/") + if after, ok := strings.CutPrefix(endpoint, "https://"); ok { + endpoint = after + } else if after, ok := strings.CutPrefix(endpoint, "http://"); ok { + endpoint = after + scheme = "http" + } + + key = strings.TrimLeft(key, "/") + + if s3.UsePathStyle { + return fmt.Sprintf("%s://%s/%s/%s", scheme, endpoint, s3.Bucket, key) + } + + return fmt.Sprintf("%s://%s.%s/%s", scheme, s3.Bucket, endpoint, key) +} + +// SignAndSend signs the provided request per AWS Signature v4 and sends it. +// +// It automatically normalizes all 40x/50x responses to ResponseError. +// +// Note: Don't forget to call resp.Body.Close() after done with the result. +func (s3 *S3) SignAndSend(req *http.Request) (*http.Response, error) { + s3.sign(req) + + client := s3.Client + if client == nil { + client = http.DefaultClient + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 { + defer resp.Body.Close() + + respErr := &ResponseError{ + Status: resp.StatusCode, + } + + respErr.Raw, err = io.ReadAll(resp.Body) + if err != nil && !errors.Is(err, io.EOF) { + return nil, errors.Join(err, respErr) + } + + if len(respErr.Raw) > 0 { + err = xml.Unmarshal(respErr.Raw, respErr) + if err != nil { + return nil, errors.Join(err, respErr) + } + } + + return nil, respErr + } + + return resp, nil +} + +// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-signed-request-steps +func (s3 *S3) sign(req *http.Request) { + // fallback to the Unsigned payload option + // (data integrity checks could be still applied via the content-md5 or x-amz-checksum-* headers) + if req.Header.Get("x-amz-content-sha256") == "" { + req.Header.Set("x-amz-content-sha256", "UNSIGNED-PAYLOAD") + } + + reqDateTime, _ := time.Parse(dateTimeFormat, req.Header.Get("x-amz-date")) + if reqDateTime.IsZero() { + reqDateTime = 
time.Now().UTC() + req.Header.Set("x-amz-date", reqDateTime.Format(dateTimeFormat)) + } + + req.Header.Set("host", req.URL.Host) + + date := reqDateTime.Format("20060102") + + dateTime := reqDateTime.Format(dateTimeFormat) + + // 1. Create canonical request + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request + // --------------------------------------------------------------- + canonicalHeaders, signedHeaders := canonicalAndSignedHeaders(req) + + canonicalParts := []string{ + req.Method, + req.URL.EscapedPath(), + encodeQuery(req), + canonicalHeaders, + signedHeaders, + req.Header.Get("x-amz-content-sha256"), + } + + // 2. Create a hash of the canonical request + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-canonical-request-hash + // --------------------------------------------------------------- + hashedCanonicalRequest := sha256Hex([]byte(strings.Join(canonicalParts, "\n"))) + + // 3. Create a string to sign + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#create-string-to-sign + // --------------------------------------------------------------- + scope := strings.Join([]string{ + date, + s3.Region, + awsS3ServiceCode, + awsTerminationString, + }, "/") + + stringToSign := strings.Join([]string{ + awsSignAlgorithm, + dateTime, + scope, + hashedCanonicalRequest, + }, "\n") + + // 4. 
Derive a signing key for SigV4 + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#derive-signing-key + // --------------------------------------------------------------- + dateKey := hmacSHA256([]byte("AWS4"+s3.SecretKey), date) + dateRegionKey := hmacSHA256(dateKey, s3.Region) + dateRegionServiceKey := hmacSHA256(dateRegionKey, awsS3ServiceCode) + signingKey := hmacSHA256(dateRegionServiceKey, awsTerminationString) + signature := hex.EncodeToString(hmacSHA256(signingKey, stringToSign)) + + // 5. Add the signature to the request + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html#add-signature-to-request + authorization := fmt.Sprintf( + "%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", + awsSignAlgorithm, + s3.AccessKey, + scope, + signedHeaders, + signature, + ) + + req.Header.Set("authorization", authorization) +} + +// encodeQuery encodes the request query parameters according to the AWS requirements +// (see UriEncode description in https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv-create-signed-request.html). 
+func encodeQuery(req *http.Request) string { + return strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") +} + +func sha256Hex(content []byte) string { + h := sha256.New() + h.Write(content) + return hex.EncodeToString(h.Sum(nil)) +} + +func hmacSHA256(key []byte, content string) []byte { + mac := hmac.New(sha256.New, key) + mac.Write([]byte(content)) + return mac.Sum(nil) +} + +func canonicalAndSignedHeaders(req *http.Request) (string, string) { + signed := []string{} + canonical := map[string]string{} + + for key, values := range req.Header { + normalizedKey := strings.ToLower(key) + + if normalizedKey != "host" && + normalizedKey != "content-type" && + !strings.HasPrefix(normalizedKey, "x-amz-") { + continue + } + + signed = append(signed, normalizedKey) + + // for each value: + // trim any leading or trailing spaces + // convert sequential spaces to a single space + normalizedValues := make([]string, len(values)) + for i, v := range values { + normalizedValues[i] = strings.ReplaceAll(strings.TrimSpace(v), " ", " ") + } + + canonical[normalizedKey] = strings.Join(normalizedValues, ",") + } + + slices.Sort(signed) + + var sortedCanonical strings.Builder + for _, key := range signed { + sortedCanonical.WriteString(key) + sortedCanonical.WriteString(":") + sortedCanonical.WriteString(canonical[key]) + sortedCanonical.WriteString("\n") + } + + return sortedCanonical.String(), strings.Join(signed, ";") +} + +// extractMetadata parses and extracts and the metadata from the specified request headers. +// +// The metadata keys are all lowercased and without the "x-amz-meta-" prefix. 
+func extractMetadata(headers http.Header) map[string]string { + result := map[string]string{} + + for k, v := range headers { + if len(v) == 0 { + continue + } + + metadataKey, ok := strings.CutPrefix(strings.ToLower(k), metadataPrefix) + if !ok { + continue + } + + result[metadataKey] = v[0] + } + + return result +} diff --git a/tools/filesystem/internal/s3blob/s3/s3_test.go b/tools/filesystem/internal/s3blob/s3/s3_test.go new file mode 100644 index 00000000..32f2f927 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/s3_test.go @@ -0,0 +1,224 @@ +package s3_test + +import ( + "io" + "net/http" + "strings" + "testing" + + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" +) + +func TestS3URL(t *testing.T) { + t.Parallel() + + scenarios := []struct { + name string + s3Client *s3.S3 + expected string + }{ + { + "no schema", + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "example.com/", + AccessKey: "123", + SecretKey: "abc", + }, + "https://test_bucket.example.com/test_key/a/b/c?q=1", + }, + { + "with https schema", + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "https://example.com/", + AccessKey: "123", + SecretKey: "abc", + }, + "https://test_bucket.example.com/test_key/a/b/c?q=1", + }, + { + "with http schema", + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com/", + AccessKey: "123", + SecretKey: "abc", + }, + "http://test_bucket.example.com/test_key/a/b/c?q=1", + }, + { + "path style addressing (non-explicit schema)", + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "example.com/", + AccessKey: "123", + SecretKey: "abc", + UsePathStyle: true, + }, + "https://example.com/test_bucket/test_key/a/b/c?q=1", + }, + { + "path style addressing (explicit schema)", + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com/", + AccessKey: "123", + SecretKey: "abc", + UsePathStyle: true, + }, + 
"http://example.com/test_bucket/test_key/a/b/c?q=1", + }, + } + + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + result := s.s3Client.URL("/test_key/a/b/c?q=1") + if result != s.expected { + t.Fatalf("Expected URL\n%s\ngot\n%s", s.expected, result) + } + }) + } +} + +func TestS3SignAndSend(t *testing.T) { + t.Parallel() + + testResponse := func() *http.Response { + return &http.Response{ + Body: io.NopCloser(strings.NewReader("test_response")), + } + } + + scenarios := []struct { + name string + reqFunc func(req *http.Request) + s3Client *s3.S3 + }{ + { + "minimal", + func(req *http.Request) { + req.Header.Set("x-amz-date", "20250102T150405Z") + }, + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "https://example.com/", + AccessKey: "123", + SecretKey: "abc", + Client: NewTestClient(&RequestStub{ + Method: http.MethodGet, + URL: "https://test_bucket.example.com/test", + Response: testResponse(), + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "Authorization": "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=ea093662bc1deef08dfb4ac35453dfaad5ea89edf102e9dd3b7156c9a27e4c1f", + "Host": "test_bucket.example.com", + "X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD", + "X-Amz-Date": "20250102T150405Z", + }) + }, + }), + }, + }, + { + "minimal with different access and secret keys", + func(req *http.Request) { + req.Header.Set("x-amz-date", "20250102T150405Z") + }, + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "https://example.com/", + AccessKey: "456", + SecretKey: "def", + Client: NewTestClient(&RequestStub{ + Method: http.MethodGet, + URL: "https://test_bucket.example.com/test", + Response: testResponse(), + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "Authorization": "AWS4-HMAC-SHA256 
Credential=456/20250102/test_region/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=17510fa1f724403dd0a563b61c9b31d1d718f877fcbd75455620d17a8afce5fb", + "Host": "test_bucket.example.com", + "X-Amz-Content-Sha256": "UNSIGNED-PAYLOAD", + "X-Amz-Date": "20250102T150405Z", + }) + }, + }), + }, + }, + { + "with extra headers", + func(req *http.Request) { + req.Header.Set("x-amz-date", "20250102T150405Z") + req.Header.Set("x-amz-content-sha256", "test_sha256") + req.Header.Set("x-amz-example", "123") + req.Header.Set("x-amz-meta-a", "456") + req.Header.Set("content-type", "image/png") + req.Header.Set("x-test", "789") // shouldn't be included in the signing headers + }, + &s3.S3{ + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "https://example.com/", + AccessKey: "123", + SecretKey: "abc", + Client: NewTestClient(&RequestStub{ + Method: http.MethodGet, + URL: "https://test_bucket.example.com/test", + Response: testResponse(), + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "authorization": "AWS4-HMAC-SHA256 Credential=123/20250102/test_region/s3/aws4_request, SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date;x-amz-example;x-amz-meta-a, Signature=86dccbcd012c33073dc99e9d0a9e0b717a4d8c11c37848cfa9a4a02716bc0db3", + "host": "test_bucket.example.com", + "x-amz-date": "20250102T150405Z", + "x-amz-content-sha256": "test_sha256", + "x-amz-example": "123", + "x-amz-meta-a": "456", + "x-test": "789", + }) + }, + }), + }, + }, + } + + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, s.s3Client.URL("/test"), strings.NewReader("test_request")) + if err != nil { + t.Fatal(err) + } + + if s.reqFunc != nil { + s.reqFunc(req) + } + + resp, err := s.s3Client.SignAndSend(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + err = s.s3Client.Client.(*TestClient).AssertNoRemaining() + if err != nil { + 
t.Fatal(err) + } + + expectedBody := "test_response" + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if str := string(body); str != expectedBody { + t.Fatalf("Expected body %q, got %q", expectedBody, str) + } + }) + } +} diff --git a/tools/filesystem/internal/s3blob/s3/uploader.go b/tools/filesystem/internal/s3blob/s3/uploader.go new file mode 100644 index 00000000..2a9d20d8 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/uploader.go @@ -0,0 +1,414 @@ +package s3 + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "strings" + "sync" + + "golang.org/x/sync/errgroup" +) + +var ErrUsedUploader = errors.New("the Uploader has been already used") + +const ( + defaultMaxConcurrency int = 5 + defaultMinPartSize int = 6 << 20 +) + +// Uploader handles S3 object upload. +// +// If the Payload size is less than the configured MinPartSize it sends +// a single (PutObject) request, otherwise performs chunked/multipart upload. +type Uploader struct { + // S3 is the S3 client instance performing the upload object request (required). + S3 *S3 + + // Payload is the object content to upload (required). + Payload io.Reader + + // Key is the destination key of the uploaded object (required). + Key string + + // Metadata specifies the optional metadata to write with the object upload. + Metadata map[string]string + + // MaxConcurrency specifies the max number of workers to use when + // performing chunked/multipart upload. + // + // If zero or negative, defaults to 5. + // + // This option is used only when the Payload size is > MinPartSize. + MaxConcurrency int + + // MinPartSize specifies the min Payload size required to perform + // chunked/multipart upload. + // + // If zero or negative, defaults to ~6MB. 
+ MinPartSize int + + uploadId string + uploadedParts []*mpPart + lastPartNumber int + mu sync.Mutex // guards lastPartNumber and the uploadedParts slice + used bool +} + +// Upload processes the current Uploader instance. +// +// Users can specify an optional optReqFuncs that will be passed down to all Upload internal requests +// (single upload, multipart init, multipart parts upload, multipart complete, multipart abort). +// +// Note that after this call the Uploader should be discarded (aka. no longer can be used). +func (u *Uploader) Upload(ctx context.Context, optReqFuncs ...func(*http.Request)) error { + if u.used { + return ErrUsedUploader + } + + err := u.validateAndNormalize() + if err != nil { + return err + } + + initPart, _, err := u.nextPart() + if err != nil && !errors.Is(err, io.EOF) { + return err + } + + if len(initPart) < u.MinPartSize { + return u.singleUpload(ctx, initPart, optReqFuncs...) + } + + err = u.multipartInit(ctx, optReqFuncs...) + if err != nil { + return fmt.Errorf("multipart init error: %w", err) + } + + err = u.multipartUpload(ctx, initPart, optReqFuncs...) + if err != nil { + return errors.Join( + u.multipartAbort(ctx, optReqFuncs...), + fmt.Errorf("multipart upload error: %w", err), + ) + } + + err = u.multipartComplete(ctx, optReqFuncs...) 
+	if err != nil {
+		return errors.Join(
+			u.multipartAbort(ctx, optReqFuncs...),
+			fmt.Errorf("multipart complete error: %w", err),
+		)
+	}
+
+	return nil
+}
+
+// -------------------------------------------------------------------
+
+// validateAndNormalize ensures that the required Uploader fields are set
+// and applies defaults for the zero/negative optional ones.
+func (u *Uploader) validateAndNormalize() error {
+	if u.S3 == nil {
+		return errors.New("Uploader.S3 must be a non-empty and properly initialized S3 client instance")
+	}
+
+	if u.Key == "" {
+		return errors.New("Uploader.Key is required")
+	}
+
+	if u.Payload == nil {
+		return errors.New("Uploader.Payload must be non-nil")
+	}
+
+	if u.MaxConcurrency <= 0 {
+		u.MaxConcurrency = defaultMaxConcurrency
+	}
+
+	if u.MinPartSize <= 0 {
+		u.MinPartSize = defaultMinPartSize
+	}
+
+	return nil
+}
+
+// singleUpload performs a regular (non-multipart) PutObject request with the
+// provided part as the entire object body.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+func (u *Uploader) singleUpload(ctx context.Context, part []byte, optReqFuncs ...func(*http.Request)) error {
+	if u.used {
+		return ErrUsedUploader
+	}
+
+	// mark the uploader as consumed so that repeated Upload calls fail
+	// with ErrUsedUploader, mirroring what the multipart flow does in
+	// its abort/complete steps
+	u.mu.Lock()
+	u.used = true
+	u.mu.Unlock()
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key), bytes.NewReader(part))
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Content-Length", strconv.Itoa(len(part)))
+
+	for k, v := range u.Metadata {
+		req.Header.Set(metadataPrefix+k, v)
+	}
+
+	// apply optional request funcs
+	for _, fn := range optReqFuncs {
+		if fn != nil {
+			fn(req)
+		}
+	}
+
+	resp, err := u.S3.SignAndSend(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+// -------------------------------------------------------------------
+
+// mpPart represents a single uploaded part of a multipart upload.
+type mpPart struct {
+	XMLName    xml.Name `xml:"Part"`
+	ETag       string   `xml:"ETag"`
+	PartNumber int      `xml:"PartNumber"`
+}
+
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+func (u *Uploader) multipartInit(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
+	if u.used {
+		return ErrUsedUploader
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?uploads"), nil)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range u.Metadata {
+		req.Header.Set(metadataPrefix+k, v)
+	}
+
+	// apply optional request funcs
+	for _, fn := range optReqFuncs {
+		if fn != nil {
+			fn(req)
+		}
+	}
+
+	resp, err := u.S3.SignAndSend(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body := &struct {
+		XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
+		UploadId string   `xml:"UploadId"`
+	}{}
+
+	err = xml.NewDecoder(resp.Body).Decode(body)
+	if err != nil {
+		return err
+	}
+
+	u.uploadId = body.UploadId
+
+	return nil
+}
+
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+func (u *Uploader) multipartAbort(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
+	u.mu.Lock()
+	defer u.mu.Unlock()
+
+	u.used = true
+
+	// ensure that the specified abort context is always valid to allow cleanup
+	var abortCtx = ctx
+	if abortCtx.Err() != nil {
+		abortCtx = context.Background()
+	}
+
+	query := url.Values{"uploadId": []string{u.uploadId}}
+
+	// note: the request must use abortCtx (and not the original ctx) so
+	// that the cleanup can still run even after ctx was cancelled
+	req, err := http.NewRequestWithContext(abortCtx, http.MethodDelete, u.S3.URL(u.Key+"?"+query.Encode()), nil)
+	if err != nil {
+		return err
+	}
+
+	// apply optional request funcs
+	for _, fn := range optReqFuncs {
+		if fn != nil {
+			fn(req)
+		}
+	}
+
+	resp, err := u.S3.SignAndSend(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+func (u *Uploader) multipartComplete(ctx context.Context, optReqFuncs ...func(*http.Request)) error {
+	u.mu.Lock()
+	defer u.mu.Unlock()
+
+	u.used = true
+
+	// the list of parts must be sorted in ascending order
+	slices.SortFunc(u.uploadedParts, func(a, b *mpPart) int {
+		if a.PartNumber < b.PartNumber {
+			return -1
+		}
+		if a.PartNumber > b.PartNumber {
+			return 1
+		}
+		return 0
+	})
+
+	// build a request payload with the uploaded parts
+	xmlParts := &struct {
+		XMLName xml.Name `xml:"CompleteMultipartUpload"`
+		Parts   []*mpPart
+	}{
+		Parts: u.uploadedParts,
+	}
+	
rawXMLParts, err := xml.Marshal(xmlParts) + if err != nil { + return err + } + reqPayload := strings.NewReader(xml.Header + string(rawXMLParts)) + + query := url.Values{"uploadId": []string{u.uploadId}} + req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.S3.URL(u.Key+"?"+query.Encode()), reqPayload) + if err != nil { + return err + } + + // apply optional request funcs + for _, fn := range optReqFuncs { + if fn != nil { + fn(req) + } + } + + resp, err := u.S3.SignAndSend(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (u *Uploader) nextPart() ([]byte, int, error) { + u.mu.Lock() + defer u.mu.Unlock() + + part := make([]byte, u.MinPartSize) + n, err := io.ReadFull(u.Payload, part) + + // normalize io.EOF errors and ensure that io.EOF is returned only when there were no read bytes + if err != nil && (errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) { + if n == 0 { + err = io.EOF + } else { + err = nil + } + } + + u.lastPartNumber++ + + return part[0:n], u.lastPartNumber, err +} + +func (u *Uploader) multipartUpload(ctx context.Context, initPart []byte, optReqFuncs ...func(*http.Request)) error { + var g errgroup.Group + g.SetLimit(u.MaxConcurrency) + + totalParallel := u.MaxConcurrency + + if len(initPart) != 0 { + totalParallel-- + initPartNumber := u.lastPartNumber + g.Go(func() error { + mp, err := u.uploadPart(ctx, initPartNumber, initPart, optReqFuncs...) + if err != nil { + return err + } + + u.mu.Lock() + u.uploadedParts = append(u.uploadedParts, mp) + u.mu.Unlock() + + return nil + }) + } + + for i := 0; i < totalParallel; i++ { + g.Go(func() error { + for { + part, num, err := u.nextPart() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + mp, err := u.uploadPart(ctx, num, part, optReqFuncs...) 
+ if err != nil { + return err + } + + u.mu.Lock() + u.uploadedParts = append(u.uploadedParts, mp) + u.mu.Unlock() + } + + return nil + }) + } + + return g.Wait() +} + +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +func (u *Uploader) uploadPart(ctx context.Context, partNumber int, partData []byte, optReqFuncs ...func(*http.Request)) (*mpPart, error) { + query := url.Values{} + query.Set("uploadId", u.uploadId) + query.Set("partNumber", strconv.Itoa(partNumber)) + + req, err := http.NewRequestWithContext(ctx, http.MethodPut, u.S3.URL(u.Key+"?"+query.Encode()), bytes.NewReader(partData)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Length", strconv.Itoa(len(partData))) + + // apply optional request funcs + for _, fn := range optReqFuncs { + if fn != nil { + fn(req) + } + } + + resp, err := u.S3.SignAndSend(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &mpPart{ + PartNumber: partNumber, + ETag: resp.Header.Get("ETag"), + }, nil +} diff --git a/tools/filesystem/internal/s3blob/s3/uploader_test.go b/tools/filesystem/internal/s3blob/s3/uploader_test.go new file mode 100644 index 00000000..83eeb7c4 --- /dev/null +++ b/tools/filesystem/internal/s3blob/s3/uploader_test.go @@ -0,0 +1,462 @@ +package s3_test + +import ( + "context" + "io" + "net/http" + "strings" + "testing" + + "github.com/pocketbase/pocketbase/tools/filesystem/internal/s3blob/s3" +) + +func TestUploaderRequiredFields(t *testing.T) { + t.Parallel() + + s3Client := &s3.S3{ + Client: NewTestClient(&RequestStub{Method: "PUT", URL: `^.+$`}), // match every upload + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + } + + payload := strings.NewReader("test") + + scenarios := []struct { + name string + uploader *s3.Uploader + expectedError bool + }{ + { + "blank", + &s3.Uploader{}, + true, + }, + { + "no Key", + &s3.Uploader{S3: s3Client, Payload: payload}, + 
true, + }, + { + "no S3", + &s3.Uploader{Key: "abc", Payload: payload}, + true, + }, + { + "no Payload", + &s3.Uploader{S3: s3Client, Key: "abc"}, + true, + }, + { + "with S3, Key and Payload", + &s3.Uploader{S3: s3Client, Key: "abc", Payload: payload}, + false, + }, + } + + for _, s := range scenarios { + t.Run(s.name, func(t *testing.T) { + err := s.uploader.Upload(context.Background()) + + hasErr := err != nil + if hasErr != s.expectedError { + t.Fatalf("Expected hasErr %v, got %v", s.expectedError, hasErr) + } + }) + } +} + +func TestUploaderSingleUpload(t *testing.T) { + t.Parallel() + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + + return string(body) == "abcdefg" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "7", + "x-amz-meta-a": "123", + "x-amz-meta-b": "456", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + }, + ) + + uploader := &s3.Uploader{ + S3: &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + }, + Key: "test_key", + Payload: strings.NewReader("abcdefg"), + Metadata: map[string]string{"a": "123", "b": "456"}, + MinPartSize: 8, + } + + err := uploader.Upload(context.Background(), func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err != nil { + t.Fatal(err) + } + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } +} + +func TestUploaderMultipartUploadSuccess(t *testing.T) { + t.Parallel() + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodPost, + URL: "http://test_bucket.example.com/test_key?uploads", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "x-amz-meta-a": "123", + "x-amz-meta-b": "456", + 
"test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Body: io.NopCloser(strings.NewReader(` + + + test_bucket + test_key + test_id + + `)), + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + + return string(body) == "abc" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "3", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{"Etag": []string{"etag1"}}, + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + + return string(body) == "def" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "3", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{"Etag": []string{"etag2"}}, + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=3&uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + return string(body) == "g" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "1", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{"Etag": []string{"etag3"}}, + }, + }, + &RequestStub{ + Method: http.MethodPost, + URL: "http://test_bucket.example.com/test_key?uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + + expected := `etag11etag22etag33` + + return 
strings.Contains(string(body), expected) && checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + }, + ) + + uploader := &s3.Uploader{ + S3: &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + }, + Key: "test_key", + Payload: strings.NewReader("abcdefg"), + Metadata: map[string]string{"a": "123", "b": "456"}, + MinPartSize: 3, + } + + err := uploader.Upload(context.Background(), func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err != nil { + t.Fatal(err) + } + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } +} + +func TestUploaderMultipartUploadPartFailure(t *testing.T) { + t.Parallel() + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodPost, + URL: "http://test_bucket.example.com/test_key?uploads", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "x-amz-meta-a": "123", + "x-amz-meta-b": "456", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Body: io.NopCloser(strings.NewReader(` + + + test_bucket + test_key + test_id + + `)), + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + return string(body) == "abc" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "3", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{"Etag": []string{"etag1"}}, + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id", + Match: func(req *http.Request) bool { + return 
checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + StatusCode: 400, + }, + }, + &RequestStub{ + Method: http.MethodDelete, + URL: "http://test_bucket.example.com/test_key?uploadId=test_id", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + }, + ) + + uploader := &s3.Uploader{ + S3: &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + }, + Key: "test_key", + Payload: strings.NewReader("abcdefg"), + Metadata: map[string]string{"a": "123", "b": "456"}, + MinPartSize: 3, + } + + err := uploader.Upload(context.Background(), func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err == nil { + t.Fatal("Expected non-nil error") + } + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } +} + +func TestUploaderMultipartUploadCompleteFailure(t *testing.T) { + t.Parallel() + + httpClient := NewTestClient( + &RequestStub{ + Method: http.MethodPost, + URL: "http://test_bucket.example.com/test_key?uploads", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "x-amz-meta-a": "123", + "x-amz-meta-b": "456", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Body: io.NopCloser(strings.NewReader(` + + + test_bucket + test_key + test_id + + `)), + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=1&uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + return string(body) == "abc" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "3", + "test_header": "test", + 
"Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{"Etag": []string{"etag1"}}, + }, + }, + &RequestStub{ + Method: http.MethodPut, + URL: "http://test_bucket.example.com/test_key?partNumber=2&uploadId=test_id", + Match: func(req *http.Request) bool { + body, err := io.ReadAll(req.Body) + if err != nil { + return false + } + return string(body) == "def" && checkHeaders(req.Header, map[string]string{ + "Content-Length": "3", + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + Header: http.Header{"Etag": []string{"etag2"}}, + }, + }, + &RequestStub{ + Method: http.MethodPost, + URL: "http://test_bucket.example.com/test_key?uploadId=test_id", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + Response: &http.Response{ + StatusCode: 400, + }, + }, + &RequestStub{ + Method: http.MethodDelete, + URL: "http://test_bucket.example.com/test_key?uploadId=test_id", + Match: func(req *http.Request) bool { + return checkHeaders(req.Header, map[string]string{ + "test_header": "test", + "Authorization": "^.+Credential=123/.+$", + }) + }, + }, + ) + + uploader := &s3.Uploader{ + S3: &s3.S3{ + Client: httpClient, + Region: "test_region", + Bucket: "test_bucket", + Endpoint: "http://example.com", + AccessKey: "123", + SecretKey: "abc", + }, + Key: "test_key", + Payload: strings.NewReader("abcdef"), + Metadata: map[string]string{"a": "123", "b": "456"}, + MinPartSize: 3, + } + + err := uploader.Upload(context.Background(), func(r *http.Request) { + r.Header.Set("test_header", "test") + }) + if err == nil { + t.Fatal("Expected non-nil error") + } + + err = httpClient.AssertNoRemaining() + if err != nil { + t.Fatal(err) + } +} diff --git a/tools/filesystem/internal/s3lite/s3lite.go b/tools/filesystem/internal/s3lite/s3lite.go deleted file mode 100644 index 
427ffaf4..00000000 --- a/tools/filesystem/internal/s3lite/s3lite.go +++ /dev/null @@ -1,917 +0,0 @@ -// This is a trimmed version of the original go-cloud/s3blob driver -// to avoid loading both aws-sdk-go and aws-sdk-go-v2 dependencies. -// It helps reducing the final binary with ~11MB. -// -// In the future this may get replaced entirely with an even slimmer -// version without relying on aws-sdk-go-v2. -// -//-------------------------------------------------------------------- -// -// Copyright 2018 The Go Cloud Development Kit Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package s3blob provides a blob implementation that uses S3. Use OpenBucket -// to construct a *blob.Bucket. -// -// # URLs -// -// For blob.OpenBucket, s3blob registers for the scheme "s3". -// The default URL opener will use an AWS session with the default credentials -// and configuration; see https://docs.aws.amazon.com/sdk-for-go/api/aws/session/ -// for more details. -// Use "awssdk=v1" or "awssdk=v2" to force a specific AWS SDK version. -// To customize the URL opener, or for more details on the URL format, -// see URLOpener. -// See https://gocloud.dev/concepts/urls/ for background information. -// -// # Escaping -// -// Go CDK supports all UTF-8 strings; to make this work with services lacking -// full UTF-8 support, strings must be escaped (during writes) and unescaped -// (during reads). 
The following escapes are performed for s3blob: -// - Blob keys: ASCII characters 0-31 are escaped to "__0x__". -// Additionally, the "/" in "../" is escaped in the same way. -// - Metadata keys: Escaped using URL encoding, then additionally "@:=" are -// escaped using "__0x__". These characters were determined by -// experimentation. -// - Metadata values: Escaped using URL encoding. -// -// # As -// -// s3blob exposes the following types for As: -// - Bucket: (V1) *s3.S3; (V2) *s3v2.Client -// - Error: (V1) awserr.Error; (V2) any error type returned by the service, notably smithy.APIError -// - ListObject: (V1) s3.Object for objects, s3.CommonPrefix for "directories"; (V2) typesv2.Object for objects, typesv2.CommonPrefix for "directories" -// - ListOptions.BeforeList: (V1) *s3.ListObjectsV2Input or *s3.ListObjectsInput -// when Options.UseLegacyList == true; (V2) *s3v2.ListObjectsV2Input or *[]func(*s3v2.Options), or *s3v2.ListObjectsInput -// when Options.UseLegacyList == true -// - Reader: (V1) s3.GetObjectOutput; (V2) s3v2.GetObjectInput -// - ReaderOptions.BeforeRead: (V1) *s3.GetObjectInput; (V2) *s3v2.GetObjectInput or *[]func(*s3v2.Options) -// - Attributes: (V1) s3.HeadObjectOutput; (V2)s3v2.HeadObjectOutput -// - CopyOptions.BeforeCopy: *(V1) s3.CopyObjectInput; (V2) s3v2.CopyObjectInput -// - WriterOptions.BeforeWrite: (V1) *s3manager.UploadInput, *s3manager.Uploader; (V2) *s3v2.PutObjectInput, *s3v2manager.Uploader -// - SignedURLOptions.BeforeSign: -// (V1) *s3.GetObjectInput; (V2) *s3v2.GetObjectInput, when Options.Method == http.MethodGet, or -// (V1) *s3.PutObjectInput; (V2) *s3v2.PutObjectInput, when Options.Method == http.MethodPut, or -// (V1) *s3.DeleteObjectInput; (V2) [not supported] when Options.Method == http.MethodDelete - -package s3lite - -import ( - "context" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - awsv2 "github.com/aws/aws-sdk-go-v2/aws" - s3managerv2 
"github.com/aws/aws-sdk-go-v2/feature/s3/manager" - s3v2 "github.com/aws/aws-sdk-go-v2/service/s3" - typesv2 "github.com/aws/aws-sdk-go-v2/service/s3/types" - - "github.com/aws/smithy-go" - "gocloud.dev/blob" - "gocloud.dev/blob/driver" - "gocloud.dev/gcerrors" -) - -// ------------------------------------------------------------------- - -// Copyright 2019 The Go Cloud Development Kit Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// HexEscape returns s, with all runes for which shouldEscape returns true -// escaped to "__0xXXX__", where XXX is the hex representation of the rune -// value. For example, " " would escape to "__0x20__". -// -// Non-UTF-8 strings will have their non-UTF-8 characters escaped to -// unicode.ReplacementChar; the original value is lost. Please file an -// issue if you need non-UTF8 support. -// -// Note: shouldEscape takes the whole string as a slice of runes and an -// index. Passing it a single byte or a single rune doesn't provide -// enough context for some escape decisions; for example, the caller might -// want to escape the second "/" in "//" but not the first one. -// We pass a slice of runes instead of the string or a slice of bytes -// because some decisions will be made on a rune basis (e.g., encode -// all non-ASCII runes). -func HexEscape(s string, shouldEscape func(s []rune, i int) bool) string { - // Do a first pass to see which runes (if any) need escaping. 
- runes := []rune(s) - var toEscape []int - for i := range runes { - if shouldEscape(runes, i) { - toEscape = append(toEscape, i) - } - } - if len(toEscape) == 0 { - return s - } - // Each escaped rune turns into at most 14 runes ("__0x7fffffff__"), - // so allocate an extra 13 for each. We'll reslice at the end - // if we didn't end up using them. - escaped := make([]rune, len(runes)+13*len(toEscape)) - n := 0 // current index into toEscape - j := 0 // current index into escaped - for i, r := range runes { - if n < len(toEscape) && i == toEscape[n] { - // We were asked to escape this rune. - for _, x := range fmt.Sprintf("__%#x__", r) { - escaped[j] = x - j++ - } - n++ - } else { - escaped[j] = r - j++ - } - } - return string(escaped[0:j]) -} - -// unescape tries to unescape starting at r[i]. -// It returns a boolean indicating whether the unescaping was successful, -// and (if true) the unescaped rune and the last index of r that was used -// during unescaping. -func unescape(r []rune, i int) (bool, rune, int) { - // Look for "__0x". - if r[i] != '_' { - return false, 0, 0 - } - i++ - if i >= len(r) || r[i] != '_' { - return false, 0, 0 - } - i++ - if i >= len(r) || r[i] != '0' { - return false, 0, 0 - } - i++ - if i >= len(r) || r[i] != 'x' { - return false, 0, 0 - } - i++ - // Capture the digits until the next "_" (if any). - var hexdigits []rune - for ; i < len(r) && r[i] != '_'; i++ { - hexdigits = append(hexdigits, r[i]) - } - // Look for the trailing "__". - if i >= len(r) || r[i] != '_' { - return false, 0, 0 - } - i++ - if i >= len(r) || r[i] != '_' { - return false, 0, 0 - } - // Parse the hex digits into an int32. - retval, err := strconv.ParseInt(string(hexdigits), 16, 32) - if err != nil { - return false, 0, 0 - } - return true, rune(retval), i -} - -// HexUnescape reverses HexEscape. 
-func HexUnescape(s string) string { - var unescaped []rune - runes := []rune(s) - for i := 0; i < len(runes); i++ { - if ok, newR, newI := unescape(runes, i); ok { - // We unescaped some runes starting at i, resulting in the - // unescaped rune newR. The last rune used was newI. - if unescaped == nil { - // This is the first rune we've encountered that - // needed unescaping. Allocate a buffer and copy any - // previous runes. - unescaped = make([]rune, i) - copy(unescaped, runes) - } - unescaped = append(unescaped, newR) - i = newI - } else if unescaped != nil { - unescaped = append(unescaped, runes[i]) - } - } - if unescaped == nil { - return s - } - return string(unescaped) -} - -// URLEscape uses url.PathEscape to escape s. -func URLEscape(s string) string { - return url.PathEscape(s) -} - -// URLUnescape reverses URLEscape using url.PathUnescape. If the unescape -// returns an error, it returns s. -func URLUnescape(s string) string { - if u, err := url.PathUnescape(s); err == nil { - return u - } - return s -} - -// ------------------------------------------------------------------- - -const defaultPageSize = 1000 - -// Options sets options for constructing a *blob.Bucket backed by fileblob. -type Options struct { - // UseLegacyList forces the use of ListObjects instead of ListObjectsV2. - // Some S3-compatible services (like CEPH) do not currently support - // ListObjectsV2. - UseLegacyList bool -} - -// openBucket returns an S3 Bucket. 
-func openBucket(ctx context.Context, useV2 bool, clientV2 *s3v2.Client, bucketName string, opts *Options) (*bucket, error) { - if bucketName == "" { - return nil, errors.New("s3blob.OpenBucket: bucketName is required") - } - if opts == nil { - opts = &Options{} - } - if clientV2 == nil { - return nil, errors.New("s3blob.OpenBucketV2: client is required") - } - return &bucket{ - useV2: useV2, - name: bucketName, - clientV2: clientV2, - useLegacyList: opts.UseLegacyList, - }, nil -} - -// OpenBucketV2 returns a *blob.Bucket backed by S3, using AWS SDK v2. -func OpenBucketV2(ctx context.Context, client *s3v2.Client, bucketName string, opts *Options) (*blob.Bucket, error) { - drv, err := openBucket(ctx, true, client, bucketName, opts) - if err != nil { - return nil, err - } - return blob.NewBucket(drv), nil -} - -// reader reads an S3 object. It implements io.ReadCloser. -type reader struct { - useV2 bool - body io.ReadCloser - attrs driver.ReaderAttributes - rawV2 *s3v2.GetObjectOutput -} - -func (r *reader) Read(p []byte) (int, error) { - return r.body.Read(p) -} - -// Close closes the reader itself. It must be called when done reading. -func (r *reader) Close() error { - return r.body.Close() -} - -func (r *reader) As(i interface{}) bool { - p, ok := i.(*s3v2.GetObjectOutput) - if !ok { - return false - } - *p = *r.rawV2 - return true -} - -func (r *reader) Attributes() *driver.ReaderAttributes { - return &r.attrs -} - -// writer writes an S3 object, it implements io.WriteCloser. -type writer struct { - // Ends of an io.Pipe, created when the first byte is written. - pw *io.PipeWriter - pr *io.PipeReader - - // Alternatively, upload is set to true when Upload was - // used to upload data. 
- upload bool - - ctx context.Context - useV2 bool - - // v2 - uploaderV2 *s3managerv2.Uploader - reqV2 *s3v2.PutObjectInput - - donec chan struct{} // closed when done writing - // The following fields will be written before donec closes: - err error -} - -// Write appends p to w.pw. User must call Close to close the w after done writing. -func (w *writer) Write(p []byte) (int, error) { - // Avoid opening the pipe for a zero-length write; - // the concrete can do these for empty blobs. - if len(p) == 0 { - return 0, nil - } - if w.pw == nil { - // We'll write into pw and use pr as an io.Reader for the - // Upload call to S3. - w.pr, w.pw = io.Pipe() - w.open(w.pr, true) - } - return w.pw.Write(p) -} - -// Upload reads from r. Per the driver, it is guaranteed to be the only -// write call for this writer. -func (w *writer) Upload(r io.Reader) error { - w.upload = true - w.open(r, false) - return nil -} - -// r may be nil if we're Closing and no data was written. -// If closePipeOnError is true, w.pr will be closed if there's an -// error uploading to S3. -func (w *writer) open(r io.Reader, closePipeOnError bool) { - // This goroutine will keep running until Close, unless there's an error. - go func() { - defer close(w.donec) - - if r == nil { - // AWS doesn't like a nil Body. - r = http.NoBody - } - var err error - w.reqV2.Body = r - _, err = w.uploaderV2.Upload(w.ctx, w.reqV2) - if err != nil { - if closePipeOnError { - w.pr.CloseWithError(err) - } - w.err = err - } - }() -} - -// Close completes the writer and closes it. Any error occurring during write -// will be returned. If a writer is closed before any Write is called, Close -// will create an empty file at the given key. -func (w *writer) Close() error { - if !w.upload { - if w.pr != nil { - defer w.pr.Close() - } - if w.pw == nil { - // We never got any bytes written. We'll write an http.NoBody. 
- w.open(nil, false) - } else if err := w.pw.Close(); err != nil { - return err - } - } - <-w.donec - return w.err -} - -// bucket represents an S3 bucket and handles read, write and delete operations. -type bucket struct { - name string - useV2 bool - clientV2 *s3v2.Client - useLegacyList bool -} - -func (b *bucket) Close() error { - return nil -} - -func (b *bucket) ErrorCode(err error) gcerrors.ErrorCode { - var code string - var ae smithy.APIError - var oe *smithy.OperationError - if errors.As(err, &oe) && strings.Contains(oe.Error(), "301") { - // V2 returns an OperationError with a missing redirect for invalid buckets. - code = "NoSuchBucket" - } else if errors.As(err, &ae) { - code = ae.ErrorCode() - } else { - return gcerrors.Unknown - } - switch { - case code == "NoSuchBucket" || code == "NoSuchKey" || code == "NotFound": - return gcerrors.NotFound - default: - return gcerrors.Unknown - } -} - -// ListPaged implements driver.ListPaged. -func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) { - pageSize := opts.PageSize - if pageSize == 0 { - pageSize = defaultPageSize - } - in := &s3v2.ListObjectsV2Input{ - Bucket: awsv2.String(b.name), - MaxKeys: awsv2.Int32(int32(pageSize)), - } - if len(opts.PageToken) > 0 { - in.ContinuationToken = awsv2.String(string(opts.PageToken)) - } - if opts.Prefix != "" { - in.Prefix = awsv2.String(escapeKey(opts.Prefix)) - } - if opts.Delimiter != "" { - in.Delimiter = awsv2.String(escapeKey(opts.Delimiter)) - } - resp, err := b.listObjectsV2(ctx, in, opts) - if err != nil { - return nil, err - } - page := driver.ListPage{} - if resp.NextContinuationToken != nil { - page.NextPageToken = []byte(*resp.NextContinuationToken) - } - if n := len(resp.Contents) + len(resp.CommonPrefixes); n > 0 { - page.Objects = make([]*driver.ListObject, n) - for i, obj := range resp.Contents { - obj := obj - page.Objects[i] = &driver.ListObject{ - Key: unescapeKey(awsv2.ToString(obj.Key)), - ModTime: 
*obj.LastModified, - Size: awsv2.ToInt64(obj.Size), - MD5: eTagToMD5(obj.ETag), - AsFunc: func(i interface{}) bool { - p, ok := i.(*typesv2.Object) - if !ok { - return false - } - *p = obj - return true - }, - } - } - for i, prefix := range resp.CommonPrefixes { - prefix := prefix - page.Objects[i+len(resp.Contents)] = &driver.ListObject{ - Key: unescapeKey(awsv2.ToString(prefix.Prefix)), - IsDir: true, - AsFunc: func(i interface{}) bool { - p, ok := i.(*typesv2.CommonPrefix) - if !ok { - return false - } - *p = prefix - return true - }, - } - } - if len(resp.Contents) > 0 && len(resp.CommonPrefixes) > 0 { - // S3 gives us blobs and "directories" in separate lists; sort them. - sort.Slice(page.Objects, func(i, j int) bool { - return page.Objects[i].Key < page.Objects[j].Key - }) - } - } - return &page, nil -} - -func (b *bucket) listObjectsV2(ctx context.Context, in *s3v2.ListObjectsV2Input, opts *driver.ListOptions) (*s3v2.ListObjectsV2Output, error) { - if !b.useLegacyList { - var varopt []func(*s3v2.Options) - if opts.BeforeList != nil { - asFunc := func(i interface{}) bool { - if p, ok := i.(**s3v2.ListObjectsV2Input); ok { - *p = in - return true - } - if p, ok := i.(**[]func(*s3v2.Options)); ok { - *p = &varopt - return true - } - return false - } - if err := opts.BeforeList(asFunc); err != nil { - return nil, err - } - } - return b.clientV2.ListObjectsV2(ctx, in, varopt...) - } - - // Use the legacy ListObjects request. 
- legacyIn := &s3v2.ListObjectsInput{ - Bucket: in.Bucket, - Delimiter: in.Delimiter, - EncodingType: in.EncodingType, - Marker: in.ContinuationToken, - MaxKeys: in.MaxKeys, - Prefix: in.Prefix, - RequestPayer: in.RequestPayer, - } - if opts.BeforeList != nil { - asFunc := func(i interface{}) bool { - p, ok := i.(**s3v2.ListObjectsInput) - if !ok { - return false - } - *p = legacyIn - return true - } - if err := opts.BeforeList(asFunc); err != nil { - return nil, err - } - } - legacyResp, err := b.clientV2.ListObjects(ctx, legacyIn) - if err != nil { - return nil, err - } - - var nextContinuationToken *string - if legacyResp.NextMarker != nil { - nextContinuationToken = legacyResp.NextMarker - } else if awsv2.ToBool(legacyResp.IsTruncated) { - nextContinuationToken = awsv2.String(awsv2.ToString(legacyResp.Contents[len(legacyResp.Contents)-1].Key)) - } - return &s3v2.ListObjectsV2Output{ - CommonPrefixes: legacyResp.CommonPrefixes, - Contents: legacyResp.Contents, - NextContinuationToken: nextContinuationToken, - }, nil -} - -// As implements driver.As. -func (b *bucket) As(i interface{}) bool { - p, ok := i.(**s3v2.Client) - if !ok { - return false - } - *p = b.clientV2 - return true -} - -// As implements driver.ErrorAs. -func (b *bucket) ErrorAs(err error, i interface{}) bool { - return errors.As(err, i) -} - -// Attributes implements driver.Attributes. -func (b *bucket) Attributes(ctx context.Context, key string) (*driver.Attributes, error) { - key = escapeKey(key) - in := &s3v2.HeadObjectInput{ - Bucket: awsv2.String(b.name), - Key: awsv2.String(key), - } - resp, err := b.clientV2.HeadObject(ctx, in) - if err != nil { - return nil, err - } - - md := make(map[string]string, len(resp.Metadata)) - for k, v := range resp.Metadata { - // See the package comments for more details on escaping of metadata - // keys & values. 
- md[HexUnescape(URLUnescape(k))] = URLUnescape(v) - } - return &driver.Attributes{ - CacheControl: awsv2.ToString(resp.CacheControl), - ContentDisposition: awsv2.ToString(resp.ContentDisposition), - ContentEncoding: awsv2.ToString(resp.ContentEncoding), - ContentLanguage: awsv2.ToString(resp.ContentLanguage), - ContentType: awsv2.ToString(resp.ContentType), - Metadata: md, - // CreateTime not supported; left as the zero time. - ModTime: awsv2.ToTime(resp.LastModified), - Size: awsv2.ToInt64(resp.ContentLength), - MD5: eTagToMD5(resp.ETag), - ETag: awsv2.ToString(resp.ETag), - AsFunc: func(i interface{}) bool { - p, ok := i.(*s3v2.HeadObjectOutput) - if !ok { - return false - } - *p = *resp - return true - }, - }, nil -} - -// NewRangeReader implements driver.NewRangeReader. -func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *driver.ReaderOptions) (driver.Reader, error) { - key = escapeKey(key) - var byteRange *string - if offset > 0 && length < 0 { - byteRange = awsv2.String(fmt.Sprintf("bytes=%d-", offset)) - } else if length == 0 { - // AWS doesn't support a zero-length read; we'll read 1 byte and then - // ignore it in favor of http.NoBody below. - byteRange = awsv2.String(fmt.Sprintf("bytes=%d-%d", offset, offset)) - } else if length >= 0 { - byteRange = awsv2.String(fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - } - in := &s3v2.GetObjectInput{ - Bucket: awsv2.String(b.name), - Key: awsv2.String(key), - Range: byteRange, - } - var varopt []func(*s3v2.Options) - if opts.BeforeRead != nil { - asFunc := func(i interface{}) bool { - if p, ok := i.(**s3v2.GetObjectInput); ok { - *p = in - return true - } - if p, ok := i.(**[]func(*s3v2.Options)); ok { - *p = &varopt - return true - } - return false - } - if err := opts.BeforeRead(asFunc); err != nil { - return nil, err - } - } - resp, err := b.clientV2.GetObject(ctx, in, varopt...) 
- if err != nil { - return nil, err - } - body := resp.Body - if length == 0 { - body = http.NoBody - } - return &reader{ - useV2: true, - body: body, - attrs: driver.ReaderAttributes{ - ContentType: awsv2.ToString(resp.ContentType), - ModTime: awsv2.ToTime(resp.LastModified), - Size: getSize(awsv2.ToInt64(resp.ContentLength), awsv2.ToString(resp.ContentRange)), - }, - rawV2: resp, - }, nil -} - -// etagToMD5 processes an ETag header and returns an MD5 hash if possible. -// S3's ETag header is sometimes a quoted hexstring of the MD5. Other times, -// notably when the object was uploaded in multiple parts, it is not. -// We do the best we can. -// Some links about ETag: -// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html -// https://github.com/aws/aws-sdk-net/issues/815 -// https://teppen.io/2018/06/23/aws_s3_etags/ -func eTagToMD5(etag *string) []byte { - if etag == nil { - // No header at all. - return nil - } - // Strip the expected leading and trailing quotes. - quoted := *etag - if len(quoted) < 2 || quoted[0] != '"' || quoted[len(quoted)-1] != '"' { - return nil - } - unquoted := quoted[1 : len(quoted)-1] - // Un-hex; we return nil on error. In particular, we'll get an error here - // for multi-part uploaded blobs, whose ETag will contain a "-" and so will - // never be a legal hex encoding. - md5, err := hex.DecodeString(unquoted) - if err != nil { - return nil - } - return md5 -} - -func getSize(contentLength int64, contentRange string) int64 { - // Default size to ContentLength, but that's incorrect for partial-length reads, - // where ContentLength refers to the size of the returned Body, not the entire - // size of the blob. ContentRange has the full size. - size := contentLength - if contentRange != "" { - // Sample: bytes 10-14/27 (where 27 is the full size). 
- parts := strings.Split(contentRange, "/") - if len(parts) == 2 { - if i, err := strconv.ParseInt(parts[1], 10, 64); err == nil { - size = i - } - } - } - return size -} - -// escapeKey does all required escaping for UTF-8 strings to work with S3. -func escapeKey(key string) string { - return HexEscape(key, func(r []rune, i int) bool { - c := r[i] - switch { - // S3 doesn't handle these characters (determined via experimentation). - case c < 32: - return true - // For "../", escape the trailing slash. - case i > 1 && c == '/' && r[i-1] == '.' && r[i-2] == '.': - return true - } - return false - }) -} - -// unescapeKey reverses escapeKey. -func unescapeKey(key string) string { - return HexUnescape(key) -} - -// NewTypedWriter implements driver.NewTypedWriter. -func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) { - key = escapeKey(key) - uploaderV2 := s3managerv2.NewUploader(b.clientV2, func(u *s3managerv2.Uploader) { - if opts.BufferSize != 0 { - u.PartSize = int64(opts.BufferSize) - } - if opts.MaxConcurrency != 0 { - u.Concurrency = opts.MaxConcurrency - } - }) - md := make(map[string]string, len(opts.Metadata)) - for k, v := range opts.Metadata { - // See the package comments for more details on escaping of metadata - // keys & values. 
- k = HexEscape(url.PathEscape(k), func(runes []rune, i int) bool { - c := runes[i] - return c == '@' || c == ':' || c == '=' - }) - md[k] = url.PathEscape(v) - } - reqV2 := &s3v2.PutObjectInput{ - Bucket: awsv2.String(b.name), - ContentType: awsv2.String(contentType), - Key: awsv2.String(key), - Metadata: md, - } - if opts.CacheControl != "" { - reqV2.CacheControl = awsv2.String(opts.CacheControl) - } - if opts.ContentDisposition != "" { - reqV2.ContentDisposition = awsv2.String(opts.ContentDisposition) - } - if opts.ContentEncoding != "" { - reqV2.ContentEncoding = awsv2.String(opts.ContentEncoding) - } - if opts.ContentLanguage != "" { - reqV2.ContentLanguage = awsv2.String(opts.ContentLanguage) - } - if len(opts.ContentMD5) > 0 { - reqV2.ContentMD5 = awsv2.String(base64.StdEncoding.EncodeToString(opts.ContentMD5)) - } - if opts.BeforeWrite != nil { - asFunc := func(i interface{}) bool { - // Note that since the Go CDK Blob - // abstraction does not expose AWS's - // Uploader concept, there does not - // appear to be any utility in - // exposing the options list to the v2 - // Uploader's Upload() method. - // Instead, applications can - // manipulate the exposed *Uploader - // directly, including by setting - // ClientOptions if needed. - if p, ok := i.(**s3managerv2.Uploader); ok { - *p = uploaderV2 - return true - } - if p, ok := i.(**s3v2.PutObjectInput); ok { - *p = reqV2 - return true - } - return false - } - if err := opts.BeforeWrite(asFunc); err != nil { - return nil, err - } - } - return &writer{ - ctx: ctx, - useV2: true, - uploaderV2: uploaderV2, - reqV2: reqV2, - donec: make(chan struct{}), - }, nil -} - -// Copy implements driver.Copy. 
-func (b *bucket) Copy(ctx context.Context, dstKey, srcKey string, opts *driver.CopyOptions) error { - dstKey = escapeKey(dstKey) - srcKey = escapeKey(srcKey) - input := &s3v2.CopyObjectInput{ - Bucket: awsv2.String(b.name), - CopySource: awsv2.String(b.name + "/" + srcKey), - Key: awsv2.String(dstKey), - } - if opts.BeforeCopy != nil { - asFunc := func(i interface{}) bool { - switch v := i.(type) { - case **s3v2.CopyObjectInput: - *v = input - return true - } - return false - } - if err := opts.BeforeCopy(asFunc); err != nil { - return err - } - } - _, err := b.clientV2.CopyObject(ctx, input) - return err -} - -// Delete implements driver.Delete. -func (b *bucket) Delete(ctx context.Context, key string) error { - if _, err := b.Attributes(ctx, key); err != nil { - return err - } - key = escapeKey(key) - input := &s3v2.DeleteObjectInput{ - Bucket: awsv2.String(b.name), - Key: awsv2.String(key), - } - _, err := b.clientV2.DeleteObject(ctx, input) - return err -} - -func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) { - key = escapeKey(key) - switch opts.Method { - case http.MethodGet: - in := &s3v2.GetObjectInput{ - Bucket: awsv2.String(b.name), - Key: awsv2.String(key), - } - if opts.BeforeSign != nil { - asFunc := func(i interface{}) bool { - v, ok := i.(**s3v2.GetObjectInput) - if ok { - *v = in - } - return ok - } - if err := opts.BeforeSign(asFunc); err != nil { - return "", err - } - } - p, err := s3v2.NewPresignClient(b.clientV2, s3v2.WithPresignExpires(opts.Expiry)).PresignGetObject(ctx, in) - if err != nil { - return "", err - } - return p.URL, nil - case http.MethodPut: - in := &s3v2.PutObjectInput{ - Bucket: awsv2.String(b.name), - Key: awsv2.String(key), - } - if opts.EnforceAbsentContentType || opts.ContentType != "" { - // https://github.com/aws/aws-sdk-go-v2/issues/1475 - return "", errors.New("s3blob: AWS SDK v2 does not supported enforcing ContentType in SignedURLs for PUT") - } - if 
opts.BeforeSign != nil { - asFunc := func(i interface{}) bool { - v, ok := i.(**s3v2.PutObjectInput) - if ok { - *v = in - } - return ok - } - if err := opts.BeforeSign(asFunc); err != nil { - return "", err - } - } - p, err := s3v2.NewPresignClient(b.clientV2, s3v2.WithPresignExpires(opts.Expiry)).PresignPutObject(ctx, in) - if err != nil { - return "", err - } - return p.URL, nil - } - return "", fmt.Errorf("unsupported Method %q", opts.Method) -}