Mirror of https://github.com/ManyakRus/image_database.git (synced 2024-11-21 05:05:49 +02:00)

Commit fce38ad1b3 (parent 13ddccba43): "ran make mod"
go.mod (20 changes)
@@ -3,21 +3,23 @@ module github.com/ManyakRus/image_database
 go 1.20

 require (
-	github.com/ManyakRus/starter v0.0.0-20230913104819-4ee2118a3638
+	github.com/ManyakRus/starter v0.0.0-20231227074038-f1cc2e5171fa
 	github.com/beevik/etree v1.2.0
-	gorm.io/gorm v1.25.4
+	gorm.io/gorm v1.25.5
 )

 require (
-	github.com/ManyakRus/logrus v0.0.0-20230913114322-14246ee4c48b // indirect
+	github.com/ManyakRus/logrus v0.0.0-20231019115155-9e6fede0d792 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
-	github.com/jackc/pgx/v5 v5.4.3 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
+	github.com/jackc/pgx/v5 v5.5.1 // indirect
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/joho/godotenv v1.5.1 // indirect
-	golang.org/x/crypto v0.13.0 // indirect
-	golang.org/x/sys v0.12.0 // indirect
-	golang.org/x/text v0.13.0 // indirect
-	gorm.io/driver/postgres v1.5.2 // indirect
+	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/sync v0.5.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	gorm.io/driver/postgres v1.5.4 // indirect
 )
go.sum (40 changes)
@@ -1,7 +1,7 @@
-github.com/ManyakRus/logrus v0.0.0-20230913114322-14246ee4c48b h1:U17+MOkhFzy9WkjANQhe1JftWq8Opt4gmy2G59wreG8=
-github.com/ManyakRus/logrus v0.0.0-20230913114322-14246ee4c48b/go.mod h1:OUyxCVbPW/2lC1e6cM7Am941SJiC88BhNnb24x2R3a8=
-github.com/ManyakRus/starter v0.0.0-20230913104819-4ee2118a3638 h1:USOEGx2aGUtq5vJlbr5OhLIRTX252cpv5aYre/QNztg=
-github.com/ManyakRus/starter v0.0.0-20230913104819-4ee2118a3638/go.mod h1:wRsJrHV9PcbL8NSxOR14dnWUQ9MSraiZHw1uMQb/ojQ=
+github.com/ManyakRus/logrus v0.0.0-20231019115155-9e6fede0d792 h1:bxwxD0H3kSUAH3uNk/b74gkImcUiP7dyibmMoVwk338=
+github.com/ManyakRus/logrus v0.0.0-20231019115155-9e6fede0d792/go.mod h1:OUyxCVbPW/2lC1e6cM7Am941SJiC88BhNnb24x2R3a8=
+github.com/ManyakRus/starter v0.0.0-20231227074038-f1cc2e5171fa h1:v+rN/vdH1ODCja4iTka/3QToxu4ey76u76pkFHcSfOI=
+github.com/ManyakRus/starter v0.0.0-20231227074038-f1cc2e5171fa/go.mod h1:1fRj4AUMGeQTtnwBa52pvMd9zwqPDms+uaxozhHkM1Q=
 github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw=
 github.com/beevik/etree v1.2.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -9,10 +9,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY=
-github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
+github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.5.1 h1:5I9etrGkLrN+2XPCsi6XLlV5DITbSL/xBZdmAxFcXPI=
+github.com/jackc/pgx/v5 v5.5.1/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@@ -21,22 +23,24 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0=
-gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8=
-gorm.io/gorm v1.25.4 h1:iyNd8fNAe8W9dvtlgeRI5zSVZPsq3OpcTu37cYcpCmw=
+gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo=
+gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0=
 gorm.io/gorm v1.25.4/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
+gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
@@ -2,7 +2,7 @@ package logic

 import (
 	"github.com/ManyakRus/image_database/internal/config"
-	ConfigMain "github.com/ManyakRus/starter/config"
+	ConfigMain "github.com/ManyakRus/starter/config_main"
 	"github.com/ManyakRus/starter/micro"
 	"testing"
 )
@@ -16,12 +16,12 @@ func TestStartFillAll(t *testing.T) {
 	StartFillAll(FileName)
 }

-func TestFindRepositoryName(t *testing.T) {
-	ConfigMain.LoadEnv()
-	config.FillSettings()
-
-	Otvet := FindRepositoryName()
-	if Otvet == "" {
-		t.Error("TestFindRepositoryName() error: =''")
-	}
-}
+//func TestFindRepositoryName(t *testing.T) {
+//	ConfigMain.LoadEnv()
+//	config.FillSettings()
+//
+//	Otvet := FindRepositoryName()
+//	if Otvet == "" {
+//		t.Error("TestFindRepositoryName() error: =''")
+//	}
+//}
@@ -5,7 +5,7 @@ import (
 	"github.com/ManyakRus/image_database/internal/constants"
 	"github.com/ManyakRus/image_database/internal/logic"
 	"github.com/ManyakRus/image_database/pkg/graphml"
-	ConfigMain "github.com/ManyakRus/starter/config"
+	ConfigMain "github.com/ManyakRus/starter/config_main"
 	"github.com/ManyakRus/starter/log"
 	"github.com/ManyakRus/starter/postgres_gorm"
 )
@@ -1,7 +1,7 @@
 package postgres

 import (
-	ConfigMain "github.com/ManyakRus/starter/config"
+	ConfigMain "github.com/ManyakRus/starter/config_main"
 	"github.com/ManyakRus/starter/postgres_gorm"
 	"testing"
 )
@@ -2,7 +2,7 @@ package graphml

 import (
 	"github.com/ManyakRus/image_database/internal/config"
-	ConfigMain "github.com/ManyakRus/starter/config"
+	ConfigMain "github.com/ManyakRus/starter/config_main"
 	"github.com/ManyakRus/starter/micro"
 	"testing"
 )
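The hunks above are the project's own source changes: the starter package previously imported as github.com/ManyakRus/starter/config is imported as github.com/ManyakRus/starter/config_main in the updated starter release (its source appears in the vendored config_main/config_main.go further down), and one test is commented out. A minimal migration sketch for a caller, assuming it keeps the same ConfigMain alias the tests use (the package name "example" is hypothetical):

```go
package example // hypothetical package name, for illustration only

import (
	"github.com/ManyakRus/image_database/internal/config"

	// was: ConfigMain "github.com/ManyakRus/starter/config"
	ConfigMain "github.com/ManyakRus/starter/config_main"
)

// setupSettings loads variables from the .env file next to the binary and then
// fills the project's own settings structure, exactly as the tests above do.
func setupSettings() {
	ConfigMain.LoadEnv()
	config.FillSettings()
}
```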
vendor/github.com/ManyakRus/logrus/.travis.yml (2 changes, generated, vendored)
@@ -1,5 +1,5 @@
 language: go
-go_import_path: github.com/sirupsen/logrus
+go_import_path: github.com/ManyakRus/logrus
 git:
   depth: 1
 env:
vendor/github.com/ManyakRus/logrus/Makefile (new file, 6 lines, generated, vendored)
@@ -0,0 +1,6 @@
+SERVICEURL=github.com/sirupsen/logrus
+
+NEW_REPO=github.com/ManyakRus/logrus
+
+newrepo:
+	find -name *.go -not -path "*/vendor/*"|xargs sed -i 's+$(SERVICEURL)+$(NEW_REPO)+g'
vendor/github.com/ManyakRus/logrus/README.md (36 changes, generated, vendored)
@ -1,4 +1,4 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus)
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://github.com/ManyakRus/logrus/workflows/CI/badge.svg)](https://github.com/ManyakRus/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/ManyakRus/logrus.svg)](https://pkg.go.dev/github.com/ManyakRus/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger.
|
||||
@ -28,12 +28,12 @@ import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||
this caused issues in the community and we needed a standard. Some environments
|
||||
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
`github.com/ManyakRus/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
comments](https://github.com/ManyakRus/logrus/issues/553#issuecomment-306591437).
|
||||
For an in-depth explanation of the casing issue, see [this
|
||||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||
comment](https://github.com/ManyakRus/logrus/issues/570#issuecomment-313933276).
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
@ -43,7 +43,7 @@ plain text):
|
||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
```text
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
@ -109,7 +109,7 @@ go test -bench=.*CallerTracing
|
||||
|
||||
The organization's name was changed to lower-case--and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
the lower-case import: `github.com/ManyakRus/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
@ -119,7 +119,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
log "github.com/ManyakRus/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@ -130,7 +130,7 @@ func main() {
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
replace your `log` imports everywhere with `log "github.com/ManyakRus/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
@ -139,7 +139,7 @@ package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/sirupsen/logrus"
|
||||
log "github.com/ManyakRus/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -190,7 +190,7 @@ package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/ManyakRus/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
@ -265,9 +265,9 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
log "github.com/ManyakRus/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
|
||||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
logrus_syslog "github.com/ManyakRus/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
@ -287,7 +287,7 @@ func init() {
|
||||
```
|
||||
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
||||
|
||||
A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
|
||||
A list of currently known service hooks can be found in this wiki [page](https://github.com/ManyakRus/logrus/wiki/Hooks)
|
||||
|
||||
|
||||
#### Level logging
|
||||
@ -340,7 +340,7 @@ could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
log "github.com/ManyakRus/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -372,9 +372,9 @@ The built-in logging formatters are:
|
||||
* When colors are enabled, levels are truncated to 4 characters by default. To disable
|
||||
truncation set the `DisableLevelTruncation` field to `true`.
|
||||
* When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/ManyakRus/logrus#TextFormatter).
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/ManyakRus/logrus#JSONFormatter).
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
@ -462,8 +462,8 @@ Logrus has a built in facility for asserting the presence of log messages. This
|
||||
|
||||
```go
|
||||
import(
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/ManyakRus/logrus"
|
||||
"github.com/ManyakRus/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
vendor/github.com/ManyakRus/logrus/doc.go (28 changes, generated, vendored)
@ -1,26 +1,26 @@
|
||||
/*
|
||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
package main
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
import (
|
||||
log "github.com/ManyakRus/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
|
||||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/sirupsen/logrus
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/ManyakRus/logrus
|
||||
*/
|
||||
package logrus
|
||||
|
vendor/github.com/ManyakRus/logrus/entry.go (2 changes, generated, vendored)
@@ -257,7 +257,7 @@ func (entry *Entry) log(level Level, msg string) {
 	// panic() to use in Entry#Panic(), we avoid the allocation by checking
 	// directly here.
 	if level <= PanicLevel {
-		panic(entry) //sanek
+		//panic(entry) //sanek не было
+		panic(newEntry)
 	}
 }
vendor/github.com/ManyakRus/logrus/json_formatter.go (2 changes, generated, vendored)
@@ -66,7 +66,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 		switch v := v.(type) {
 		case error:
 			// Otherwise errors are ignored by `encoding/json`
-			// https://github.com/sirupsen/logrus/issues/137
+			// https://github.com/ManyakRus/logrus/issues/137
 			data[k] = v.Error()
 		default:
 			data[k] = v
vendor/github.com/ManyakRus/logrus/logger.go (18 changes, generated, vendored)
@ -76,12 +76,12 @@ func (mw *MutexWrap) Disable() {
|
||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
// instantiate your own:
|
||||
//
|
||||
// var log = &logrus.Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(logrus.TextFormatter),
|
||||
// Hooks: make(logrus.LevelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
// var log = &logrus.Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(logrus.TextFormatter),
|
||||
// Hooks: make(logrus.LevelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
//
|
||||
// It's recommended to make this a global instance called `log`.
|
||||
func New() *Logger {
|
||||
@ -347,9 +347,9 @@ func (logger *Logger) Exit(code int) {
|
||||
logger.ExitFunc(code)
|
||||
}
|
||||
|
||||
// When file is opened with appending mode, it's safe to
|
||||
// write concurrently to a file (within 4k message on Linux).
|
||||
// In these cases user can choose to disable the lock.
|
||||
//When file is opened with appending mode, it's safe to
|
||||
//write concurrently to a file (within 4k message on Linux).
|
||||
//In these cases user can choose to disable the lock.
|
||||
func (logger *Logger) SetNoLock() {
|
||||
logger.mu.Disable()
|
||||
}
|
||||
|
vendor/github.com/ManyakRus/logrus/terminal_check_bsd.go (2 changes, generated, vendored)
@@ -1,4 +1,4 @@
-// +build darwin dragonfly freebsd netbsd openbsd
+// +build darwin dragonfly freebsd netbsd openbsd hurd
 // +build !js

 package logrus
vendor/github.com/ManyakRus/logrus/terminal_check_unix.go (2 changes, generated, vendored)
@@ -1,5 +1,7 @@
+//go:build (linux || aix || zos) && !js && !wasi
 // +build linux aix zos
 // +build !js
+// +build !wasi

 package logrus

vendor/github.com/ManyakRus/logrus/terminal_check_wasi.go (new file, 8 lines, generated, vendored)
@@ -0,0 +1,8 @@
+//go:build wasi
+// +build wasi
+
+package logrus
+
+func isTerminal(fd int) bool {
+	return false
+}
vendor/github.com/ManyakRus/logrus/terminal_check_wasip1.go (new file, 8 lines, generated, vendored)
@@ -0,0 +1,8 @@
+//go:build wasip1
+// +build wasip1
+
+package logrus
+
+func isTerminal(fd int) bool {
+	return false
+}
vendor/github.com/ManyakRus/logrus/writer.go (36 changes, generated, vendored)
@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"io"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Writer at INFO level. See WriterLevel for details.
|
||||
@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
|
||||
return NewEntry(logger).WriterLevel(level)
|
||||
}
|
||||
|
||||
// Writer returns an io.Writer that writes to the logger at the info log level
|
||||
func (entry *Entry) Writer() *io.PipeWriter {
|
||||
return entry.WriterLevel(InfoLevel)
|
||||
}
|
||||
|
||||
// WriterLevel returns an io.Writer that writes to the logger at the given log level
|
||||
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
|
||||
reader, writer := io.Pipe()
|
||||
|
||||
var printFunc func(args ...interface{})
|
||||
|
||||
// Determine which log function to use based on the specified log level
|
||||
switch level {
|
||||
case TraceLevel:
|
||||
printFunc = entry.Trace
|
||||
@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
|
||||
printFunc = entry.Print
|
||||
}
|
||||
|
||||
// Start a new goroutine to scan the input and write it to the logger using the specified print function.
|
||||
// It splits the input into chunks of up to 64KB to avoid buffer overflows.
|
||||
go entry.writerScanner(reader, printFunc)
|
||||
|
||||
// Set a finalizer function to close the writer when it is garbage collected
|
||||
runtime.SetFinalizer(writer, writerFinalizer)
|
||||
|
||||
return writer
|
||||
}
|
||||
|
||||
// writerScanner scans the input from the reader and writes it to the logger
|
||||
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
printFunc(scanner.Text())
|
||||
|
||||
// Set the buffer size to the maximum token size to avoid buffer overflows
|
||||
scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
|
||||
|
||||
// Define a split function to split the input into chunks of up to 64KB
|
||||
chunkSize := bufio.MaxScanTokenSize // 64KB
|
||||
splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
|
||||
if len(data) >= chunkSize {
|
||||
return chunkSize, data[:chunkSize], nil
|
||||
}
|
||||
|
||||
return bufio.ScanLines(data, atEOF)
|
||||
}
|
||||
|
||||
// Use the custom split function to split the input
|
||||
scanner.Split(splitFunc)
|
||||
|
||||
// Scan the input and write it to the logger using the specified print function
|
||||
for scanner.Scan() {
|
||||
printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
|
||||
}
|
||||
|
||||
// If there was an error while scanning the input, log an error
|
||||
if err := scanner.Err(); err != nil {
|
||||
entry.Errorf("Error while reading from Writer: %s", err)
|
||||
}
|
||||
|
||||
// Close the reader when we are done
|
||||
reader.Close()
|
||||
}
|
||||
|
||||
// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected
|
||||
func writerFinalizer(writer *io.PipeWriter) {
|
||||
writer.Close()
|
||||
}
|
||||
|
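The writer.go change above swaps the plain line scanner for a bufio.Scanner with an enlarged buffer and a custom split function, so writes larger than bufio.MaxScanTokenSize (64 KB) are forwarded in chunks instead of failing. A short, hedged usage sketch of that writer API, using the fork import path as vendored here (levels and messages are illustrative):

```go
package main

import (
	stdlog "log"

	logrus "github.com/ManyakRus/logrus"
)

func main() {
	// WriterLevel returns an *io.PipeWriter; everything written to it is read
	// by the background goroutine shown above and re-logged line by line,
	// in chunks of at most 64 KB, at the chosen level.
	w := logrus.StandardLogger().WriterLevel(logrus.WarnLevel)
	defer w.Close()

	// Redirect the standard library logger into logrus through that pipe.
	stdlog.SetOutput(w)
	stdlog.Println("this line comes out as a logrus warning")
}
```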
vendor/github.com/ManyakRus/starter/config/config.go (42 changes, generated, vendored)
@ -1,42 +0,0 @@
|
||||
// модуль для загрузки переменных окружения в структуру
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/joho/godotenv"
|
||||
"github.com/ManyakRus/starter/logger"
|
||||
"github.com/ManyakRus/starter/micro"
|
||||
//log "github.com/sirupsen/logrus"
|
||||
//log "github.com/sirupsen/logrus"
|
||||
//"gitlab.aescorp.ru/dsp_dev/notifier/notifier_adp_eml/internal/v0/app/types"
|
||||
//"gitlab.aescorp.ru/dsp_dev/notifier/notifier_adp_eml/internal/v0/app/micro"
|
||||
)
|
||||
|
||||
// log хранит используемый логгер
|
||||
var log = logger.GetLog()
|
||||
|
||||
// LoadEnv - загружает переменные окружения в структуру из файла или из переменных окружения
|
||||
func LoadEnv() {
|
||||
|
||||
dir := micro.ProgramDir()
|
||||
filename := dir + ".env"
|
||||
LoadEnv_from_file(filename)
|
||||
}
|
||||
|
||||
// LoadEnv_from_file загружает переменные окружения в структуру из файла или из переменных окружения
|
||||
func LoadEnv_from_file(filename string) {
|
||||
|
||||
err := godotenv.Load(filename)
|
||||
if err != nil {
|
||||
log.Debug("Can not parse .env file: ", filename, " warning: "+err.Error())
|
||||
} else {
|
||||
log.Info("load .env from file: ", filename)
|
||||
}
|
||||
|
||||
//LOG_LEVEL := os.Getenv("LOG_LEVEL")
|
||||
//if LOG_LEVEL == "" {
|
||||
// LOG_LEVEL = "info"
|
||||
//}
|
||||
//logger.SetLevel(LOG_LEVEL)
|
||||
|
||||
}
|
vendor/github.com/ManyakRus/starter/config_main/config_main.go (new file, 88 lines, generated, vendored)
@@ -0,0 +1,88 @@
+// модуль для загрузки переменных окружения в структуру
+
+package config_main
+
+import (
+	"github.com/ManyakRus/starter/logger"
+	"github.com/ManyakRus/starter/micro"
+	"github.com/joho/godotenv"
+	//log "github.com/sirupsen/logrus"
+	//log "github.com/sirupsen/logrus"
+	//"gitlab.aescorp.ru/dsp_dev/notifier/notifier_adp_eml/internal/v0/app/types"
+	//"gitlab.aescorp.ru/dsp_dev/notifier/notifier_adp_eml/internal/v0/app/micro"
+)
+
+// log хранит используемый логгер
+var log = logger.GetLog()
+
+// LoadEnv - загружает из файла .env переменные в переменные окружения
+func LoadEnv() {
+
+	dir := micro.ProgramDir()
+	filename := dir + ".env"
+	LoadEnv_from_file(filename)
+}
+
+// LoadEnv - загружает из файла .env переменные в переменные окружения, возвращает ошибку
+func LoadEnv_err() error {
+	var err error
+
+	dir := micro.ProgramDir()
+	filename := dir + ".env"
+	err = LoadEnv_from_file_err(filename)
+
+	return err
+}
+
+// LoadSettingsTxt - загружает из файла settings.txt переменные в переменные окружения
+func LoadSettingsTxt() {
+
+	dir := micro.ProgramDir()
+	filename := dir + "settings.txt"
+	LoadEnv_from_file(filename)
+}
+
+// LoadSettingsTxt_err - загружает из файла settings.txt переменные в переменные окружения, возвращает ошибку
+func LoadSettingsTxt_err() error {
+	var err error
+
+	dir := micro.ProgramDir()
+	filename := dir + "settings.txt"
+	err = LoadEnv_from_file_err(filename)
+
+	return err
+}
+
+// LoadEnv_from_file загружает из файла переменные в переменные окружения
+func LoadEnv_from_file(filename string) {
+
+	FilenameShort := micro.LastWord(filename)
+
+	err := LoadEnv_from_file_err(filename)
+	if err != nil {
+		log.Debug("Can not parse "+FilenameShort+" file: ", filename, " warning: "+err.Error())
+	} else {
+		log.Info("load "+FilenameShort+" from file: ", filename)
+	}
+}
+
+// LoadEnv_from_file загружает из файла переменные в переменные окружения, возвращает ошибку
+func LoadEnv_from_file_err(filename string) error {
+	var err error
+
+	err = godotenv.Load(filename)
+
+	return err
+}
+
+// LoadENV_or_SettingsTXT - загружает из файла .env или settings.txt переменные в переменные окружения
+func LoadENV_or_SettingsTXT() {
+	errENV := LoadEnv_err()
+	var err2 error
+	if errENV != nil {
+		err2 = LoadSettingsTxt_err()
+	}
+	if err2 != nil {
+		log.Panic("LoadENV_or_SettingsTXT() error: ", err2)
+	}
+}
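For reference, a short usage sketch of the new config_main package shown above; the file path in the second call is an invented example:

```go
package main

import (
	"github.com/ManyakRus/starter/config_main"
)

func main() {
	// Try the .env file next to the binary first, then fall back to
	// settings.txt; LoadENV_or_SettingsTXT panics only if neither loads.
	config_main.LoadENV_or_SettingsTXT()

	// Or load one specific file and handle the error yourself
	// ("/etc/myapp/.env" is a hypothetical path).
	if err := config_main.LoadEnv_from_file_err("/etc/myapp/.env"); err != nil {
		// fall back to whatever is already in the environment
	}
}
```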
vendor/github.com/ManyakRus/starter/micro/microfunctions.go (110 changes, generated, vendored)
@ -676,6 +676,21 @@ func BoolFromInt(i int) bool {
|
||||
return Otvet
|
||||
}
|
||||
|
||||
// BoolFromString - возвращает true если строка = true, или =1
|
||||
func BoolFromString(s string) bool {
|
||||
Otvet := false
|
||||
|
||||
s = strings.TrimLeft(s, " ")
|
||||
s = strings.TrimRight(s, " ")
|
||||
s = strings.ToLower(s)
|
||||
|
||||
if s == "true" || s == "1" {
|
||||
Otvet = true
|
||||
}
|
||||
|
||||
return Otvet
|
||||
}
|
||||
|
||||
// DeleteFileSeperator - убирает в конце / или \
|
||||
func DeleteFileSeperator(dir string) string {
|
||||
Otvet := dir
|
||||
@ -692,3 +707,98 @@ func DeleteFileSeperator(dir string) string {
|
||||
|
||||
return Otvet
|
||||
}
|
||||
|
||||
// CreateFolder - создаёт папку на диске
|
||||
func CreateFolder(FilenameFull string, FilePermissions uint32) error {
|
||||
var err error
|
||||
|
||||
FileMode1 := os.FileMode(FilePermissions)
|
||||
if FilePermissions == 0 {
|
||||
FileMode1 = os.FileMode(0700)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(FilenameFull); errors.Is(err, os.ErrNotExist) {
|
||||
err := os.Mkdir(FilenameFull, FileMode1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteFolder - создаёт папку на диске
|
||||
func DeleteFolder(FilenameFull string) error {
|
||||
var err error
|
||||
|
||||
if _, err := os.Stat(FilenameFull); errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
|
||||
err = os.RemoveAll(FilenameFull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ContextDone - возвращает true если контекст завершен
|
||||
func ContextDone(ctx context.Context) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// StringFromUpperCase - возвращает строку, первая буква в верхнем регистре
|
||||
func StringFromUpperCase(s string) string {
|
||||
Otvet := s
|
||||
if Otvet == "" {
|
||||
return Otvet
|
||||
}
|
||||
|
||||
Otvet = strings.ToUpper(Otvet[:1]) + Otvet[1:]
|
||||
|
||||
return Otvet
|
||||
}
|
||||
|
||||
// StringFromLowerCase - возвращает строку, первая буква в нижнем регистре
|
||||
func StringFromLowerCase(s string) string {
|
||||
Otvet := s
|
||||
if Otvet == "" {
|
||||
return Otvet
|
||||
}
|
||||
|
||||
Otvet = strings.ToLower(Otvet[:1]) + Otvet[1:]
|
||||
|
||||
return Otvet
|
||||
}
|
||||
|
||||
// DeleteEndSlash - убирает в конце / или \
|
||||
func DeleteEndSlash(Text string) string {
|
||||
Otvet := Text
|
||||
|
||||
if Otvet == "" {
|
||||
return Otvet
|
||||
}
|
||||
|
||||
LastSymbol := Otvet[len(Otvet)-1:]
|
||||
if LastSymbol == "/" || LastSymbol == `\` {
|
||||
Otvet = Otvet[0 : len(Otvet)-1]
|
||||
}
|
||||
|
||||
return Otvet
|
||||
}
|
||||
|
||||
// Int64FromString - возвращает int64 из строки
|
||||
func Int64FromString(s string) (int64, error) {
|
||||
var Otvet int64
|
||||
var err error
|
||||
|
||||
Otvet, err = strconv.ParseInt(s, 10, 64)
|
||||
|
||||
return Otvet, err
|
||||
}
|
||||
|
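The micro section above mainly appends new helpers to the vendored package (BoolFromString, CreateFolder, DeleteFolder, ContextDone, StringFromUpperCase, StringFromLowerCase, DeleteEndSlash, Int64FromString). A brief usage sketch of a few of them, with invented input values:

```go
package main

import (
	"fmt"

	"github.com/ManyakRus/starter/micro"
)

func main() {
	fmt.Println(micro.BoolFromString(" TRUE "))     // true: the string is trimmed and lower-cased first
	fmt.Println(micro.StringFromUpperCase("otvet")) // "Otvet"
	fmt.Println(micro.DeleteEndSlash("/tmp/dir/"))  // "/tmp/dir"

	if n, err := micro.Int64FromString("42"); err == nil {
		fmt.Println(n) // 42
	}
}
```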
vendor/github.com/ManyakRus/starter/ping/ping.go (42 changes, generated, vendored)
@ -1,42 +0,0 @@
|
||||
package ping
|
||||
|
||||
import (
|
||||
"github.com/ManyakRus/starter/logger"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// log - глобальный логгер
|
||||
var log = logger.GetLog()
|
||||
|
||||
func Ping_err(IP, Port string) error {
|
||||
var err error
|
||||
|
||||
var timeout time.Duration
|
||||
timeout = time.Second * 3
|
||||
network := IP + ":" + Port
|
||||
|
||||
conn, err := net.DialTimeout("tcp", network, timeout)
|
||||
|
||||
if err != nil {
|
||||
//log.Warn("PingPort() error: ", err)
|
||||
} else {
|
||||
defer conn.Close()
|
||||
//log.Debug("ping OK: ", network)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func Ping(IP, Port string) {
|
||||
var err error
|
||||
|
||||
network := IP + ":" + Port
|
||||
|
||||
err = Ping_err(IP, Port)
|
||||
if err != nil {
|
||||
log.Panic("Ping() error: ", err)
|
||||
} else {
|
||||
log.Debug("Ping() OK: ", network)
|
||||
}
|
||||
}
|
vendor/github.com/ManyakRus/starter/port_checker/port_checker.go (new file, 43 lines, generated, vendored)
@@ -0,0 +1,43 @@
+package port_checker
+
+import (
+	"github.com/ManyakRus/starter/logger"
+	"net"
+	"time"
+)
+
+// log - глобальный логгер
+var log = logger.GetLog()
+
+// CheckPort_err - проверяет доступность порта, возвращает ошибку
+func CheckPort_err(IP, Port string) error {
+	var err error
+
+	var timeout time.Duration
+	timeout = time.Second * 3
+	network := IP + ":" + Port
+
+	conn, err := net.DialTimeout("tcp", network, timeout)
+
+	if err != nil {
+	} else {
+		defer conn.Close()
+	}
+
+	return err
+}
+
+// CheckPort - проверяет доступность порта
+// создаёт панику при ошибке
+func CheckPort(IP, Port string) {
+	var err error
+
+	network := IP + ":" + Port
+
+	err = CheckPort_err(IP, Port)
+	if err != nil {
+		log.Panic("CheckPort() error: ", err)
+	} else {
+		log.Debug("CheckPort() OK: ", network)
+	}
+}
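port_checker above is essentially the removed ping package under a new name (the postgres_gorm section below switches to it), so callers migrate one-for-one: Ping/Ping_err become CheckPort/CheckPort_err. A minimal sketch with an invented host and port:

```go
package main

import (
	"github.com/ManyakRus/starter/port_checker"
)

func main() {
	// CheckPort_err dials host:port over TCP with a 3-second timeout and
	// returns the dial error, if any.
	if err := port_checker.CheckPort_err("127.0.0.1", "5432"); err != nil {
		// the port is unreachable; decide how to handle it
	}

	// CheckPort runs the same check but calls log.Panic on failure.
	port_checker.CheckPort("127.0.0.1", "5432")
}
```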
vendor/github.com/ManyakRus/starter/postgres_gorm/postgres_gorm.go (97 changes, generated, vendored)
@ -5,8 +5,10 @@ package postgres_gorm
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/ManyakRus/starter/logger"
|
||||
"github.com/ManyakRus/starter/ping"
|
||||
"github.com/ManyakRus/starter/port_checker"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
//"github.com/jackc/pgconn"
|
||||
@ -59,7 +61,7 @@ func Connect() {
|
||||
FillSettings()
|
||||
}
|
||||
|
||||
ping.Ping(Settings.DB_HOST, Settings.DB_PORT)
|
||||
port_checker.CheckPort(Settings.DB_HOST, Settings.DB_PORT)
|
||||
|
||||
err := Connect_err()
|
||||
if err != nil {
|
||||
@ -73,6 +75,14 @@ func Connect() {
|
||||
// Connect_err - подключается к базе данных
|
||||
func Connect_err() error {
|
||||
var err error
|
||||
err = Connect_WithApplicationName_err("")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect_WithApplicationName_err - подключается к базе данных, с указанием имени приложения
|
||||
func Connect_WithApplicationName_err(ApplicationName string) error {
|
||||
var err error
|
||||
|
||||
if Settings.DB_HOST == "" {
|
||||
FillSettings()
|
||||
@ -84,12 +94,19 @@ func Connect_err() error {
|
||||
//defer cancel()
|
||||
|
||||
// get the database connection URL.
|
||||
dsn := GetDSN()
|
||||
dsn := GetDSN(ApplicationName)
|
||||
|
||||
//
|
||||
conf := &gorm.Config{}
|
||||
conn := postgres.Open(dsn)
|
||||
Conn, err = gorm.Open(conn, conf)
|
||||
//conn := postgres.Open(dsn)
|
||||
|
||||
dialect := postgres.New(postgres.Config{
|
||||
DSN: dsn,
|
||||
PreferSimpleProtocol: true, //для запуска мультизапросов
|
||||
})
|
||||
Conn, err = gorm.Open(dialect, conf)
|
||||
|
||||
//Conn, err = gorm.Open(conn, conf)
|
||||
Conn.Config.NamingStrategy = schema.NamingStrategy{TablePrefix: Settings.DB_SCHEMA + "."}
|
||||
Conn.Config.Logger = gormlogger.Default.LogMode(gormlogger.Warn)
|
||||
|
||||
@ -121,7 +138,7 @@ func IsClosed() bool {
|
||||
|
||||
err = DB.Ping()
|
||||
if err != nil {
|
||||
log.Error("DB.Ping() error: ", err)
|
||||
log.Error("DB.CheckPort() error: ", err)
|
||||
return true
|
||||
}
|
||||
return otvet
|
||||
@ -225,7 +242,7 @@ func WaitStop() {
|
||||
|
||||
select {
|
||||
case <-contextmain.GetContext().Done():
|
||||
log.Warn("Context app is canceled.")
|
||||
log.Warn("Context app is canceled. Postgres gorm.")
|
||||
}
|
||||
|
||||
//
|
||||
@ -249,6 +266,18 @@ func StartDB() {
|
||||
|
||||
}
|
||||
|
||||
// Start - делает соединение с БД, отключение и др.
|
||||
func Start(ApplicationName string) {
|
||||
Connect_WithApplicationName_err(ApplicationName)
|
||||
|
||||
stopapp.GetWaitGroup_Main().Add(1)
|
||||
go WaitStop()
|
||||
|
||||
stopapp.GetWaitGroup_Main().Add(1)
|
||||
go ping_go()
|
||||
|
||||
}
|
||||
|
||||
// FillSettings загружает переменные окружения в структуру из файла или из переменных окружения
|
||||
func FillSettings() {
|
||||
Settings = SettingsINI{}
|
||||
@ -298,12 +327,15 @@ func FillSettings() {
|
||||
}
|
||||
|
||||
// GetDSN - возвращает строку соединения к базе данных
|
||||
func GetDSN() string {
|
||||
func GetDSN(ApplicationName string) string {
|
||||
ApplicationName = strings.ReplaceAll(ApplicationName, " ", "_")
|
||||
|
||||
dsn := "host=" + Settings.DB_HOST + " "
|
||||
dsn += "user=" + Settings.DB_USER + " "
|
||||
dsn += "password=" + Settings.DB_PASSWORD + " "
|
||||
dsn += "dbname=" + Settings.DB_NAME + " "
|
||||
dsn += "port=" + Settings.DB_PORT + " sslmode=disable TimeZone=UTC"
|
||||
dsn += "port=" + Settings.DB_PORT + " sslmode=disable TimeZone=UTC "
|
||||
dsn += "application_name=" + ApplicationName
|
||||
|
||||
return dsn
|
||||
}
|
||||
@ -317,10 +349,20 @@ func GetConnection() *gorm.DB {
|
||||
return Conn
|
||||
}
|
||||
|
||||
// GetConnection_WithApplicationName - возвращает соединение к нужной базе данных, с указанием имени приложения
|
||||
func GetConnection_WithApplicationName(ApplicationName string) *gorm.DB {
|
||||
if Conn == nil {
|
||||
Connect_WithApplicationName_err(ApplicationName)
|
||||
}
|
||||
|
||||
return Conn
|
||||
}
|
||||
|
||||
// ping_go - делает пинг каждые 60 секунд, и реконнект
|
||||
func ping_go() {
|
||||
|
||||
ticker := time.NewTicker(60 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
addr := Settings.DB_HOST + ":" + Settings.DB_PORT
|
||||
|
||||
@ -332,13 +374,13 @@ loop:
|
||||
log.Warn("Context app is canceled. postgres_gorm.ping")
|
||||
break loop
|
||||
case <-ticker.C:
|
||||
err := ping.Ping_err(Settings.DB_HOST, Settings.DB_PORT)
|
||||
err := port_checker.CheckPort_err(Settings.DB_HOST, Settings.DB_PORT)
|
||||
//log.Debug("ticker, ping err: ", err) //удалить
|
||||
if err != nil {
|
||||
NeedReconnect = true
|
||||
log.Warn("postgres_gorm Ping(", addr, ") error: ", err)
|
||||
log.Warn("postgres_gorm CheckPort(", addr, ") error: ", err)
|
||||
} else if NeedReconnect == true {
|
||||
log.Warn("postgres_gorm Ping(", addr, ") OK. Start Reconnect()")
|
||||
log.Warn("postgres_gorm CheckPort(", addr, ") OK. Start Reconnect()")
|
||||
NeedReconnect = false
|
||||
Connect()
|
||||
}
|
||||
@ -347,3 +389,34 @@ loop:
|
||||
|
||||
stopapp.GetWaitGroup_Main().Done()
|
||||
}
|
||||
|
||||
// RawMultipleSQL - выполняет текст запроса, отдельно для каждого запроса
|
||||
func RawMultipleSQL(db *gorm.DB, TextSQL string) *gorm.DB {
|
||||
var tx *gorm.DB
|
||||
var err error
|
||||
tx = db
|
||||
|
||||
// запустим все запросы отдельно
|
||||
sqlSlice := strings.Split(TextSQL, ";")
|
||||
len1 := len(sqlSlice)
|
||||
for i, v := range sqlSlice {
|
||||
if i == len1-1 {
|
||||
tx = tx.Raw(v)
|
||||
err = tx.Error
|
||||
} else {
|
||||
tx = tx.Exec(v)
|
||||
err = tx.Error
|
||||
}
|
||||
if err != nil {
|
||||
TextError := fmt.Sprint("db.Raw() error: ", err, ", TextSQL: \n", v)
|
||||
err = errors.New(TextError)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if tx == nil {
|
||||
log.Panic("db.Raw() error: rows =nil")
|
||||
}
|
||||
|
||||
return tx
|
||||
}
|
||||
|
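The postgres_gorm changes above add an application_name to the DSN (GetDSN now takes the name, replacing spaces with underscores), switch the availability check from ping to port_checker, enable PreferSimpleProtocol, and add Start, Connect_WithApplicationName_err, GetConnection_WithApplicationName and RawMultipleSQL. A minimal usage sketch; the application name is illustrative:

```go
package main

import (
	"github.com/ManyakRus/starter/postgres_gorm"
)

func main() {
	// Start connects with the given application_name and registers the
	// shutdown waiter plus the 60-second port-check/reconnect loop shown above.
	postgres_gorm.Start("image_database")

	// Later, reuse the same connection (reconnecting under that name if needed).
	db := postgres_gorm.GetConnection_WithApplicationName("image_database")
	_ = db
}
```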
vendor/github.com/jackc/pgservicefile/.travis.yml (9 changes, generated, vendored)
@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - 1.x
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
vendor/github.com/jackc/pgservicefile/README.md (5 changes, generated, vendored)
@@ -1,5 +1,6 @@
-[![](https://godoc.org/github.com/jackc/pgservicefile?status.svg)](https://godoc.org/github.com/jackc/pgservicefile)
-[![Build Status](https://travis-ci.org/jackc/pgservicefile.svg)](https://travis-ci.org/jackc/pgservicefile)
+[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/pgservicefile.svg)](https://pkg.go.dev/github.com/jackc/pgservicefile)
+[![Build Status](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml/badge.svg)](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml)


 # pgservicefile

vendor/github.com/jackc/pgx/v5/CHANGELOG.md (23 changes, generated, vendored)
@ -1,3 +1,26 @@
|
||||
# 5.5.1 (December 9, 2023)
|
||||
|
||||
* Add CopyFromFunc helper function. (robford)
|
||||
* Add PgConn.Deallocate method that uses PostgreSQL protocol Close message.
|
||||
* pgx uses new PgConn.Deallocate method. This allows deallocating statements to work in a failed transaction. This fixes a case where the prepared statement map could become invalid.
|
||||
* Fix: Prefer driver.Valuer over json.Marshaler for json fields. (Jacopo)
|
||||
* Fix: simple protocol SQL sanitizer previously panicked if an invalid $0 placeholder was used. This now returns an error instead. (maksymnevajdev)
|
||||
* Add pgtype.Numeric.ScanScientific (Eshton Robateau)
|
||||
|
||||
# 5.5.0 (November 4, 2023)
|
||||
|
||||
* Add CollectExactlyOneRow. (Julien GOTTELAND)
|
||||
* Add OpenDBFromPool to create *database/sql.DB from *pgxpool.Pool. (Lev Zakharov)
|
||||
* Prepare can automatically choose statement name based on sql. This makes it easier to explicitly manage prepared statements.
|
||||
* Statement cache now uses deterministic, stable statement names.
|
||||
* database/sql prepared statement names are deterministically generated.
|
||||
* Fix: SendBatch wasn't respecting context cancellation.
|
||||
* Fix: Timeout error from pipeline is now normalized.
|
||||
* Fix: database/sql encoding json.RawMessage to []byte.
|
||||
* CancelRequest: Wait for the cancel request to be acknowledged by the server. This should improve PgBouncer compatibility. (Anton Levakin)
|
||||
* stdlib: Use Ping instead of CheckConn in ResetSession
|
||||
* Add json.Marshaler and json.Unmarshaler for Float4, Float8 (Kirill Mironov)
|
||||
|
||||
# 5.4.3 (August 5, 2023)
|
||||
|
||||
* Fix: QCharArrayOID was defined with the wrong OID (Christoph Engelbert)
|
||||
|
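One of the 5.5.1 items above, CopyFromFunc, also appears in the vendored copy_from.go at the end of this diff. A hedged sketch of how it is meant to be used (the table and column names are invented):

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

// insertPeople streams rows into a hypothetical "people" table using COPY.
func insertPeople(ctx context.Context, conn *pgx.Conn, rows [][]any) (int64, error) {
	i := 0
	// CopyFromFunc pulls rows from the callback and treats a nil row with a
	// nil error as end-of-data; a non-nil error aborts the copy.
	src := pgx.CopyFromFunc(func() ([]any, error) {
		if i >= len(rows) {
			return nil, nil
		}
		row := rows[i]
		i++
		return row, nil
	})
	return conn.CopyFrom(ctx, pgx.Identifier{"people"}, []string{"name", "age"}, src)
}
```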
vendor/github.com/jackc/pgx/v5/README.md (6 changes, generated, vendored)
@ -86,9 +86,13 @@ It is also possible to use the `database/sql` interface and convert a connection
|
||||
|
||||
See CONTRIBUTING.md for setup instructions.
|
||||
|
||||
## Architecture
|
||||
|
||||
See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.com/watch?v=sXMSWhcHCf8) for a description of pgx architecture.
|
||||
|
||||
## Supported Go and PostgreSQL Versions
|
||||
|
||||
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.19 and higher and PostgreSQL 11 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
|
||||
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.20 and higher and PostgreSQL 12 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
|
||||
|
||||
## Version Policy
|
||||
|
||||
|
vendor/github.com/jackc/pgx/v5/batch.go (2 changes, generated, vendored)
@@ -61,6 +61,8 @@ type Batch struct {
 }

 // Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
+// The only pgx option argument that is supported is QueryRewriter. Queries are executed using the
+// connection's DefaultQueryExecMode.
 func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
 	qq := &QueuedQuery{
 		query: query,
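The added comment above pins down Batch.Queue's contract: plain SQL or a prepared-statement name, QueryRewriter as the only supported per-query option, and execution under the connection's DefaultQueryExecMode. A short, hedged usage sketch (the table is invented):

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func runBatch(ctx context.Context, conn *pgx.Conn) error {
	b := &pgx.Batch{}
	b.Queue("insert into logs(msg) values($1)", "hello")
	b.Queue("insert into logs(msg) values($1)", "world")

	// SendBatch sends all queued queries in one round trip; results are read
	// back in the order they were queued.
	br := conn.SendBatch(ctx, b)
	if _, err := br.Exec(); err != nil {
		br.Close()
		return err
	}
	if _, err := br.Exec(); err != nil {
		br.Close()
		return err
	}
	return br.Close()
}
```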
vendor/github.com/jackc/pgx/v5/conn.go (102 changes, generated, vendored)
@ -2,6 +2,8 @@ package pgx
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
@ -35,7 +37,7 @@ type ConnConfig struct {
|
||||
|
||||
// DefaultQueryExecMode controls the default mode for executing queries. By default pgx uses the extended protocol
|
||||
// and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as
|
||||
// PGBouncer. In this case it may be preferrable to use QueryExecModeExec or QueryExecModeSimpleProtocol. The same
|
||||
// PGBouncer. In this case it may be preferable to use QueryExecModeExec or QueryExecModeSimpleProtocol. The same
|
||||
// functionality can be controlled on a per query basis by passing a QueryExecMode as the first query argument.
|
||||
DefaultQueryExecMode QueryExecMode
|
||||
|
||||
@ -99,8 +101,12 @@ func (ident Identifier) Sanitize() string {
|
||||
return strings.Join(parts, ".")
|
||||
}
|
||||
|
||||
// ErrNoRows occurs when rows are expected but none are returned.
|
||||
var ErrNoRows = errors.New("no rows in result set")
|
||||
var (
|
||||
// ErrNoRows occurs when rows are expected but none are returned.
|
||||
ErrNoRows = errors.New("no rows in result set")
|
||||
// ErrTooManyRows occurs when more rows than expected are returned.
|
||||
ErrTooManyRows = errors.New("too many rows in result set")
|
||||
)
|
||||
|
||||
var errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
|
||||
var errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
|
||||
@ -269,7 +275,7 @@ func connect(ctx context.Context, config *ConnConfig) (c *Conn, err error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Close closes a connection. It is safe to call Close on a already closed
|
||||
// Close closes a connection. It is safe to call Close on an already closed
|
||||
// connection.
|
||||
func (c *Conn) Close(ctx context.Context) error {
|
||||
if c.IsClosed() {
|
||||
@ -280,12 +286,15 @@ func (c *Conn) Close(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Prepare creates a prepared statement with name and sql. sql can contain placeholders
|
||||
// for bound parameters. These placeholders are referenced positional as $1, $2, etc.
|
||||
// Prepare creates a prepared statement with name and sql. sql can contain placeholders for bound parameters. These
|
||||
// placeholders are referenced positionally as $1, $2, etc. name can be used instead of sql with Query, QueryRow, and
|
||||
// Exec to execute the statement. It can also be used with Batch.Queue.
|
||||
//
|
||||
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
|
||||
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
|
||||
// concern for if the statement has already been prepared.
|
||||
// The underlying PostgreSQL identifier for the prepared statement will be name if name != sql or a digest of sql if
|
||||
// name == sql.
|
||||
//
|
||||
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same name and sql arguments. This
|
||||
// allows a code path to Prepare and Query/Exec without concern for if the statement has already been prepared.
|
||||
func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.StatementDescription, err error) {
|
||||
if c.prepareTracer != nil {
|
||||
ctx = c.prepareTracer.TracePrepareStart(ctx, c, TracePrepareStartData{Name: name, SQL: sql})
|
||||
@ -307,23 +316,48 @@ func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.Statem
|
||||
}()
|
||||
}
|
||||
|
||||
sd, err = c.pgConn.Prepare(ctx, name, sql, nil)
|
||||
var psName, psKey string
|
||||
if name == sql {
|
||||
digest := sha256.Sum256([]byte(sql))
|
||||
psName = "stmt_" + hex.EncodeToString(digest[0:24])
|
||||
psKey = sql
|
||||
} else {
|
||||
psName = name
|
||||
psKey = name
|
||||
}
|
||||
|
||||
sd, err = c.pgConn.Prepare(ctx, psName, sql, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if name != "" {
|
||||
c.preparedStatements[name] = sd
|
||||
if psKey != "" {
|
||||
c.preparedStatements[psKey] = sd
|
||||
}
|
||||
|
||||
return sd, nil
|
||||
}
|
||||
|
||||
// Deallocate released a prepared statement
|
||||
// Deallocate releases a prepared statement. Calling Deallocate on a non-existent prepared statement will succeed.
|
||||
func (c *Conn) Deallocate(ctx context.Context, name string) error {
|
||||
delete(c.preparedStatements, name)
|
||||
_, err := c.pgConn.Exec(ctx, "deallocate "+quoteIdentifier(name)).ReadAll()
|
||||
return err
|
||||
var psName string
|
||||
sd := c.preparedStatements[name]
|
||||
if sd != nil {
|
||||
psName = sd.Name
|
||||
} else {
|
||||
psName = name
|
||||
}
|
||||
|
||||
err := c.pgConn.Deallocate(ctx, psName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sd != nil {
|
||||
delete(c.preparedStatements, name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeallocateAll releases all previously prepared statements from the server and client, where it also resets the statement and description cache.
|
||||
@ -441,7 +475,7 @@ optionLoop:
|
||||
if queryRewriter != nil {
|
||||
sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
|
||||
if err != nil {
|
||||
return pgconn.CommandTag{}, fmt.Errorf("rewrite query failed: %v", err)
|
||||
return pgconn.CommandTag{}, fmt.Errorf("rewrite query failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -461,7 +495,7 @@ optionLoop:
|
||||
}
|
||||
sd := c.statementCache.Get(sql)
|
||||
if sd == nil {
|
||||
sd, err = c.Prepare(ctx, stmtcache.NextStatementName(), sql)
|
||||
sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
|
||||
if err != nil {
|
||||
return pgconn.CommandTag{}, err
|
||||
}
|
||||
@ -573,13 +607,16 @@ type QueryExecMode int32
|
||||
const (
|
||||
_ QueryExecMode = iota
|
||||
|
||||
// Automatically prepare and cache statements. This uses the extended protocol. Queries are executed in a single
|
||||
// round trip after the statement is cached. This is the default.
|
||||
// Automatically prepare and cache statements. This uses the extended protocol. Queries are executed in a single round
|
||||
// trip after the statement is cached. This is the default. If the database schema is modified or the search_path is
|
||||
// changed after a statement is cached then the first execution of a previously cached query may fail. e.g. If the
|
||||
// number of columns returned by a "SELECT *" changes or the type of a column is changed.
|
||||
QueryExecModeCacheStatement
|
||||
|
||||
// Cache statement descriptions (i.e. argument and result types) and assume they do not change. This uses the
|
||||
// extended protocol. Queries are executed in a single round trip after the description is cached. If the database
|
||||
// schema is modified or the search_path is changed this may result in undetected result decoding errors.
|
||||
// Cache statement descriptions (i.e. argument and result types) and assume they do not change. This uses the extended
|
||||
// protocol. Queries are executed in a single round trip after the description is cached. If the database schema is
|
||||
// modified or the search_path is changed after a statement is cached then the first execution of a previously cached
|
||||
// query may fail. e.g. If the number of columns returned by a "SELECT *" changes or the type of a column is changed.
|
||||
QueryExecModeCacheDescribe
|
||||
|
||||
// Get the statement description on every execution. This uses the extended protocol. Queries require two round trips
|
||||
@ -592,13 +629,13 @@ const (
|
||||
// Assume the PostgreSQL query parameter types based on the Go type of the arguments. This uses the extended protocol
|
||||
// with text formatted parameters and results. Queries are executed in a single round trip. Type mappings can be
|
||||
// registered with pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are
|
||||
// unregistered or ambigious. e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know
|
||||
// unregistered or ambiguous. e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know
|
||||
// the PostgreSQL type can use a map[string]string directly as an argument. This mode cannot.
|
||||
QueryExecModeExec
|
||||
|
||||
// Use the simple protocol. Assume the PostgreSQL query parameter types based on the Go type of the arguments.
|
||||
// Queries are executed in a single round trip. Type mappings can be registered with
|
||||
// pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are unregistered or ambigious.
|
||||
// pgtype.Map.RegisterDefaultPgType. Queries will be rejected that have arguments that are unregistered or ambiguous.
|
||||
// e.g. A map[string]string may have the PostgreSQL type json or hstore. Modes that know the PostgreSQL type can use
|
||||
// a map[string]string directly as an argument. This mode cannot.
|
||||
//
|
||||
@ -705,7 +742,7 @@ optionLoop:
|
||||
sql, args, err = queryRewriter.RewriteQuery(ctx, c, sql, args)
|
||||
if err != nil {
|
||||
rows := c.getRows(ctx, originalSQL, originalArgs)
|
||||
err = fmt.Errorf("rewrite query failed: %v", err)
|
||||
err = fmt.Errorf("rewrite query failed: %w", err)
|
||||
rows.fatal(err)
|
||||
return rows, err
|
||||
}
|
||||
@ -815,7 +852,7 @@ func (c *Conn) getStatementDescription(
|
||||
}
|
||||
sd = c.statementCache.Get(sql)
|
||||
if sd == nil {
|
||||
sd, err = c.Prepare(ctx, stmtcache.NextStatementName(), sql)
|
||||
sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -865,8 +902,6 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
|
||||
return &batchResults{ctx: ctx, conn: c, err: err}
|
||||
}
|
||||
|
||||
mode := c.config.DefaultQueryExecMode
|
||||
|
||||
for _, bi := range b.queuedQueries {
|
||||
var queryRewriter QueryRewriter
|
||||
sql := bi.query
|
||||
@ -874,6 +909,7 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
|
||||
|
||||
optionLoop:
|
||||
for len(arguments) > 0 {
|
||||
// Update Batch.Queue function comment when additional options are implemented
|
||||
switch arg := arguments[0].(type) {
|
||||
case QueryRewriter:
|
||||
queryRewriter = arg
|
||||
@ -887,7 +923,7 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
|
||||
var err error
|
||||
sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
|
||||
if err != nil {
|
||||
return &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("rewrite query failed: %v", err)}
|
||||
return &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("rewrite query failed: %w", err)}
|
||||
}
|
||||
}
|
||||
|
||||
@ -895,6 +931,8 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
|
||||
bi.arguments = arguments
|
||||
}
|
||||
|
||||
// TODO: changing mode per batch? Update Batch.Queue function comment when implemented
|
||||
mode := c.config.DefaultQueryExecMode
|
||||
if mode == QueryExecModeSimpleProtocol {
|
||||
return c.sendBatchQueryExecModeSimpleProtocol(ctx, b)
|
||||
}
|
||||
@ -994,7 +1032,7 @@ func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batc
|
||||
bi.sd = distinctNewQueries[idx]
|
||||
} else {
|
||||
sd = &pgconn.StatementDescription{
|
||||
Name: stmtcache.NextStatementName(),
|
||||
Name: stmtcache.StatementName(bi.query),
|
||||
SQL: bi.query,
|
||||
}
|
||||
distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
|
||||
@ -1062,7 +1100,7 @@ func (c *Conn) sendBatchQueryExecModeDescribeExec(ctx context.Context, b *Batch)
|
||||
}
|
||||
|
||||
func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, distinctNewQueries []*pgconn.StatementDescription, sdCache stmtcache.Cache) (pbr *pipelineBatchResults) {
|
||||
pipeline := c.pgConn.StartPipeline(context.Background())
|
||||
pipeline := c.pgConn.StartPipeline(ctx)
|
||||
defer func() {
|
||||
if pbr != nil && pbr.err != nil {
|
||||
pipeline.Close()
|
||||
|
27 vendor/github.com/jackc/pgx/v5/copy_from.go generated vendored
@ -64,6 +64,33 @@ func (cts *copyFromSlice) Err() error {
|
||||
return cts.err
|
||||
}
|
||||
|
||||
// CopyFromFunc returns a CopyFromSource interface that relies on nxtf for values.
|
||||
// nxtf returns rows until it either signals an 'end of data' by returning row=nil and err=nil,
|
||||
// or it returns an error. If nxtf returns an error, the copy is aborted.
|
||||
func CopyFromFunc(nxtf func() (row []any, err error)) CopyFromSource {
|
||||
return ©FromFunc{next: nxtf}
|
||||
}
|
||||
|
||||
type copyFromFunc struct {
|
||||
next func() ([]any, error)
|
||||
valueRow []any
|
||||
err error
|
||||
}
|
||||
|
||||
func (g *copyFromFunc) Next() bool {
|
||||
g.valueRow, g.err = g.next()
|
||||
// only return true if valueRow exists and no error
|
||||
return g.valueRow != nil && g.err == nil
|
||||
}
|
||||
|
||||
func (g *copyFromFunc) Values() ([]any, error) {
|
||||
return g.valueRow, g.err
|
||||
}
|
||||
|
||||
func (g *copyFromFunc) Err() error {
|
||||
return g.err
|
||||
}
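A short usage sketch (not part of this commit) for the CopyFromFunc helper added above; the table, columns and rows are illustrative placeholders.

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func copyItems(ctx context.Context, conn *pgx.Conn) (int64, error) {
	rows := [][]any{{1, "alpha"}, {2, "beta"}}
	i := 0
	// CopyFromFunc ends the copy when the callback returns (nil, nil) and
	// aborts it if the callback returns an error.
	return conn.CopyFrom(
		ctx,
		pgx.Identifier{"items"},
		[]string{"id", "name"},
		pgx.CopyFromFunc(func() ([]any, error) {
			if i >= len(rows) {
				return nil, nil // end of data
			}
			row := rows[i]
			i++
			return row, nil
		}),
	)
}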
|
||||
|
||||
// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data.
|
||||
type CopyFromSource interface {
|
||||
// Next returns true if there is another row and makes the next row data
|
||||
|
2 vendor/github.com/jackc/pgx/v5/extended_query_builder.go generated vendored
@ -36,7 +36,7 @@ func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescri
|
||||
for i := range args {
|
||||
err := eqb.appendParam(m, sd.ParamOIDs[i], -1, args[i])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to encode args[%d]: %v", i, err)
|
||||
err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
5 vendor/github.com/jackc/pgx/v5/internal/sanitize/sanitize.go generated vendored
@ -35,6 +35,11 @@ func (q *Query) Sanitize(args ...any) (string, error) {
|
||||
str = part
|
||||
case int:
|
||||
argIdx := part - 1
|
||||
|
||||
if argIdx < 0 {
|
||||
return "", fmt.Errorf("first sql argument must be > 0")
|
||||
}
|
||||
|
||||
if argIdx >= len(args) {
|
||||
return "", fmt.Errorf("insufficient arguments")
|
||||
}
|
||||
|
12 vendor/github.com/jackc/pgx/v5/internal/stmtcache/lru_cache.go generated vendored
@ -34,7 +34,8 @@ func (c *LRUCache) Get(key string) *pgconn.StatementDescription {
|
||||
|
||||
}
|
||||
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache.
|
||||
// Put stores sd in the cache. Put panics if sd.SQL is "". Put does nothing if sd.SQL already exists in the cache or
|
||||
// sd.SQL has been invalidated and HandleInvalidated has not been called yet.
|
||||
func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
|
||||
if sd.SQL == "" {
|
||||
panic("cannot store statement description with empty SQL")
|
||||
@ -44,6 +45,13 @@ func (c *LRUCache) Put(sd *pgconn.StatementDescription) {
|
||||
return
|
||||
}
|
||||
|
||||
// The statement may have been invalidated but not yet handled. Do not readd it to the cache.
|
||||
for _, invalidSD := range c.invalidStmts {
|
||||
if invalidSD.SQL == sd.SQL {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if c.l.Len() == c.cap {
|
||||
c.invalidateOldest()
|
||||
}
|
||||
@ -73,6 +81,8 @@ func (c *LRUCache) InvalidateAll() {
|
||||
c.l = list.New()
|
||||
}
|
||||
|
||||
// HandleInvalidated returns a slice of all statement descriptions invalidated since the last call to HandleInvalidated.
|
||||
// Typically, the caller will then deallocate them.
|
||||
func (c *LRUCache) HandleInvalidated() []*pgconn.StatementDescription {
|
||||
invalidStmts := c.invalidStmts
|
||||
c.invalidStmts = nil
|
||||
|
31 vendor/github.com/jackc/pgx/v5/internal/stmtcache/stmtcache.go generated vendored
@ -2,18 +2,17 @@
package stmtcache

import (
"strconv"
"sync/atomic"
"crypto/sha256"
"encoding/hex"

"github.com/jackc/pgx/v5/pgconn"
)

var stmtCounter int64

// NextStatementName returns a statement name that will be unique for the lifetime of the program.
func NextStatementName() string {
n := atomic.AddInt64(&stmtCounter, 1)
return "stmtcache_" + strconv.FormatInt(n, 10)
// StatementName returns a statement name that will be stable for sql across multiple connections and program
// executions.
func StatementName(sql string) string {
digest := sha256.Sum256([]byte(sql))
return "stmtcache_" + hex.EncodeToString(digest[0:24])
}
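Statement names are now derived from a SHA-256 digest of the SQL, so the same query text maps to the same name on every connection and program run, unlike the old per-process counter. A stand-alone sketch of the scheme (the helper below is illustrative, not the vendored function itself):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// statementName mirrors the digest-based naming above: the first 24 bytes of
// the SHA-256 of the SQL, hex encoded, prefixed with "stmtcache_".
func statementName(sql string) string {
	digest := sha256.Sum256([]byte(sql))
	return "stmtcache_" + hex.EncodeToString(digest[0:24])
}

func main() {
	// The same SQL always yields the same name.
	fmt.Println(statementName("select id, name from widgets where id = $1"))
}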
|
||||
|
||||
// Cache caches statement descriptions.
|
||||
@ -39,19 +38,3 @@ type Cache interface {
|
||||
// Cap returns the maximum number of cached prepared statement descriptions.
|
||||
Cap() int
|
||||
}
|
||||
|
||||
func IsStatementInvalid(err error) bool {
|
||||
pgErr, ok := err.(*pgconn.PgError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// https://github.com/jackc/pgx/issues/1162
|
||||
//
|
||||
// We used to look for the message "cached plan must not change result type". However, that message can be localized.
|
||||
// Unfortunately, error code "0A000" - "FEATURE NOT SUPPORTED" is used for many different errors and the only way to
|
||||
// tell the difference is by the message. But all that happens is we clear a statement that we otherwise wouldn't
|
||||
// have so it should be safe.
|
||||
possibleInvalidCachedPlanError := pgErr.Code == "0A000"
|
||||
return possibleInvalidCachedPlanError
|
||||
}
|
||||
|
4 vendor/github.com/jackc/pgx/v5/pgconn/auth_scram.go generated vendored
@ -47,7 +47,7 @@ func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Receive server-first-message payload in a AuthenticationSASLContinue.
|
||||
// Receive server-first-message payload in an AuthenticationSASLContinue.
|
||||
saslContinue, err := c.rxSASLContinue()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -67,7 +67,7 @@ func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Receive server-final-message payload in a AuthenticationSASLFinal.
|
||||
// Receive server-final-message payload in an AuthenticationSASLFinal.
|
||||
saslFinal, err := c.rxSASLFinal()
|
||||
if err != nil {
|
||||
return err
|
||||
|
10 vendor/github.com/jackc/pgx/v5/pgconn/config.go generated vendored
@ -809,7 +809,7 @@ func makeConnectTimeoutDialFunc(timeout time.Duration) DialFunc {
|
||||
return d.DialContext
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsReadWrite is an ValidateConnectFunc that implements libpq compatible
|
||||
// ValidateConnectTargetSessionAttrsReadWrite is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=read-write.
|
||||
func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgConn) error {
|
||||
result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
|
||||
@ -824,7 +824,7 @@ func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgC
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsReadOnly is an ValidateConnectFunc that implements libpq compatible
|
||||
// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=read-only.
|
||||
func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
|
||||
result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
|
||||
@ -839,7 +839,7 @@ func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgCo
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsStandby is an ValidateConnectFunc that implements libpq compatible
|
||||
// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=standby.
|
||||
func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
|
||||
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
|
||||
@ -854,7 +854,7 @@ func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgCon
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsPrimary is an ValidateConnectFunc that implements libpq compatible
|
||||
// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=primary.
|
||||
func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
|
||||
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
|
||||
@ -869,7 +869,7 @@ func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgCon
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateConnectTargetSessionAttrsPreferStandby is an ValidateConnectFunc that implements libpq compatible
|
||||
// ValidateConnectTargetSessionAttrsPreferStandby is a ValidateConnectFunc that implements libpq compatible
|
||||
// target_session_attrs=prefer-standby.
|
||||
func ValidateConnectTargetSessionAttrsPreferStandby(ctx context.Context, pgConn *PgConn) error {
|
||||
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
|
||||
|
108 vendor/github.com/jackc/pgx/v5/pgconn/pgconn.go generated vendored
@ -74,6 +74,7 @@ type PgConn struct {
|
||||
frontend *pgproto3.Frontend
|
||||
bgReader *bgreader.BGReader
|
||||
slowWriteTimer *time.Timer
|
||||
bgReaderStarted chan struct{}
|
||||
|
||||
config *Config
|
||||
|
||||
@ -301,8 +302,14 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
|
||||
pgConn.parameterStatuses = make(map[string]string)
|
||||
pgConn.status = connStatusConnecting
|
||||
pgConn.bgReader = bgreader.New(pgConn.conn)
|
||||
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
|
||||
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
|
||||
func() {
|
||||
pgConn.bgReader.Start()
|
||||
pgConn.bgReaderStarted <- struct{}{}
|
||||
},
|
||||
)
|
||||
pgConn.slowWriteTimer.Stop()
|
||||
pgConn.bgReaderStarted = make(chan struct{})
|
||||
pgConn.frontend = config.BuildFrontend(pgConn.bgReader, pgConn.conn)
|
||||
|
||||
startupMsg := pgproto3.StartupMessage{
|
||||
@ -593,7 +600,7 @@ func (pgConn *PgConn) Frontend() *pgproto3.Frontend {
|
||||
return pgConn.frontend
|
||||
}
|
||||
|
||||
// Close closes a connection. It is safe to call Close on a already closed connection. Close attempts a clean close by
|
||||
// Close closes a connection. It is safe to call Close on an already closed connection. Close attempts a clean close by
|
||||
// sending the exit message to PostgreSQL. However, this could block so ctx is available to limit the time to wait. The
|
||||
// underlying net.Conn.Close() will always be called regardless of any other errors.
|
||||
func (pgConn *PgConn) Close(ctx context.Context) error {
|
||||
@ -806,6 +813,9 @@ type StatementDescription struct {
|
||||
|
||||
// Prepare creates a prepared statement. If the name is empty, the anonymous prepared statement will be used. This
|
||||
// allows Prepare to also to describe statements without creating a server-side prepared statement.
|
||||
//
|
||||
// Prepare does not send a PREPARE statement to the server. It uses the PostgreSQL Parse and Describe protocol messages
|
||||
// directly.
|
||||
func (pgConn *PgConn) Prepare(ctx context.Context, name, sql string, paramOIDs []uint32) (*StatementDescription, error) {
|
||||
if err := pgConn.lock(); err != nil {
|
||||
return nil, err
|
||||
@ -862,6 +872,52 @@ readloop:
|
||||
return psd, nil
|
||||
}
|
||||
|
||||
// Deallocate deallocates a prepared statement.
|
||||
//
|
||||
// Deallocate does not send a DEALLOCATE statement to the server. It uses the PostgreSQL Close protocol message
|
||||
// directly. This has slightly different behavior than executing DEALLOCATE statement.
|
||||
// - Deallocate can succeed in an aborted transaction.
|
||||
// - Deallocating a non-existent prepared statement is not an error.
|
||||
func (pgConn *PgConn) Deallocate(ctx context.Context, name string) error {
|
||||
if err := pgConn.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
defer pgConn.unlock()
|
||||
|
||||
if ctx != context.Background() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return newContextAlreadyDoneError(ctx)
|
||||
default:
|
||||
}
|
||||
pgConn.contextWatcher.Watch(ctx)
|
||||
defer pgConn.contextWatcher.Unwatch()
|
||||
}
|
||||
|
||||
pgConn.frontend.SendClose(&pgproto3.Close{ObjectType: 'S', Name: name})
|
||||
pgConn.frontend.SendSync(&pgproto3.Sync{})
|
||||
err := pgConn.flushWithPotentialWriteReadDeadlock()
|
||||
if err != nil {
|
||||
pgConn.asyncClose()
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
msg, err := pgConn.receiveMessage()
|
||||
if err != nil {
|
||||
pgConn.asyncClose()
|
||||
return normalizeTimeoutError(ctx, err)
|
||||
}
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *pgproto3.ErrorResponse:
|
||||
return ErrorResponseToPgError(msg)
|
||||
case *pgproto3.ReadyForQuery:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
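A minimal sketch (not part of this commit) of using the new Deallocate method together with Prepare at the pgconn level; the statement name and query are placeholders.

import (
	"context"

	"github.com/jackc/pgx/v5/pgconn"
)

func prepareThenDeallocate(ctx context.Context, pgConn *pgconn.PgConn) error {
	// Prepare sends Parse/Describe protocol messages, not a PREPARE statement.
	_, err := pgConn.Prepare(ctx, "get_widget_name", "select name from widgets where id = $1", nil)
	if err != nil {
		return err
	}

	// ... execute the prepared statement with ExecPrepared as needed ...

	// Deallocate sends a protocol-level Close message, so it also succeeds in
	// an aborted transaction and tolerates non-existent statements.
	return pgConn.Deallocate(ctx, "get_widget_name")
}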
|
||||
|
||||
// ErrorResponseToPgError converts a wire protocol error message to a *PgError.
|
||||
func ErrorResponseToPgError(msg *pgproto3.ErrorResponse) *PgError {
|
||||
return &PgError{
|
||||
@ -935,16 +991,21 @@ func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
|
||||
buf := make([]byte, 16)
|
||||
binary.BigEndian.PutUint32(buf[0:4], 16)
|
||||
binary.BigEndian.PutUint32(buf[4:8], 80877102)
|
||||
binary.BigEndian.PutUint32(buf[8:12], uint32(pgConn.pid))
|
||||
binary.BigEndian.PutUint32(buf[12:16], uint32(pgConn.secretKey))
|
||||
// Postgres will process the request and close the connection
|
||||
// so when don't need to read the reply
|
||||
// https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.6.7.10
|
||||
_, err = cancelConn.Write(buf)
|
||||
return err
|
||||
binary.BigEndian.PutUint32(buf[8:12], pgConn.pid)
|
||||
binary.BigEndian.PutUint32(buf[12:16], pgConn.secretKey)
|
||||
|
||||
if _, err := cancelConn.Write(buf); err != nil {
|
||||
return fmt.Errorf("write to connection for cancellation: %w", err)
|
||||
}
|
||||
|
||||
// Wait for the cancel request to be acknowledged by the server.
|
||||
// It copies the behavior of the libpq: https://github.com/postgres/postgres/blob/REL_16_0/src/interfaces/libpq/fe-connect.c#L4946-L4960
|
||||
_, _ = cancelConn.Read(buf)
|
||||
|
||||
return nil
|
||||
}
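A hedged usage sketch (not part of this commit) of issuing a cancel request for a long-running query from another goroutine; the delay and query are arbitrary examples.

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5"
)

func cancelAfterDelay(conn *pgx.Conn) {
	go func() {
		time.Sleep(2 * time.Second)
		// CancelRequest dials a separate connection, writes the cancel message
		// and, per the change above, waits for the server to close it.
		if err := conn.PgConn().CancelRequest(context.Background()); err != nil {
			log.Println("cancel request failed:", err)
		}
	}()

	_, err := conn.Exec(context.Background(), "select pg_sleep(60)")
	log.Println("query ended:", err) // typically: canceling statement due to user request
}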
|
||||
|
||||
// WaitForNotification waits for a LISTON/NOTIFY message to be received. It returns an error if a notification was not
|
||||
// WaitForNotification waits for a LISTEN/NOTIFY message to be received. It returns an error if a notification was not
|
||||
// received.
|
||||
func (pgConn *PgConn) WaitForNotification(ctx context.Context) error {
|
||||
if err := pgConn.lock(); err != nil {
|
||||
@ -1732,10 +1793,16 @@ func (pgConn *PgConn) enterPotentialWriteReadDeadlock() {
|
||||
|
||||
// exitPotentialWriteReadDeadlock must be called after a call to enterPotentialWriteReadDeadlock.
|
||||
func (pgConn *PgConn) exitPotentialWriteReadDeadlock() {
|
||||
// The state of the timer is not relevant upon exiting the potential slow write. It may both
|
||||
// fire (due to a slow write), or not fire (due to a fast write).
|
||||
_ = pgConn.slowWriteTimer.Stop()
|
||||
pgConn.bgReader.Stop()
|
||||
if !pgConn.slowWriteTimer.Stop() {
|
||||
// The timer starts its function in a separate goroutine. It is necessary to ensure the background reader has
|
||||
// started before calling Stop. Otherwise, the background reader may not be stopped. That on its own is not a
|
||||
// serious problem. But what is a serious problem is that the background reader may start at an inopportune time in
|
||||
// a subsequent query. For example, if a subsequent query was canceled then a deadline may be set on the net.Conn to
|
||||
// interrupt an in-progress read. After the read is interrupted, but before the deadline is cleared, the background
|
||||
// reader could start and read a deadline error. Then the next query would receive the an unexpected deadline error.
|
||||
<-pgConn.bgReaderStarted
|
||||
pgConn.bgReader.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (pgConn *PgConn) flushWithPotentialWriteReadDeadlock() error {
|
||||
@ -1764,7 +1831,7 @@ func (pgConn *PgConn) SyncConn(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
// This should never happen. Only way I can imagine this occuring is if the server is constantly sending data such as
|
||||
// This should never happen. Only way I can imagine this occurring is if the server is constantly sending data such as
|
||||
// LISTEN/NOTIFY or log notifications such that we never can get an empty buffer.
|
||||
return errors.New("SyncConn: conn never synchronized")
|
||||
}
|
||||
@ -1830,8 +1897,14 @@ func Construct(hc *HijackedConn) (*PgConn, error) {
|
||||
|
||||
pgConn.contextWatcher = newContextWatcher(pgConn.conn)
|
||||
pgConn.bgReader = bgreader.New(pgConn.conn)
|
||||
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
|
||||
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
|
||||
func() {
|
||||
pgConn.bgReader.Start()
|
||||
pgConn.bgReaderStarted <- struct{}{}
|
||||
},
|
||||
)
|
||||
pgConn.slowWriteTimer.Stop()
|
||||
pgConn.bgReaderStarted = make(chan struct{})
|
||||
pgConn.frontend = hc.Config.BuildFrontend(pgConn.bgReader, pgConn.conn)
|
||||
|
||||
return pgConn, nil
|
||||
@ -1996,7 +2069,8 @@ func (p *Pipeline) GetResults() (results any, err error) {
|
||||
for {
|
||||
msg, err := p.conn.receiveMessage()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
p.conn.asyncClose()
|
||||
return nil, normalizeTimeoutError(p.ctx, err)
|
||||
}
|
||||
|
||||
switch msg := msg.(type) {
|
||||
|
2 vendor/github.com/jackc/pgx/v5/pgproto3/README.md generated vendored
@ -1,6 +1,6 @@
|
||||
# pgproto3
|
||||
|
||||
Package pgproto3 is a encoder and decoder of the PostgreSQL wire protocol version 3.
|
||||
Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
|
||||
|
||||
pgproto3 can be used as a foundation for PostgreSQL drivers, proxies, mock servers, load balancers and more.
|
||||
|
||||
|
4 vendor/github.com/jackc/pgx/v5/pgproto3/doc.go generated vendored
@ -1,7 +1,7 @@
|
||||
// Package pgproto3 is a encoder and decoder of the PostgreSQL wire protocol version 3.
|
||||
// Package pgproto3 is an encoder and decoder of the PostgreSQL wire protocol version 3.
|
||||
//
|
||||
// The primary interfaces are Frontend and Backend. They correspond to a client and server respectively. Messages are
|
||||
// sent with Send (or a specialized Send variant). Messages are automatically bufferred to minimize small writes. Call
|
||||
// sent with Send (or a specialized Send variant). Messages are automatically buffered to minimize small writes. Call
|
||||
// Flush to ensure a message has actually been sent.
|
||||
//
|
||||
// The Trace method of Frontend and Backend can be used to examine the wire-level message traffic. It outputs in a
|
||||
|
2 vendor/github.com/jackc/pgx/v5/pgproto3/frontend.go generated vendored
@ -156,7 +156,7 @@ func (f *Frontend) SendDescribe(msg *Describe) {
|
||||
}
|
||||
}
|
||||
|
||||
// SendExecute sends a Execute message to the backend (i.e. the server). The message is not guaranteed to be written until
|
||||
// SendExecute sends an Execute message to the backend (i.e. the server). The message is not guaranteed to be written until
|
||||
// Flush is called.
|
||||
func (f *Frontend) SendExecute(msg *Execute) {
|
||||
prevLen := len(f.wbuf)
|
||||
|
4 vendor/github.com/jackc/pgx/v5/pgproto3/startup_message.go generated vendored
@ -38,14 +38,14 @@ func (dst *StartupMessage) Decode(src []byte) error {
|
||||
for {
|
||||
idx := bytes.IndexByte(src[rp:], 0)
|
||||
if idx < 0 {
|
||||
return &invalidMessageFormatErr{messageType: "StartupMesage"}
|
||||
return &invalidMessageFormatErr{messageType: "StartupMessage"}
|
||||
}
|
||||
key := string(src[rp : rp+idx])
|
||||
rp += idx + 1
|
||||
|
||||
idx = bytes.IndexByte(src[rp:], 0)
|
||||
if idx < 0 {
|
||||
return &invalidMessageFormatErr{messageType: "StartupMesage"}
|
||||
return &invalidMessageFormatErr{messageType: "StartupMessage"}
|
||||
}
|
||||
value := string(src[rp : rp+idx])
|
||||
rp += idx + 1
|
||||
|
22 vendor/github.com/jackc/pgx/v5/pgtype/array.go generated vendored
@ -110,7 +110,7 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
|
||||
r, _, err := buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
var explicitDimensions []ArrayDimension
|
||||
@ -122,7 +122,7 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
for {
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
if r == '=' {
|
||||
@ -133,12 +133,12 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
|
||||
lower, err := arrayParseInteger(buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
if r != ':' {
|
||||
@ -147,12 +147,12 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
|
||||
upper, err := arrayParseInteger(buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
if r != ']' {
|
||||
@ -164,12 +164,12 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if r != '{' {
|
||||
return nil, fmt.Errorf("invalid array, expected '{': %v", err)
|
||||
return nil, fmt.Errorf("invalid array, expected '{' got %v", r)
|
||||
}
|
||||
|
||||
implicitDimensions := []ArrayDimension{{LowerBound: 1, Length: 0}}
|
||||
@ -178,7 +178,7 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
for {
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
if r == '{' {
|
||||
@ -195,7 +195,7 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
for {
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
switch r {
|
||||
@ -214,7 +214,7 @@ func parseUntypedTextArray(src string) (*untypedTextArray, error) {
|
||||
buf.UnreadRune()
|
||||
value, quoted, err := arrayParseValue(buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array value: %v", err)
|
||||
return nil, fmt.Errorf("invalid array value: %w", err)
|
||||
}
|
||||
if currentDim == counterDim {
|
||||
implicitDimensions[currentDim].Length++
|
||||
|
4 vendor/github.com/jackc/pgx/v5/pgtype/builtin_wrappers.go generated vendored
@ -231,7 +231,7 @@ func (w *uint64Wrapper) ScanNumeric(v Numeric) error {
|
||||
|
||||
bi, err := v.toBigInt()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot scan into *uint64: %v", err)
|
||||
return fmt.Errorf("cannot scan into *uint64: %w", err)
|
||||
}
|
||||
|
||||
if !bi.IsUint64() {
|
||||
@ -284,7 +284,7 @@ func (w *uintWrapper) ScanNumeric(v Numeric) error {
|
||||
|
||||
bi, err := v.toBigInt()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot scan into *uint: %v", err)
|
||||
return fmt.Errorf("cannot scan into *uint: %w", err)
|
||||
}
|
||||
|
||||
if !bi.IsUint64() {
|
||||
|
6 vendor/github.com/jackc/pgx/v5/pgtype/date.go generated vendored
@ -282,17 +282,17 @@ func (scanPlanTextAnyToDateScanner) Scan(src []byte, dst any) error {
|
||||
if match != nil {
|
||||
year, err := strconv.ParseInt(match[1], 10, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("BUG: cannot parse date that regexp matched (year): %v", err)
|
||||
return fmt.Errorf("BUG: cannot parse date that regexp matched (year): %w", err)
|
||||
}
|
||||
|
||||
month, err := strconv.ParseInt(match[2], 10, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("BUG: cannot parse date that regexp matched (month): %v", err)
|
||||
return fmt.Errorf("BUG: cannot parse date that regexp matched (month): %w", err)
|
||||
}
|
||||
|
||||
day, err := strconv.ParseInt(match[3], 10, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("BUG: cannot parse date that regexp matched (month): %v", err)
|
||||
return fmt.Errorf("BUG: cannot parse date that regexp matched (month): %w", err)
|
||||
}
|
||||
|
||||
// BC matched
|
||||
|
4 vendor/github.com/jackc/pgx/v5/pgtype/doc.go generated vendored
@ -67,7 +67,7 @@ See example_custom_type_test.go for an example of a custom type for the PostgreS
|
||||
|
||||
Sometimes pgx supports a PostgreSQL type such as numeric but the Go type is in an external package that does not have
|
||||
pgx support such as github.com/shopspring/decimal. These types can be registered with pgtype with custom conversion
|
||||
logic. See https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid for a example
|
||||
logic. See https://github.com/jackc/pgx-shopspring-decimal and https://github.com/jackc/pgx-gofrs-uuid for example
|
||||
integrations.
|
||||
|
||||
New PostgreSQL Type Support
|
||||
@ -149,7 +149,7 @@ Overview of Scanning Implementation
|
||||
The first step is to use the OID to lookup the correct Codec. If the OID is unavailable, Map will try to find the OID
|
||||
from previous calls of Map.RegisterDefaultPgType. The Map will call the Codec's PlanScan method to get a plan for
|
||||
scanning into the Go value. A Codec will support scanning into one or more Go types. Oftentime these Go types are
|
||||
interfaces rather than explicit types. For example, PointCodec can use any Go type that implments the PointScanner and
|
||||
interfaces rather than explicit types. For example, PointCodec can use any Go type that implements the PointScanner and
|
||||
PointValuer interfaces.
|
||||
|
||||
If a Go value is not supported directly by a Codec then Map will try wrapping it with additional logic and try again.
|
||||
|
24 vendor/github.com/jackc/pgx/v5/pgtype/float4.go generated vendored
@ -3,6 +3,7 @@ package pgtype
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
@ -65,6 +66,29 @@ func (f Float4) Value() (driver.Value, error) {
|
||||
return float64(f.Float32), nil
|
||||
}
|
||||
|
||||
func (f Float4) MarshalJSON() ([]byte, error) {
|
||||
if !f.Valid {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return json.Marshal(f.Float32)
|
||||
}
|
||||
|
||||
func (f *Float4) UnmarshalJSON(b []byte) error {
|
||||
var n *float32
|
||||
err := json.Unmarshal(b, &n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n == nil {
|
||||
*f = Float4{}
|
||||
} else {
|
||||
*f = Float4{Float32: *n, Valid: true}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type Float4Codec struct{}
|
||||
|
||||
func (Float4Codec) FormatSupported(format int16) bool {
|
||||
|
30 vendor/github.com/jackc/pgx/v5/pgtype/float8.go generated vendored
@ -74,6 +74,29 @@ func (f Float8) Value() (driver.Value, error) {
|
||||
return f.Float64, nil
|
||||
}
|
||||
|
||||
func (f Float8) MarshalJSON() ([]byte, error) {
|
||||
if !f.Valid {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return json.Marshal(f.Float64)
|
||||
}
|
||||
|
||||
func (f *Float8) UnmarshalJSON(b []byte) error {
|
||||
var n *float64
|
||||
err := json.Unmarshal(b, &n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n == nil {
|
||||
*f = Float8{}
|
||||
} else {
|
||||
*f = Float8{Float64: *n, Valid: true}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
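With MarshalJSON and UnmarshalJSON now defined on the value receiver, null handling is symmetric for Float4 and Float8. A small sketch (not part of this commit) round-tripping a struct field:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

type reading struct {
	Temp pgtype.Float8 `json:"temp"`
}

func main() {
	// An invalid (SQL NULL) value marshals to JSON null, and null unmarshals
	// back to an invalid Float8.
	out, _ := json.Marshal(reading{Temp: pgtype.Float8{Float64: 21.5, Valid: true}})
	fmt.Println(string(out)) // {"temp":21.5}

	var r reading
	_ = json.Unmarshal([]byte(`{"temp":null}`), &r)
	fmt.Println(r.Temp.Valid) // false
}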
|
||||
|
||||
type Float8Codec struct{}
|
||||
|
||||
func (Float8Codec) FormatSupported(format int16) bool {
|
||||
@ -109,13 +132,6 @@ func (Float8Codec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Float8) MarshalJSON() ([]byte, error) {
|
||||
if !f.Valid {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
return json.Marshal(f.Float64)
|
||||
}
|
||||
|
||||
type encodePlanFloat8CodecBinaryFloat64 struct{}
|
||||
|
||||
func (encodePlanFloat8CodecBinaryFloat64) Encode(value any, buf []byte) (newBuf []byte, err error) {
|
||||
|
2 vendor/github.com/jackc/pgx/v5/pgtype/inet.go generated vendored
@ -156,7 +156,7 @@ func (scanPlanBinaryInetToNetipPrefixScanner) Scan(src []byte, dst any) error {
|
||||
}
|
||||
|
||||
if len(src) != 8 && len(src) != 20 {
|
||||
return fmt.Errorf("Received an invalid size for a inet: %d", len(src))
|
||||
return fmt.Errorf("Received an invalid size for an inet: %d", len(src))
|
||||
}
|
||||
|
||||
// ignore family
|
||||
|
16 vendor/github.com/jackc/pgx/v5/pgtype/interval.go generated vendored
@ -179,7 +179,7 @@ func (scanPlanBinaryIntervalToIntervalScanner) Scan(src []byte, dst any) error {
|
||||
}
|
||||
|
||||
if len(src) != 16 {
|
||||
return fmt.Errorf("Received an invalid size for a interval: %d", len(src))
|
||||
return fmt.Errorf("Received an invalid size for an interval: %d", len(src))
|
||||
}
|
||||
|
||||
microseconds := int64(binary.BigEndian.Uint64(src))
|
||||
@ -242,21 +242,21 @@ func (scanPlanTextAnyToIntervalScanner) Scan(src []byte, dst any) error {
|
||||
return fmt.Errorf("bad interval minute format: %s", timeParts[1])
|
||||
}
|
||||
|
||||
secondParts := strings.SplitN(timeParts[2], ".", 2)
|
||||
sec, secFrac, secFracFound := strings.Cut(timeParts[2], ".")
|
||||
|
||||
seconds, err := strconv.ParseInt(secondParts[0], 10, 64)
|
||||
seconds, err := strconv.ParseInt(sec, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad interval second format: %s", secondParts[0])
|
||||
return fmt.Errorf("bad interval second format: %s", sec)
|
||||
}
|
||||
|
||||
var uSeconds int64
|
||||
if len(secondParts) == 2 {
|
||||
uSeconds, err = strconv.ParseInt(secondParts[1], 10, 64)
|
||||
if secFracFound {
|
||||
uSeconds, err = strconv.ParseInt(secFrac, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad interval decimal format: %s", secondParts[1])
|
||||
return fmt.Errorf("bad interval decimal format: %s", secFrac)
|
||||
}
|
||||
|
||||
for i := 0; i < 6-len(secondParts[1]); i++ {
|
||||
for i := 0; i < 6-len(secFrac); i++ {
|
||||
uSeconds *= 10
|
||||
}
|
||||
}
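The interval scanner now splits the seconds field with strings.Cut instead of strings.SplitN. A stand-alone sketch (not part of this commit, with an illustrative value) of the seconds/fraction split it performs:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Split "12.3456" into whole seconds and a fractional part.
	sec, secFrac, secFracFound := strings.Cut("12.3456", ".")

	seconds, _ := strconv.ParseInt(sec, 10, 64)

	var uSeconds int64
	if secFracFound {
		uSeconds, _ = strconv.ParseInt(secFrac, 10, 64)
		// Scale the fraction up to microseconds (6 digits).
		for i := 0; i < 6-len(secFrac); i++ {
			uSeconds *= 10
		}
	}
	fmt.Println(seconds, uSeconds) // 12 345600
}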
|
||||
|
15 vendor/github.com/jackc/pgx/v5/pgtype/json.go generated vendored
@ -25,18 +25,21 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
|
||||
case []byte:
|
||||
return encodePlanJSONCodecEitherFormatByteSlice{}
|
||||
|
||||
// Cannot rely on driver.Valuer being handled later because anything can be marshalled.
|
||||
//
|
||||
// https://github.com/jackc/pgx/issues/1430
|
||||
//
|
||||
// Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to beused
|
||||
// when both are implemented https://github.com/jackc/pgx/issues/1805
|
||||
case driver.Valuer:
|
||||
return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
|
||||
|
||||
// Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
|
||||
// marshalled.
|
||||
//
|
||||
// https://github.com/jackc/pgx/issues/1681
|
||||
case json.Marshaler:
|
||||
return encodePlanJSONCodecEitherFormatMarshal{}
|
||||
|
||||
// Cannot rely on driver.Valuer being handled later because anything can be marshalled.
|
||||
//
|
||||
// https://github.com/jackc/pgx/issues/1430
|
||||
case driver.Valuer:
|
||||
return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
|
||||
}
|
||||
|
||||
// Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
|
||||
|
8 vendor/github.com/jackc/pgx/v5/pgtype/multirange.go generated vendored
@ -339,18 +339,18 @@ func parseUntypedTextMultirange(src []byte) ([]string, error) {
|
||||
|
||||
r, _, err := buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid array: %v", err)
|
||||
return nil, fmt.Errorf("invalid array: %w", err)
|
||||
}
|
||||
|
||||
if r != '{' {
|
||||
return nil, fmt.Errorf("invalid multirange, expected '{': %v", err)
|
||||
return nil, fmt.Errorf("invalid multirange, expected '{' got %v", r)
|
||||
}
|
||||
|
||||
parseValueLoop:
|
||||
for {
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid multirange: %v", err)
|
||||
return nil, fmt.Errorf("invalid multirange: %w", err)
|
||||
}
|
||||
|
||||
switch r {
|
||||
@ -361,7 +361,7 @@ parseValueLoop:
|
||||
buf.UnreadRune()
|
||||
value, err := parseRange(buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid multirange value: %v", err)
|
||||
return nil, fmt.Errorf("invalid multirange value: %w", err)
|
||||
}
|
||||
elements = append(elements, value)
|
||||
}
|
||||
|
20 vendor/github.com/jackc/pgx/v5/pgtype/numeric.go generated vendored
@ -119,6 +119,26 @@ func (n Numeric) Int64Value() (Int8, error) {
|
||||
return Int8{Int64: bi.Int64(), Valid: true}, nil
|
||||
}
|
||||
|
||||
func (n *Numeric) ScanScientific(src string) error {
|
||||
if !strings.ContainsAny("eE", src) {
|
||||
return scanPlanTextAnyToNumericScanner{}.Scan([]byte(src), n)
|
||||
}
|
||||
|
||||
if bigF, ok := new(big.Float).SetString(string(src)); ok {
|
||||
smallF, _ := bigF.Float64()
|
||||
src = strconv.FormatFloat(smallF, 'f', -1, 64)
|
||||
}
|
||||
|
||||
num, exp, err := parseNumericString(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*n = Numeric{Int: num, Exp: exp, Valid: true}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Numeric) toBigInt() (*big.Int, error) {
|
||||
if n.Exp == 0 {
|
||||
return n.Int, nil
|
||||
|
15 vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go generated vendored
@ -1358,6 +1358,8 @@ var kindToTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
|
||||
reflect.Bool: reflect.TypeOf(false),
|
||||
}
|
||||
|
||||
var byteSliceType = reflect.TypeOf([]byte{})
|
||||
|
||||
type underlyingTypeEncodePlan struct {
|
||||
nextValueType reflect.Type
|
||||
next EncodePlan
|
||||
@ -1372,6 +1374,10 @@ func (plan *underlyingTypeEncodePlan) Encode(value any, buf []byte) (newBuf []by
|
||||
// TryWrapFindUnderlyingTypeEncodePlan tries to convert to a Go builtin type. e.g. If value was of type MyString and
|
||||
// MyString was defined as a string then a wrapper plan would be returned that converts MyString to string.
|
||||
func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
|
||||
if value == nil {
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
if _, ok := value.(driver.Valuer); ok {
|
||||
return nil, nil, false
|
||||
}
|
||||
@ -1387,6 +1393,15 @@ func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextS
|
||||
return &underlyingTypeEncodePlan{nextValueType: nextValueType}, refValue.Convert(nextValueType).Interface(), true
|
||||
}
|
||||
|
||||
// []byte is a special case. It is a slice but we treat it as a scalar type. In the case of a named type like
|
||||
// json.RawMessage which is defined as []byte the underlying type should be considered as []byte. But any other slice
|
||||
// does not have a special underlying type.
|
||||
//
|
||||
// https://github.com/jackc/pgx/issues/1763
|
||||
if refValue.Type() != byteSliceType && refValue.Type().AssignableTo(byteSliceType) {
|
||||
return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
|
||||
}
|
||||
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
|
16 vendor/github.com/jackc/pgx/v5/pgtype/point.go generated vendored
@ -50,17 +50,17 @@ func parsePoint(src []byte) (*Point, error) {
|
||||
if src[0] == '"' && src[len(src)-1] == '"' {
|
||||
src = src[1 : len(src)-1]
|
||||
}
|
||||
parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
|
||||
if len(parts) < 2 {
|
||||
sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
|
||||
if !found {
|
||||
return nil, fmt.Errorf("invalid format for point")
|
||||
}
|
||||
|
||||
x, err := strconv.ParseFloat(parts[0], 64)
|
||||
x, err := strconv.ParseFloat(sx, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
y, err := strconv.ParseFloat(parts[1], 64)
|
||||
y, err := strconv.ParseFloat(sy, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -247,17 +247,17 @@ func (scanPlanTextAnyToPointScanner) Scan(src []byte, dst any) error {
|
||||
return fmt.Errorf("invalid length for point: %v", len(src))
|
||||
}
|
||||
|
||||
parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
|
||||
if len(parts) < 2 {
|
||||
sx, sy, found := strings.Cut(string(src[1:len(src)-1]), ",")
|
||||
if !found {
|
||||
return fmt.Errorf("invalid format for point")
|
||||
}
|
||||
|
||||
x, err := strconv.ParseFloat(parts[0], 64)
|
||||
x, err := strconv.ParseFloat(sx, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
y, err := strconv.ParseFloat(parts[1], 64)
|
||||
y, err := strconv.ParseFloat(sy, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
14 vendor/github.com/jackc/pgx/v5/pgtype/range.go generated vendored
@ -40,7 +40,7 @@ func parseUntypedTextRange(src string) (*untypedTextRange, error) {
|
||||
|
||||
r, _, err := buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid lower bound: %v", err)
|
||||
return nil, fmt.Errorf("invalid lower bound: %w", err)
|
||||
}
|
||||
switch r {
|
||||
case '(':
|
||||
@ -53,7 +53,7 @@ func parseUntypedTextRange(src string) (*untypedTextRange, error) {
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid lower value: %v", err)
|
||||
return nil, fmt.Errorf("invalid lower value: %w", err)
|
||||
}
|
||||
buf.UnreadRune()
|
||||
|
||||
@ -62,13 +62,13 @@ func parseUntypedTextRange(src string) (*untypedTextRange, error) {
|
||||
} else {
|
||||
utr.Lower, err = rangeParseValue(buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid lower value: %v", err)
|
||||
return nil, fmt.Errorf("invalid lower value: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("missing range separator: %v", err)
|
||||
return nil, fmt.Errorf("missing range separator: %w", err)
|
||||
}
|
||||
if r != ',' {
|
||||
return nil, fmt.Errorf("missing range separator: %v", r)
|
||||
@ -76,7 +76,7 @@ func parseUntypedTextRange(src string) (*untypedTextRange, error) {
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid upper value: %v", err)
|
||||
return nil, fmt.Errorf("invalid upper value: %w", err)
|
||||
}
|
||||
|
||||
if r == ')' || r == ']' {
|
||||
@ -85,12 +85,12 @@ func parseUntypedTextRange(src string) (*untypedTextRange, error) {
|
||||
buf.UnreadRune()
|
||||
utr.Upper, err = rangeParseValue(buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid upper value: %v", err)
|
||||
return nil, fmt.Errorf("invalid upper value: %w", err)
|
||||
}
|
||||
|
||||
r, _, err = buf.ReadRune()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("missing upper bound: %v", err)
|
||||
return nil, fmt.Errorf("missing upper bound: %w", err)
|
||||
}
|
||||
switch r {
|
||||
case ')':
|
||||
|
16 vendor/github.com/jackc/pgx/v5/pgtype/range_codec.go generated vendored
@ -120,7 +120,7 @@ func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byt
|
||||
|
||||
buf, err = lowerPlan.Encode(lower, buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %v", lower, err)
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
|
||||
@ -144,7 +144,7 @@ func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byt
|
||||
|
||||
buf, err = upperPlan.Encode(upper, buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %v", upper, err)
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
|
||||
@ -194,7 +194,7 @@ func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte)
|
||||
|
||||
buf, err = lowerPlan.Encode(lower, buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %v", lower, err)
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
|
||||
@ -215,7 +215,7 @@ func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte)
|
||||
|
||||
buf, err = upperPlan.Encode(upper, buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %v", upper, err)
|
||||
return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
|
||||
}
|
||||
if buf == nil {
|
||||
return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
|
||||
@ -282,7 +282,7 @@ func (plan *scanPlanBinaryRangeToRangeScanner) Scan(src []byte, target any) erro
|
||||
|
||||
err = lowerPlan.Scan(ubr.Lower, lowerTarget)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot scan into %v from range element: %v", lowerTarget, err)
|
||||
return fmt.Errorf("cannot scan into %v from range element: %w", lowerTarget, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -294,7 +294,7 @@ func (plan *scanPlanBinaryRangeToRangeScanner) Scan(src []byte, target any) erro
|
||||
|
||||
err = upperPlan.Scan(ubr.Upper, upperTarget)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot scan into %v from range element: %v", upperTarget, err)
|
||||
return fmt.Errorf("cannot scan into %v from range element: %w", upperTarget, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -332,7 +332,7 @@ func (plan *scanPlanTextRangeToRangeScanner) Scan(src []byte, target any) error
|
||||
|
||||
err = lowerPlan.Scan([]byte(utr.Lower), lowerTarget)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot scan into %v from range element: %v", lowerTarget, err)
|
||||
return fmt.Errorf("cannot scan into %v from range element: %w", lowerTarget, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -344,7 +344,7 @@ func (plan *scanPlanTextRangeToRangeScanner) Scan(src []byte, target any) error
|
||||
|
||||
err = upperPlan.Scan([]byte(utr.Upper), upperTarget)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot scan into %v from range element: %v", upperTarget, err)
|
||||
return fmt.Errorf("cannot scan into %v from range element: %w", upperTarget, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
8 vendor/github.com/jackc/pgx/v5/pgtype/tid.go generated vendored
@ -205,17 +205,17 @@ func (scanPlanTextAnyToTIDScanner) Scan(src []byte, dst any) error {
|
||||
return fmt.Errorf("invalid length for tid: %v", len(src))
|
||||
}
|
||||
|
||||
parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
|
||||
if len(parts) < 2 {
|
||||
block, offset, found := strings.Cut(string(src[1:len(src)-1]), ",")
|
||||
if !found {
|
||||
return fmt.Errorf("invalid format for tid")
|
||||
}
|
||||
|
||||
blockNumber, err := strconv.ParseUint(parts[0], 10, 32)
|
||||
blockNumber, err := strconv.ParseUint(block, 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
offsetNumber, err := strconv.ParseUint(parts[1], 10, 16)
|
||||
offsetNumber, err := strconv.ParseUint(offset, 10, 16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
52 vendor/github.com/jackc/pgx/v5/pgxpool/batch_results.go generated vendored Normal file
@ -0,0 +1,52 @@
|
||||
package pgxpool
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type errBatchResults struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (br errBatchResults) Exec() (pgconn.CommandTag, error) {
|
||||
return pgconn.CommandTag{}, br.err
|
||||
}
|
||||
|
||||
func (br errBatchResults) Query() (pgx.Rows, error) {
|
||||
return errRows{err: br.err}, br.err
|
||||
}
|
||||
|
||||
func (br errBatchResults) QueryRow() pgx.Row {
|
||||
return errRow{err: br.err}
|
||||
}
|
||||
|
||||
func (br errBatchResults) Close() error {
|
||||
return br.err
|
||||
}
|
||||
|
||||
type poolBatchResults struct {
|
||||
br pgx.BatchResults
|
||||
c *Conn
|
||||
}
|
||||
|
||||
func (br *poolBatchResults) Exec() (pgconn.CommandTag, error) {
|
||||
return br.br.Exec()
|
||||
}
|
||||
|
||||
func (br *poolBatchResults) Query() (pgx.Rows, error) {
|
||||
return br.br.Query()
|
||||
}
|
||||
|
||||
func (br *poolBatchResults) QueryRow() pgx.Row {
|
||||
return br.br.QueryRow()
|
||||
}
|
||||
|
||||
func (br *poolBatchResults) Close() error {
|
||||
err := br.br.Close()
|
||||
if br.c != nil {
|
||||
br.c.Release()
|
||||
br.c = nil
|
||||
}
|
||||
return err
|
||||
}
|
130 vendor/github.com/jackc/pgx/v5/pgxpool/conn.go generated vendored Normal file
@ -0,0 +1,130 @@
|
||||
package pgxpool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/puddle/v2"
|
||||
)
|
||||
|
||||
// Conn is an acquired *pgx.Conn from a Pool.
|
||||
type Conn struct {
|
||||
res *puddle.Resource[*connResource]
|
||||
p *Pool
|
||||
}
|
||||
|
||||
// Release returns c to the pool it was acquired from. Once Release has been called, other methods must not be called.
|
||||
// However, it is safe to call Release multiple times. Subsequent calls after the first will be ignored.
|
||||
func (c *Conn) Release() {
|
||||
if c.res == nil {
|
||||
return
|
||||
}
|
||||
|
||||
conn := c.Conn()
|
||||
res := c.res
|
||||
c.res = nil
|
||||
|
||||
if conn.IsClosed() || conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' {
|
||||
res.Destroy()
|
||||
// Signal to the health check to run since we just destroyed a connections
|
||||
// and we might be below minConns now
|
||||
c.p.triggerHealthCheck()
|
||||
return
|
||||
}
|
||||
|
||||
// If the pool is consistently being used, we might never get to check the
|
||||
// lifetime of a connection since we only check idle connections in checkConnsHealth
|
||||
// so we also check the lifetime here and force a health check
|
||||
if c.p.isExpired(res) {
|
||||
atomic.AddInt64(&c.p.lifetimeDestroyCount, 1)
|
||||
res.Destroy()
|
||||
// Signal to the health check to run since we just destroyed a connections
|
||||
// and we might be below minConns now
|
||||
c.p.triggerHealthCheck()
|
||||
return
|
||||
}
|
||||
|
||||
if c.p.afterRelease == nil {
|
||||
res.Release()
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
if c.p.afterRelease(conn) {
|
||||
res.Release()
|
||||
} else {
|
||||
res.Destroy()
|
||||
// Signal to the health check to run since we just destroyed a connections
|
||||
// and we might be below minConns now
|
||||
c.p.triggerHealthCheck()
|
||||
}
|
||||
}()
|
||||
}
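A minimal sketch (not part of this commit) of the acquire/release flow described above; pool setup and the query are assumed placeholders.

import (
	"context"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
)

func useAcquiredConn(ctx context.Context, pool *pgxpool.Pool) error {
	// Acquire checks out a dedicated connection; Release returns it to the
	// pool, or destroys it if it is closed, busy, or still in a transaction.
	c, err := pool.Acquire(ctx)
	if err != nil {
		return err
	}
	defer c.Release() // safe to call more than once

	var now time.Time
	return c.QueryRow(ctx, "select now()").Scan(&now)
}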
|
||||
|
||||
// Hijack assumes ownership of the connection from the pool. Caller is responsible for closing the connection. Hijack
|
||||
// will panic if called on an already released or hijacked connection.
|
||||
func (c *Conn) Hijack() *pgx.Conn {
|
||||
if c.res == nil {
|
||||
panic("cannot hijack already released or hijacked connection")
|
||||
}
|
||||
|
||||
conn := c.Conn()
|
||||
res := c.res
|
||||
c.res = nil
|
||||
|
||||
res.Hijack()
|
||||
|
||||
return conn
|
||||
}
|
||||
|
||||
func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
|
||||
return c.Conn().Exec(ctx, sql, arguments...)
|
||||
}
|
||||
|
||||
func (c *Conn) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
|
||||
return c.Conn().Query(ctx, sql, args...)
|
||||
}
|
||||
|
||||
func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
|
||||
return c.Conn().QueryRow(ctx, sql, args...)
|
||||
}
|
||||
|
||||
func (c *Conn) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
|
||||
return c.Conn().SendBatch(ctx, b)
|
||||
}
|
||||
|
||||
func (c *Conn) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
|
||||
return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
|
||||
}
|
||||
|
||||
// Begin starts a transaction block from the *Conn without explicitly setting a transaction mode (see BeginTx with TxOptions if transaction mode is required).
|
||||
func (c *Conn) Begin(ctx context.Context) (pgx.Tx, error) {
|
||||
return c.Conn().Begin(ctx)
|
||||
}
|
||||
|
||||
// BeginTx starts a transaction block from the *Conn with txOptions determining the transaction mode.
|
||||
func (c *Conn) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
|
||||
return c.Conn().BeginTx(ctx, txOptions)
|
||||
}
|
||||
|
||||
func (c *Conn) Ping(ctx context.Context) error {
|
||||
return c.Conn().Ping(ctx)
|
||||
}
|
||||
|
||||
func (c *Conn) Conn() *pgx.Conn {
|
||||
return c.connResource().conn
|
||||
}
|
||||
|
||||
func (c *Conn) connResource() *connResource {
|
||||
return c.res.Value()
|
||||
}
|
||||
|
||||
func (c *Conn) getPoolRow(r pgx.Row) *poolRow {
|
||||
return c.connResource().getPoolRow(c, r)
|
||||
}
|
||||
|
||||
func (c *Conn) getPoolRows(r pgx.Rows) *poolRows {
|
||||
return c.connResource().getPoolRows(c, r)
|
||||
}
|
27 vendor/github.com/jackc/pgx/v5/pgxpool/doc.go generated vendored Normal file
@ -0,0 +1,27 @@
// Package pgxpool is a concurrency-safe connection pool for pgx.
/*
pgxpool implements a nearly identical interface to pgx connections.

Creating a Pool

The primary way of creating a pool is with [pgxpool.New]:

	pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))

The database connection string can be in URL or DSN format. PostgreSQL settings, pgx settings, and pool settings can be
specified here. In addition, a config struct can be created by [ParseConfig].

	config, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
	if err != nil {
		// ...
	}
	config.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
		// do something with every new connection
	}

	pool, err := pgxpool.NewWithConfig(context.Background(), config)

A pool returns without waiting for any connections to be established. Acquire a connection immediately after creating
the pool to check if a connection can successfully be established.
*/
package pgxpool
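An illustrative sketch of the advice above, assuming DATABASE_URL is set and the server is reachable: since New returns without establishing any connections, a Ping right after creation verifies that a connection can actually be made.

package main

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err) // bad connection string or config
	}
	defer pool.Close()

	// New does not connect eagerly; Ping acquires a connection and verifies the server is reachable.
	if err := pool.Ping(ctx); err != nil {
		log.Fatal(err)
	}
}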
695
vendor/github.com/jackc/pgx/v5/pgxpool/pool.go
generated
vendored
Normal file
695
vendor/github.com/jackc/pgx/v5/pgxpool/pool.go
generated
vendored
Normal file
@ -0,0 +1,695 @@
|
||||
package pgxpool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/puddle/v2"
|
||||
)
|
||||
|
||||
var defaultMaxConns = int32(4)
|
||||
var defaultMinConns = int32(0)
|
||||
var defaultMaxConnLifetime = time.Hour
|
||||
var defaultMaxConnIdleTime = time.Minute * 30
|
||||
var defaultHealthCheckPeriod = time.Minute
|
||||
|
||||
type connResource struct {
|
||||
conn *pgx.Conn
|
||||
conns []Conn
|
||||
poolRows []poolRow
|
||||
poolRowss []poolRows
|
||||
maxAgeTime time.Time
|
||||
}
|
||||
|
||||
func (cr *connResource) getConn(p *Pool, res *puddle.Resource[*connResource]) *Conn {
|
||||
if len(cr.conns) == 0 {
|
||||
cr.conns = make([]Conn, 128)
|
||||
}
|
||||
|
||||
c := &cr.conns[len(cr.conns)-1]
|
||||
cr.conns = cr.conns[0 : len(cr.conns)-1]
|
||||
|
||||
c.res = res
|
||||
c.p = p
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (cr *connResource) getPoolRow(c *Conn, r pgx.Row) *poolRow {
|
||||
if len(cr.poolRows) == 0 {
|
||||
cr.poolRows = make([]poolRow, 128)
|
||||
}
|
||||
|
||||
pr := &cr.poolRows[len(cr.poolRows)-1]
|
||||
cr.poolRows = cr.poolRows[0 : len(cr.poolRows)-1]
|
||||
|
||||
pr.c = c
|
||||
pr.r = r
|
||||
|
||||
return pr
|
||||
}
|
||||
|
||||
func (cr *connResource) getPoolRows(c *Conn, r pgx.Rows) *poolRows {
|
||||
if len(cr.poolRowss) == 0 {
|
||||
cr.poolRowss = make([]poolRows, 128)
|
||||
}
|
||||
|
||||
pr := &cr.poolRowss[len(cr.poolRowss)-1]
|
||||
cr.poolRowss = cr.poolRowss[0 : len(cr.poolRowss)-1]
|
||||
|
||||
pr.c = c
|
||||
pr.r = r
|
||||
|
||||
return pr
|
||||
}
|
||||
|
||||
// Pool allows for connection reuse.
|
||||
type Pool struct {
|
||||
// 64 bit fields accessed with atomics must be at beginning of struct to guarantee alignment for certain 32-bit
|
||||
// architectures. See BUGS section of https://pkg.go.dev/sync/atomic and https://github.com/jackc/pgx/issues/1288.
|
||||
newConnsCount int64
|
||||
lifetimeDestroyCount int64
|
||||
idleDestroyCount int64
|
||||
|
||||
p *puddle.Pool[*connResource]
|
||||
config *Config
|
||||
beforeConnect func(context.Context, *pgx.ConnConfig) error
|
||||
afterConnect func(context.Context, *pgx.Conn) error
|
||||
beforeAcquire func(context.Context, *pgx.Conn) bool
|
||||
afterRelease func(*pgx.Conn) bool
|
||||
beforeClose func(*pgx.Conn)
|
||||
minConns int32
|
||||
maxConns int32
|
||||
maxConnLifetime time.Duration
|
||||
maxConnLifetimeJitter time.Duration
|
||||
maxConnIdleTime time.Duration
|
||||
healthCheckPeriod time.Duration
|
||||
|
||||
healthCheckChan chan struct{}
|
||||
|
||||
closeOnce sync.Once
|
||||
closeChan chan struct{}
|
||||
}
|
||||
|
||||
// Config is the configuration struct for creating a pool. It must be created by [ParseConfig] and then it can be
// modified.
type Config struct {
	ConnConfig *pgx.ConnConfig

	// BeforeConnect is called before a new connection is made. It is passed a copy of the underlying pgx.ConnConfig and
	// will not impact any existing open connections.
	BeforeConnect func(context.Context, *pgx.ConnConfig) error

	// AfterConnect is called after a connection is established, but before it is added to the pool.
	AfterConnect func(context.Context, *pgx.Conn) error

	// BeforeAcquire is called before a connection is acquired from the pool. It must return true to allow the
	// acquisition or false to indicate that the connection should be destroyed and a different connection should be
	// acquired.
	BeforeAcquire func(context.Context, *pgx.Conn) bool

	// AfterRelease is called after a connection is released, but before it is returned to the pool. It must return true to
	// return the connection to the pool or false to destroy the connection.
	AfterRelease func(*pgx.Conn) bool

	// BeforeClose is called right before a connection is closed and removed from the pool.
	BeforeClose func(*pgx.Conn)

	// MaxConnLifetime is the duration since creation after which a connection will be automatically closed.
	MaxConnLifetime time.Duration

	// MaxConnLifetimeJitter is the duration after MaxConnLifetime to randomly decide to close a connection.
	// This helps prevent all connections from being closed at the exact same time, starving the pool.
	MaxConnLifetimeJitter time.Duration

	// MaxConnIdleTime is the duration after which an idle connection will be automatically closed by the health check.
	MaxConnIdleTime time.Duration

	// MaxConns is the maximum size of the pool. The default is the greater of 4 or runtime.NumCPU().
	MaxConns int32

	// MinConns is the minimum size of the pool. After connection closes, the pool might dip below MinConns. A low
	// number of MinConns might mean the pool is empty after MaxConnLifetime until the health check has a chance
	// to create new connections.
	MinConns int32

	// HealthCheckPeriod is the duration between checks of the health of idle connections.
	HealthCheckPeriod time.Duration

	createdByParseConfig bool // Used to enforce created by ParseConfig rule.
}
|
||||
|
||||
// Copy returns a deep copy of the config that is safe to use and modify.
|
||||
// The only exception is the tls.Config:
|
||||
// according to the tls.Config docs it must not be modified after creation.
|
||||
func (c *Config) Copy() *Config {
|
||||
newConfig := new(Config)
|
||||
*newConfig = *c
|
||||
newConfig.ConnConfig = c.ConnConfig.Copy()
|
||||
return newConfig
|
||||
}
|
||||
|
||||
// ConnString returns the connection string as parsed by pgxpool.ParseConfig into pgxpool.Config.
|
||||
func (c *Config) ConnString() string { return c.ConnConfig.ConnString() }
|
||||
|
||||
// New creates a new Pool. See [ParseConfig] for information on connString format.
|
||||
func New(ctx context.Context, connString string) (*Pool, error) {
|
||||
config, err := ParseConfig(connString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewWithConfig(ctx, config)
|
||||
}
|
||||
|
||||
// NewWithConfig creates a new Pool. config must have been created by [ParseConfig].
|
||||
func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
|
||||
// Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
|
||||
// zero values.
|
||||
if !config.createdByParseConfig {
|
||||
panic("config must be created by ParseConfig")
|
||||
}
|
||||
|
||||
p := &Pool{
|
||||
config: config,
|
||||
beforeConnect: config.BeforeConnect,
|
||||
afterConnect: config.AfterConnect,
|
||||
beforeAcquire: config.BeforeAcquire,
|
||||
afterRelease: config.AfterRelease,
|
||||
beforeClose: config.BeforeClose,
|
||||
minConns: config.MinConns,
|
||||
maxConns: config.MaxConns,
|
||||
maxConnLifetime: config.MaxConnLifetime,
|
||||
maxConnLifetimeJitter: config.MaxConnLifetimeJitter,
|
||||
maxConnIdleTime: config.MaxConnIdleTime,
|
||||
healthCheckPeriod: config.HealthCheckPeriod,
|
||||
healthCheckChan: make(chan struct{}, 1),
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
var err error
|
||||
p.p, err = puddle.NewPool(
|
||||
&puddle.Config[*connResource]{
|
||||
Constructor: func(ctx context.Context) (*connResource, error) {
|
||||
atomic.AddInt64(&p.newConnsCount, 1)
|
||||
connConfig := p.config.ConnConfig.Copy()
|
||||
|
||||
// Connection will continue in background even if Acquire is canceled. Ensure that a connect won't hang forever.
|
||||
if connConfig.ConnectTimeout <= 0 {
|
||||
connConfig.ConnectTimeout = 2 * time.Minute
|
||||
}
|
||||
|
||||
if p.beforeConnect != nil {
|
||||
if err := p.beforeConnect(ctx, connConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := pgx.ConnectConfig(ctx, connConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.afterConnect != nil {
|
||||
err = p.afterConnect(ctx, conn)
|
||||
if err != nil {
|
||||
conn.Close(ctx)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
jitterSecs := rand.Float64() * config.MaxConnLifetimeJitter.Seconds()
|
||||
maxAgeTime := time.Now().Add(config.MaxConnLifetime).Add(time.Duration(jitterSecs) * time.Second)
|
||||
|
||||
cr := &connResource{
|
||||
conn: conn,
|
||||
conns: make([]Conn, 64),
|
||||
poolRows: make([]poolRow, 64),
|
||||
poolRowss: make([]poolRows, 64),
|
||||
maxAgeTime: maxAgeTime,
|
||||
}
|
||||
|
||||
return cr, nil
|
||||
},
|
||||
Destructor: func(value *connResource) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||
conn := value.conn
|
||||
if p.beforeClose != nil {
|
||||
p.beforeClose(conn)
|
||||
}
|
||||
conn.Close(ctx)
|
||||
select {
|
||||
case <-conn.PgConn().CleanupDone():
|
||||
case <-ctx.Done():
|
||||
}
|
||||
cancel()
|
||||
},
|
||||
MaxSize: config.MaxConns,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
p.createIdleResources(ctx, int(p.minConns))
|
||||
p.backgroundHealthCheck()
|
||||
}()
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// ParseConfig builds a Config from connString. It parses connString with the same behavior as [pgx.ParseConfig] with the
// addition of the following variables:
//
//   - pool_max_conns: integer greater than 0
//   - pool_min_conns: integer 0 or greater
//   - pool_max_conn_lifetime: duration string
//   - pool_max_conn_idle_time: duration string
//   - pool_health_check_period: duration string
//   - pool_max_conn_lifetime_jitter: duration string
//
// See Config for definitions of these arguments.
//
// # Example DSN
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10
//
// # Example URL
// postgres://jack:secret@pg.example.com:5432/mydb?sslmode=verify-ca&pool_max_conns=10
func ParseConfig(connString string) (*Config, error) {
|
||||
connConfig, err := pgx.ParseConfig(connString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := &Config{
|
||||
ConnConfig: connConfig,
|
||||
createdByParseConfig: true,
|
||||
}
|
||||
|
||||
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conns"]; ok {
|
||||
delete(connConfig.Config.RuntimeParams, "pool_max_conns")
|
||||
n, err := strconv.ParseInt(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse pool_max_conns: %w", err)
|
||||
}
|
||||
if n < 1 {
|
||||
return nil, fmt.Errorf("pool_max_conns too small: %d", n)
|
||||
}
|
||||
config.MaxConns = int32(n)
|
||||
} else {
|
||||
config.MaxConns = defaultMaxConns
|
||||
if numCPU := int32(runtime.NumCPU()); numCPU > config.MaxConns {
|
||||
config.MaxConns = numCPU
|
||||
}
|
||||
}
|
||||
|
||||
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_min_conns"]; ok {
|
||||
delete(connConfig.Config.RuntimeParams, "pool_min_conns")
|
||||
n, err := strconv.ParseInt(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse pool_min_conns: %w", err)
|
||||
}
|
||||
config.MinConns = int32(n)
|
||||
} else {
|
||||
config.MinConns = defaultMinConns
|
||||
}
|
||||
|
||||
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime"]; ok {
|
||||
delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime")
|
||||
d, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid pool_max_conn_lifetime: %w", err)
|
||||
}
|
||||
config.MaxConnLifetime = d
|
||||
} else {
|
||||
config.MaxConnLifetime = defaultMaxConnLifetime
|
||||
}
|
||||
|
||||
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_idle_time"]; ok {
|
||||
delete(connConfig.Config.RuntimeParams, "pool_max_conn_idle_time")
|
||||
d, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid pool_max_conn_idle_time: %w", err)
|
||||
}
|
||||
config.MaxConnIdleTime = d
|
||||
} else {
|
||||
config.MaxConnIdleTime = defaultMaxConnIdleTime
|
||||
}
|
||||
|
||||
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_health_check_period"]; ok {
|
||||
delete(connConfig.Config.RuntimeParams, "pool_health_check_period")
|
||||
d, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid pool_health_check_period: %w", err)
|
||||
}
|
||||
config.HealthCheckPeriod = d
|
||||
} else {
|
||||
config.HealthCheckPeriod = defaultHealthCheckPeriod
|
||||
}
|
||||
|
||||
if s, ok := config.ConnConfig.Config.RuntimeParams["pool_max_conn_lifetime_jitter"]; ok {
|
||||
delete(connConfig.Config.RuntimeParams, "pool_max_conn_lifetime_jitter")
|
||||
d, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid pool_max_conn_lifetime_jitter: %w", err)
|
||||
}
|
||||
config.MaxConnLifetimeJitter = d
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
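A hedged sketch of the pool settings parsed above; the DSN and the tuning values are illustrative only.

package main

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	// pool_* keys are consumed by ParseConfig; everything else is passed through to pgx.ParseConfig.
	dsn := "host=localhost user=app dbname=appdb pool_max_conns=10 pool_min_conns=2"

	config, err := pgxpool.ParseConfig(dsn)
	if err != nil {
		log.Fatal(err)
	}

	// Values parsed from the DSN can still be adjusted on the Config before the pool is built.
	config.MaxConnLifetime = time.Hour
	config.MaxConnIdleTime = 15 * time.Minute

	pool, err := pgxpool.NewWithConfig(context.Background(), config)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
}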
|
||||
|
||||
// Close closes all connections in the pool and rejects future Acquire calls. Blocks until all connections are returned
|
||||
// to pool and closed.
|
||||
func (p *Pool) Close() {
|
||||
p.closeOnce.Do(func() {
|
||||
close(p.closeChan)
|
||||
p.p.Close()
|
||||
})
|
||||
}
|
||||
|
||||
func (p *Pool) isExpired(res *puddle.Resource[*connResource]) bool {
|
||||
return time.Now().After(res.Value().maxAgeTime)
|
||||
}
|
||||
|
||||
func (p *Pool) triggerHealthCheck() {
|
||||
go func() {
|
||||
// Destroy is asynchronous so we give it time to actually remove itself from
|
||||
// the pool otherwise we might try to check the pool size too soon
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
select {
|
||||
case p.healthCheckChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (p *Pool) backgroundHealthCheck() {
|
||||
ticker := time.NewTicker(p.healthCheckPeriod)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-p.closeChan:
|
||||
return
|
||||
case <-p.healthCheckChan:
|
||||
p.checkHealth()
|
||||
case <-ticker.C:
|
||||
p.checkHealth()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Pool) checkHealth() {
|
||||
for {
|
||||
// If checkMinConns failed we don't destroy any connections since we couldn't
|
||||
// even get to minConns
|
||||
if err := p.checkMinConns(); err != nil {
|
||||
// Should we log this error somewhere?
|
||||
break
|
||||
}
|
||||
if !p.checkConnsHealth() {
|
||||
// Since we didn't destroy any connections we can stop looping
|
||||
break
|
||||
}
|
||||
// Technically Destroy is asynchronous but 500ms should be enough for it to
|
||||
// remove it from the underlying pool
|
||||
select {
|
||||
case <-p.closeChan:
|
||||
return
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkConnsHealth will check all idle connections, destroy a connection if
|
||||
// it's idle or too old, and returns true if any were destroyed
|
||||
func (p *Pool) checkConnsHealth() bool {
|
||||
var destroyed bool
|
||||
totalConns := p.Stat().TotalConns()
|
||||
resources := p.p.AcquireAllIdle()
|
||||
for _, res := range resources {
|
||||
// We're okay going under minConns if the lifetime is up
|
||||
if p.isExpired(res) && totalConns >= p.minConns {
|
||||
atomic.AddInt64(&p.lifetimeDestroyCount, 1)
|
||||
res.Destroy()
|
||||
destroyed = true
|
||||
// Since Destroy is async we manually decrement totalConns.
|
||||
totalConns--
|
||||
} else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
|
||||
atomic.AddInt64(&p.idleDestroyCount, 1)
|
||||
res.Destroy()
|
||||
destroyed = true
|
||||
// Since Destroy is async we manually decrement totalConns.
|
||||
totalConns--
|
||||
} else {
|
||||
res.ReleaseUnused()
|
||||
}
|
||||
}
|
||||
return destroyed
|
||||
}
|
||||
|
||||
func (p *Pool) checkMinConns() error {
|
||||
// TotalConns can include ones that are being destroyed but we should have
|
||||
// sleep(500ms) around all of the destroys to help prevent that from throwing
|
||||
// off this check
|
||||
toCreate := p.minConns - p.Stat().TotalConns()
|
||||
if toCreate > 0 {
|
||||
return p.createIdleResources(context.Background(), int(toCreate))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Pool) createIdleResources(parentCtx context.Context, targetResources int) error {
|
||||
ctx, cancel := context.WithCancel(parentCtx)
|
||||
defer cancel()
|
||||
|
||||
errs := make(chan error, targetResources)
|
||||
|
||||
for i := 0; i < targetResources; i++ {
|
||||
go func() {
|
||||
err := p.p.CreateResource(ctx)
|
||||
// Ignore ErrNotAvailable since it means that the pool has become full since we started creating resource.
|
||||
if err == puddle.ErrNotAvailable {
|
||||
err = nil
|
||||
}
|
||||
errs <- err
|
||||
}()
|
||||
}
|
||||
|
||||
var firstError error
|
||||
for i := 0; i < targetResources; i++ {
|
||||
err := <-errs
|
||||
if err != nil && firstError == nil {
|
||||
cancel()
|
||||
firstError = err
|
||||
}
|
||||
}
|
||||
|
||||
return firstError
|
||||
}
|
||||
|
||||
// Acquire returns a connection (*Conn) from the Pool
|
||||
func (p *Pool) Acquire(ctx context.Context) (*Conn, error) {
|
||||
for {
|
||||
res, err := p.p.Acquire(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cr := res.Value()
|
||||
|
||||
if res.IdleDuration() > time.Second {
|
||||
err := cr.conn.Ping(ctx)
|
||||
if err != nil {
|
||||
res.Destroy()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
|
||||
return cr.getConn(p, res), nil
|
||||
}
|
||||
|
||||
res.Destroy()
|
||||
}
|
||||
}
|
||||
|
||||
// AcquireFunc acquires a *Conn and calls f with that *Conn. ctx will only affect the Acquire. It has no effect on the
// call of f. The return value is either an error acquiring the *Conn or the return value of f. The *Conn is
// automatically released after the call of f.
func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
	conn, err := p.Acquire(ctx)
	if err != nil {
		return err
	}
	defer conn.Release()

	return f(conn)
}
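A small usage sketch of AcquireFunc, assuming an existing pool and a reachable server; the connection is released automatically when the callback returns.

package main

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"
)

// countUsers is a sketch; the "users" table is hypothetical.
func countUsers(ctx context.Context, pool *pgxpool.Pool) (int64, error) {
	var n int64
	err := pool.AcquireFunc(ctx, func(c *pgxpool.Conn) error {
		// The *pgxpool.Conn is released automatically when this function returns.
		return c.QueryRow(ctx, "select count(*) from users").Scan(&n)
	})
	return n, err
}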
|
||||
|
||||
// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
|
||||
// keep-alive functionality. It does not update pool statistics.
|
||||
func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
|
||||
resources := p.p.AcquireAllIdle()
|
||||
conns := make([]*Conn, 0, len(resources))
|
||||
for _, res := range resources {
|
||||
cr := res.Value()
|
||||
if p.beforeAcquire == nil || p.beforeAcquire(ctx, cr.conn) {
|
||||
conns = append(conns, cr.getConn(p, res))
|
||||
} else {
|
||||
res.Destroy()
|
||||
}
|
||||
}
|
||||
|
||||
return conns
|
||||
}
|
||||
|
||||
// Reset closes all connections, but leaves the pool open. It is intended for use when an error is detected that would
// disrupt all connections (such as a network interruption or a server state change).
//
// It is safe to reset a pool while connections are checked out. Those connections will be closed when they are returned
// to the pool.
func (p *Pool) Reset() {
	p.p.Reset()
}
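A minimal sketch of when Reset might be called; isConnectionFatal is a hypothetical classifier, not part of pgxpool. After detecting a condition that breaks every connection, such as a server restart, Reset discards them all while keeping the pool usable.

package main

import (
	"github.com/jackc/pgx/v5/pgxpool"
)

// resetOnFatalError is a sketch; isConnectionFatal is a hypothetical helper that
// reports errors indicating every pooled connection is now broken.
func resetOnFatalError(pool *pgxpool.Pool, err error, isConnectionFatal func(error) bool) {
	if err != nil && isConnectionFatal(err) {
		// Checked-out connections are closed as they are returned to the pool.
		pool.Reset()
	}
}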
|
||||
|
||||
// Config returns a copy of config that was used to initialize this pool.
|
||||
func (p *Pool) Config() *Config { return p.config.Copy() }
|
||||
|
||||
// Stat returns a pgxpool.Stat struct with a snapshot of Pool statistics.
|
||||
func (p *Pool) Stat() *Stat {
|
||||
return &Stat{
|
||||
s: p.p.Stat(),
|
||||
newConnsCount: atomic.LoadInt64(&p.newConnsCount),
|
||||
lifetimeDestroyCount: atomic.LoadInt64(&p.lifetimeDestroyCount),
|
||||
idleDestroyCount: atomic.LoadInt64(&p.idleDestroyCount),
|
||||
}
|
||||
}
|
||||
|
||||
// Exec acquires a connection from the Pool and executes the given SQL.
|
||||
// SQL can be either a prepared statement name or an SQL string.
|
||||
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
|
||||
// The acquired connection is returned to the pool when the Exec function returns.
|
||||
func (p *Pool) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
|
||||
c, err := p.Acquire(ctx)
|
||||
if err != nil {
|
||||
return pgconn.CommandTag{}, err
|
||||
}
|
||||
defer c.Release()
|
||||
|
||||
return c.Exec(ctx, sql, arguments...)
|
||||
}
|
||||
|
||||
// Query acquires a connection and executes a query that returns pgx.Rows.
|
||||
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
|
||||
// See pgx.Rows documentation to close the returned Rows and return the acquired connection to the Pool.
|
||||
//
|
||||
// If there is an error, the returned pgx.Rows will be returned in an error state.
|
||||
// If preferred, ignore the error returned from Query and handle errors using the returned pgx.Rows.
|
||||
//
|
||||
// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
|
||||
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
|
||||
// needed. See the documentation for those types for details.
|
||||
func (p *Pool) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
|
||||
c, err := p.Acquire(ctx)
|
||||
if err != nil {
|
||||
return errRows{err: err}, err
|
||||
}
|
||||
|
||||
rows, err := c.Query(ctx, sql, args...)
|
||||
if err != nil {
|
||||
c.Release()
|
||||
return errRows{err: err}, err
|
||||
}
|
||||
|
||||
return c.getPoolRows(rows), nil
|
||||
}
|
||||
|
||||
// QueryRow acquires a connection and executes a query that is expected
// to return at most one row (pgx.Row). Errors are deferred until pgx.Row's
// Scan method is called. If the query selects no rows, pgx.Row's Scan will
// return ErrNoRows. Otherwise, pgx.Row's Scan scans the first selected row
// and discards the rest. The acquired connection is returned to the Pool when
// pgx.Row's Scan method is called.
//
// Arguments should be referenced positionally from the SQL string as $1, $2, etc.
//
// For extra control over how the query is executed, the types QuerySimpleProtocol, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
func (p *Pool) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
	c, err := p.Acquire(ctx)
	if err != nil {
		return errRow{err: err}
	}

	row := c.QueryRow(ctx, sql, args...)
	return c.getPoolRow(row)
}
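A usage sketch for the pool-level Query and QueryRow helpers; the "users" table and its columns are hypothetical. Closing the Rows, or scanning the Row, is what returns the acquired connection to the pool.

package main

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"
)

// listActiveNames is a sketch; the "users" table and columns are hypothetical.
func listActiveNames(ctx context.Context, pool *pgxpool.Pool) ([]string, error) {
	rows, err := pool.Query(ctx, "select name from users where active = $1", true)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // closing the rows returns the acquired connection to the pool

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}

// totalUsers shows QueryRow: the connection is released when Scan is called.
func totalUsers(ctx context.Context, pool *pgxpool.Pool) (int, error) {
	var n int
	err := pool.QueryRow(ctx, "select count(*) from users").Scan(&n)
	return n, err
}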
|
||||
|
||||
func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
|
||||
c, err := p.Acquire(ctx)
|
||||
if err != nil {
|
||||
return errBatchResults{err: err}
|
||||
}
|
||||
|
||||
br := c.SendBatch(ctx, b)
|
||||
return &poolBatchResults{br: br, c: c}
|
||||
}
|
||||
|
||||
// Begin acquires a connection from the Pool and starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no
// auto-rollback on context cancellation. Begin initiates a transaction block without explicitly setting a transaction mode for the block (see BeginTx with TxOptions if transaction mode is required).
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
	return p.BeginTx(ctx, pgx.TxOptions{})
}

// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
// Unlike database/sql, the context only affects the begin command. i.e. there is no auto-rollback on context cancellation.
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
	c, err := p.Acquire(ctx)
	if err != nil {
		return nil, err
	}

	t, err := c.BeginTx(ctx, txOptions)
	if err != nil {
		c.Release()
		return nil, err
	}

	return &Tx{t: t, c: c}, nil
}
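A transaction sketch built on the methods above; the "accounts" table is hypothetical. Deferring Rollback is safe even after a successful Commit, as noted in the Tx documentation later in this diff.

package main

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"
)

// transferFunds is a sketch; the "accounts" table is hypothetical.
func transferFunds(ctx context.Context, pool *pgxpool.Pool, from, to, amount int64) error {
	tx, err := pool.Begin(ctx)
	if err != nil {
		return err
	}
	// Rollback returns ErrTxClosed once Commit has succeeded, so the defer is harmless.
	defer tx.Rollback(ctx)

	if _, err := tx.Exec(ctx, "update accounts set balance = balance - $1 where id = $2", amount, from); err != nil {
		return err
	}
	if _, err := tx.Exec(ctx, "update accounts set balance = balance + $1 where id = $2", amount, to); err != nil {
		return err
	}

	return tx.Commit(ctx)
}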
|
||||
|
||||
func (p *Pool) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
|
||||
c, err := p.Acquire(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer c.Release()
|
||||
|
||||
return c.Conn().CopyFrom(ctx, tableName, columnNames, rowSrc)
|
||||
}
|
||||
|
||||
// Ping acquires a connection from the Pool and executes an empty sql statement against it.
|
||||
// If the sql returns without error, the database Ping is considered successful, otherwise, the error is returned.
|
||||
func (p *Pool) Ping(ctx context.Context) error {
|
||||
c, err := p.Acquire(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer c.Release()
|
||||
return c.Ping(ctx)
|
||||
}
|
116
vendor/github.com/jackc/pgx/v5/pgxpool/rows.go
generated
vendored
Normal file
116
vendor/github.com/jackc/pgx/v5/pgxpool/rows.go
generated
vendored
Normal file
@ -0,0 +1,116 @@
|
||||
package pgxpool
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type errRows struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (errRows) Close() {}
|
||||
func (e errRows) Err() error { return e.err }
|
||||
func (errRows) CommandTag() pgconn.CommandTag { return pgconn.CommandTag{} }
|
||||
func (errRows) FieldDescriptions() []pgconn.FieldDescription { return nil }
|
||||
func (errRows) Next() bool { return false }
|
||||
func (e errRows) Scan(dest ...any) error { return e.err }
|
||||
func (e errRows) Values() ([]any, error) { return nil, e.err }
|
||||
func (e errRows) RawValues() [][]byte { return nil }
|
||||
func (e errRows) Conn() *pgx.Conn { return nil }
|
||||
|
||||
type errRow struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e errRow) Scan(dest ...any) error { return e.err }
|
||||
|
||||
type poolRows struct {
|
||||
r pgx.Rows
|
||||
c *Conn
|
||||
err error
|
||||
}
|
||||
|
||||
func (rows *poolRows) Close() {
|
||||
rows.r.Close()
|
||||
if rows.c != nil {
|
||||
rows.c.Release()
|
||||
rows.c = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (rows *poolRows) Err() error {
|
||||
if rows.err != nil {
|
||||
return rows.err
|
||||
}
|
||||
return rows.r.Err()
|
||||
}
|
||||
|
||||
func (rows *poolRows) CommandTag() pgconn.CommandTag {
|
||||
return rows.r.CommandTag()
|
||||
}
|
||||
|
||||
func (rows *poolRows) FieldDescriptions() []pgconn.FieldDescription {
|
||||
return rows.r.FieldDescriptions()
|
||||
}
|
||||
|
||||
func (rows *poolRows) Next() bool {
|
||||
if rows.err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n := rows.r.Next()
|
||||
if !n {
|
||||
rows.Close()
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (rows *poolRows) Scan(dest ...any) error {
|
||||
err := rows.r.Scan(dest...)
|
||||
if err != nil {
|
||||
rows.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (rows *poolRows) Values() ([]any, error) {
|
||||
values, err := rows.r.Values()
|
||||
if err != nil {
|
||||
rows.Close()
|
||||
}
|
||||
return values, err
|
||||
}
|
||||
|
||||
func (rows *poolRows) RawValues() [][]byte {
|
||||
return rows.r.RawValues()
|
||||
}
|
||||
|
||||
func (rows *poolRows) Conn() *pgx.Conn {
|
||||
return rows.r.Conn()
|
||||
}
|
||||
|
||||
type poolRow struct {
|
||||
r pgx.Row
|
||||
c *Conn
|
||||
err error
|
||||
}
|
||||
|
||||
func (row *poolRow) Scan(dest ...any) error {
|
||||
if row.err != nil {
|
||||
return row.err
|
||||
}
|
||||
|
||||
panicked := true
|
||||
defer func() {
|
||||
if panicked && row.c != nil {
|
||||
row.c.Release()
|
||||
}
|
||||
}()
|
||||
err := row.r.Scan(dest...)
|
||||
panicked = false
|
||||
if row.c != nil {
|
||||
row.c.Release()
|
||||
}
|
||||
return err
|
||||
}
|
84
vendor/github.com/jackc/pgx/v5/pgxpool/stat.go
generated
vendored
Normal file
84
vendor/github.com/jackc/pgx/v5/pgxpool/stat.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
package pgxpool
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/jackc/puddle/v2"
|
||||
)
|
||||
|
||||
// Stat is a snapshot of Pool statistics.
type Stat struct {
	s                    *puddle.Stat
	newConnsCount        int64
	lifetimeDestroyCount int64
	idleDestroyCount     int64
}
|
||||
|
||||
// AcquireCount returns the cumulative count of successful acquires from the pool.
|
||||
func (s *Stat) AcquireCount() int64 {
|
||||
return s.s.AcquireCount()
|
||||
}
|
||||
|
||||
// AcquireDuration returns the total duration of all successful acquires from
|
||||
// the pool.
|
||||
func (s *Stat) AcquireDuration() time.Duration {
|
||||
return s.s.AcquireDuration()
|
||||
}
|
||||
|
||||
// AcquiredConns returns the number of currently acquired connections in the pool.
|
||||
func (s *Stat) AcquiredConns() int32 {
|
||||
return s.s.AcquiredResources()
|
||||
}
|
||||
|
||||
// CanceledAcquireCount returns the cumulative count of acquires from the pool
|
||||
// that were canceled by a context.
|
||||
func (s *Stat) CanceledAcquireCount() int64 {
|
||||
return s.s.CanceledAcquireCount()
|
||||
}
|
||||
|
||||
// ConstructingConns returns the number of conns with construction in progress in
|
||||
// the pool.
|
||||
func (s *Stat) ConstructingConns() int32 {
|
||||
return s.s.ConstructingResources()
|
||||
}
|
||||
|
||||
// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
|
||||
// that waited for a resource to be released or constructed because the pool was
|
||||
// empty.
|
||||
func (s *Stat) EmptyAcquireCount() int64 {
|
||||
return s.s.EmptyAcquireCount()
|
||||
}
|
||||
|
||||
// IdleConns returns the number of currently idle conns in the pool.
|
||||
func (s *Stat) IdleConns() int32 {
|
||||
return s.s.IdleResources()
|
||||
}
|
||||
|
||||
// MaxConns returns the maximum size of the pool.
|
||||
func (s *Stat) MaxConns() int32 {
|
||||
return s.s.MaxResources()
|
||||
}
|
||||
|
||||
// TotalConns returns the total number of resources currently in the pool.
|
||||
// The value is the sum of ConstructingConns, AcquiredConns, and
|
||||
// IdleConns.
|
||||
func (s *Stat) TotalConns() int32 {
|
||||
return s.s.TotalResources()
|
||||
}
|
||||
|
||||
// NewConnsCount returns the cumulative count of new connections opened.
|
||||
func (s *Stat) NewConnsCount() int64 {
|
||||
return s.newConnsCount
|
||||
}
|
||||
|
||||
// MaxLifetimeDestroyCount returns the cumulative count of connections destroyed
|
||||
// because they exceeded MaxConnLifetime.
|
||||
func (s *Stat) MaxLifetimeDestroyCount() int64 {
|
||||
return s.lifetimeDestroyCount
|
||||
}
|
||||
|
||||
// MaxIdleDestroyCount returns the cumulative count of connections destroyed because
|
||||
// they exceeded MaxConnIdleTime.
|
||||
func (s *Stat) MaxIdleDestroyCount() int64 {
|
||||
return s.idleDestroyCount
|
||||
}
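A brief sketch of reading a Stat snapshot, for example to export periodic metrics; it assumes DATABASE_URL points at a reachable server.

package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// Stat returns an immutable snapshot; call it again for fresh numbers.
	s := pool.Stat()
	log.Printf("total=%d idle=%d acquired=%d max=%d acquires=%d",
		s.TotalConns(), s.IdleConns(), s.AcquiredConns(), s.MaxConns(), s.AcquireCount())
}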
|
82
vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
generated
vendored
Normal file
82
vendor/github.com/jackc/pgx/v5/pgxpool/tx.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
package pgxpool
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
// Tx represents a database transaction acquired from a Pool.
|
||||
type Tx struct {
|
||||
t pgx.Tx
|
||||
c *Conn
|
||||
}
|
||||
|
||||
// Begin starts a pseudo nested transaction implemented with a savepoint.
|
||||
func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
|
||||
return tx.t.Begin(ctx)
|
||||
}
|
||||
|
||||
// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return ErrTxClosed
|
||||
// if the Tx is already closed, but is otherwise safe to call multiple times. If the commit fails with a rollback status
|
||||
// (e.g. the transaction was already in a broken state) then ErrTxCommitRollback will be returned.
|
||||
func (tx *Tx) Commit(ctx context.Context) error {
|
||||
err := tx.t.Commit(ctx)
|
||||
if tx.c != nil {
|
||||
tx.c.Release()
|
||||
tx.c = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return ErrTxClosed
|
||||
// if the Tx is already closed, but is otherwise safe to call multiple times. Hence, defer tx.Rollback() is safe even if
|
||||
// tx.Commit() will be called first in a non-error condition.
|
||||
func (tx *Tx) Rollback(ctx context.Context) error {
|
||||
err := tx.t.Rollback(ctx)
|
||||
if tx.c != nil {
|
||||
tx.c.Release()
|
||||
tx.c = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (tx *Tx) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) {
|
||||
return tx.t.CopyFrom(ctx, tableName, columnNames, rowSrc)
|
||||
}
|
||||
|
||||
func (tx *Tx) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
|
||||
return tx.t.SendBatch(ctx, b)
|
||||
}
|
||||
|
||||
func (tx *Tx) LargeObjects() pgx.LargeObjects {
|
||||
return tx.t.LargeObjects()
|
||||
}
|
||||
|
||||
// Prepare creates a prepared statement with name and sql. If the name is empty,
|
||||
// an anonymous prepared statement will be used. sql can contain placeholders
|
||||
// for bound parameters. These placeholders are referenced positionally as $1, $2, etc.
|
||||
//
|
||||
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same
|
||||
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
|
||||
// needing to first check whether the statement has already been prepared.
|
||||
func (tx *Tx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error) {
|
||||
return tx.t.Prepare(ctx, name, sql)
|
||||
}
|
||||
|
||||
func (tx *Tx) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
|
||||
return tx.t.Exec(ctx, sql, arguments...)
|
||||
}
|
||||
|
||||
func (tx *Tx) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) {
|
||||
return tx.t.Query(ctx, sql, args...)
|
||||
}
|
||||
|
||||
func (tx *Tx) QueryRow(ctx context.Context, sql string, args ...any) pgx.Row {
|
||||
return tx.t.QueryRow(ctx, sql, args...)
|
||||
}
|
||||
|
||||
func (tx *Tx) Conn() *pgx.Conn {
|
||||
return tx.t.Conn()
|
||||
}
|
76
vendor/github.com/jackc/pgx/v5/rows.go
generated
vendored
76
vendor/github.com/jackc/pgx/v5/rows.go
generated
vendored
@ -8,7 +8,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/internal/stmtcache"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
)
|
||||
@ -17,7 +16,8 @@ import (
|
||||
// the *Conn can be used again. Rows are closed by explicitly calling Close(),
|
||||
// calling Next() until it returns false, or when a fatal error occurs.
|
||||
//
|
||||
// Once a Rows is closed the only methods that may be called are Close(), Err(), and CommandTag().
|
||||
// Once a Rows is closed the only methods that may be called are Close(), Err(),
|
||||
// and CommandTag().
|
||||
//
|
||||
// Rows is an interface instead of a struct to allow tests to mock Query. However,
|
||||
// adding a method to an interface is technically a breaking change. Because of this
|
||||
@ -41,8 +41,15 @@ type Rows interface {
|
||||
FieldDescriptions() []pgconn.FieldDescription
|
||||
|
||||
// Next prepares the next row for reading. It returns true if there is another
|
||||
// row and false if no more rows are available. It automatically closes rows
|
||||
// when all rows are read.
|
||||
// row and false if no more rows are available or a fatal error has occurred.
|
||||
// It automatically closes rows when all rows are read.
|
||||
//
|
||||
// Callers should check rows.Err() after rows.Next() returns false to detect
|
||||
// whether result-set reading ended prematurely due to an error. See
|
||||
// Conn.Query for details.
|
||||
//
|
||||
// For simpler error handling, consider using the higher-level pgx v5
|
||||
// CollectRows() and ForEachRow() helpers instead.
|
||||
Next() bool
|
||||
|
||||
// Scan reads the values from the current row into dest values positionally.
|
||||
@ -166,14 +173,12 @@ func (rows *baseRows) Close() {
|
||||
}
|
||||
|
||||
if rows.err != nil && rows.conn != nil && rows.sql != "" {
|
||||
if stmtcache.IsStatementInvalid(rows.err) {
|
||||
if sc := rows.conn.statementCache; sc != nil {
|
||||
sc.Invalidate(rows.sql)
|
||||
}
|
||||
if sc := rows.conn.statementCache; sc != nil {
|
||||
sc.Invalidate(rows.sql)
|
||||
}
|
||||
|
||||
if sc := rows.conn.descriptionCache; sc != nil {
|
||||
sc.Invalidate(rows.sql)
|
||||
}
|
||||
if sc := rows.conn.descriptionCache; sc != nil {
|
||||
sc.Invalidate(rows.sql)
|
||||
}
|
||||
}
|
||||
|
||||
@ -457,6 +462,39 @@ func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
|
||||
return value, rows.Err()
|
||||
}
|
||||
|
||||
// CollectExactlyOneRow calls fn for the first row in rows and returns the result.
//   - If no rows are found returns an error where errors.Is(ErrNoRows) is true.
//   - If more than 1 row is found returns an error where errors.Is(ErrTooManyRows) is true.
func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
	defer rows.Close()

	var (
		err   error
		value T
	)

	if !rows.Next() {
		if err = rows.Err(); err != nil {
			return value, err
		}

		return value, ErrNoRows
	}

	value, err = fn(rows)
	if err != nil {
		return value, err
	}

	if rows.Next() {
		var zero T

		return zero, ErrTooManyRows
	}

	return value, rows.Err()
}
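A hedged usage sketch for CollectExactlyOneRow; the query and the "users" table are hypothetical. It distinguishes the no-row and multi-row cases via errors.Is.

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5"
)

// userEmail is a sketch; the "users" table is hypothetical.
func userEmail(ctx context.Context, conn *pgx.Conn, id int64) (string, error) {
	// Query errors are deliberately deferred to the collector, which checks rows.Err().
	rows, _ := conn.Query(ctx, "select email from users where id = $1", id)
	email, err := pgx.CollectExactlyOneRow(rows, pgx.RowTo[string])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return "", fmt.Errorf("user %d not found", id)
	case errors.Is(err, pgx.ErrTooManyRows):
		return "", fmt.Errorf("user id %d is not unique", id)
	}
	return email, err
}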
|
||||
|
||||
// RowTo returns a T scanned from row.
|
||||
func RowTo[T any](row CollectableRow) (T, error) {
|
||||
var value T
|
||||
@ -496,7 +534,7 @@ func (rs *mapRowScanner) ScanRow(rows Rows) error {
|
||||
}
|
||||
|
||||
// RowToStructByPos returns a T scanned from row. T must be a struct. T must have the same number a public fields as row
|
||||
// has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then the field will be
|
||||
// has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then the field will be
|
||||
// ignored.
|
||||
func RowToStructByPos[T any](row CollectableRow) (T, error) {
|
||||
var value T
|
||||
@ -505,7 +543,7 @@ func RowToStructByPos[T any](row CollectableRow) (T, error) {
|
||||
}
|
||||
|
||||
// RowToAddrOfStructByPos returns the address of a T scanned from row. T must be a struct. T must have the same number a
|
||||
// public fields as row has fields. The row and T fields will by matched by position. If the "db" struct tag is "-" then
|
||||
// public fields as row has fields. The row and T fields will be matched by position. If the "db" struct tag is "-" then
|
||||
// the field will be ignored.
|
||||
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
|
||||
var value T
|
||||
@ -560,7 +598,7 @@ func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Val
|
||||
}
|
||||
|
||||
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
|
||||
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
|
||||
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
|
||||
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
|
||||
func RowToStructByName[T any](row CollectableRow) (T, error) {
|
||||
var value T
|
||||
@ -569,7 +607,7 @@ func RowToStructByName[T any](row CollectableRow) (T, error) {
|
||||
}
|
||||
|
||||
// RowToAddrOfStructByName returns the address of a T scanned from row. T must be a struct. T must have the same number
|
||||
// of named public fields as row has fields. The row and T fields will by matched by name. The match is
|
||||
// of named public fields as row has fields. The row and T fields will be matched by name. The match is
|
||||
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
|
||||
// then the field will be ignored.
|
||||
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
|
||||
@ -579,7 +617,7 @@ func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
|
||||
}
|
||||
|
||||
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have greater than or equal number of named public
|
||||
// fields as row has fields. The row and T fields will by matched by name. The match is case-insensitive. The database
|
||||
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
|
||||
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
|
||||
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
|
||||
var value T
|
||||
@ -588,7 +626,7 @@ func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
|
||||
}
|
||||
|
||||
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have greater than or
|
||||
// equal number of named public fields as row has fields. The row and T fields will by matched by name. The match is
|
||||
// equal number of named public fields as row has fields. The row and T fields will be matched by name. The match is
|
||||
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
|
||||
// then the field will be ignored.
|
||||
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
|
||||
@ -650,7 +688,7 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s
|
||||
// Field is unexported, skip it.
|
||||
continue
|
||||
}
|
||||
// Handle anoymous struct embedding, but do not try to handle embedded pointers.
|
||||
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
|
||||
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
|
||||
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
|
||||
if err != nil {
|
||||
@ -659,7 +697,7 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s
|
||||
} else {
|
||||
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
|
||||
if dbTagPresent {
|
||||
dbTag = strings.Split(dbTag, ",")[0]
|
||||
dbTag, _, _ = strings.Cut(dbTag, ",")
|
||||
}
|
||||
if dbTag == "-" {
|
||||
// Field is ignored, skip it.
|
||||
|
145
vendor/github.com/jackc/pgx/v5/stdlib/sql.go
generated
vendored
145
vendor/github.com/jackc/pgx/v5/stdlib/sql.go
generated
vendored
@ -14,12 +14,24 @@
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Or from a *pgxpool.Pool.
|
||||
//
|
||||
// pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// db, err := stdlib.OpenDBFromPool(pool)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// Or a pgx.ConnConfig can be used to set configuration not accessible via connection string. In this case the
|
||||
// pgx.ConnConfig must first be registered with the driver. This registration returns a connection string which is used
|
||||
// with sql.Open.
|
||||
//
|
||||
// connConfig, _ := pgx.ParseConfig(os.Getenv("DATABASE_URL"))
|
||||
// connConfig.Logger = myLogger
|
||||
// connConfig.Tracer = &tracelog.TraceLog{Logger: myLogger, LogLevel: tracelog.LogLevelInfo}
|
||||
// connStr := stdlib.RegisterConnConfig(connConfig)
|
||||
// db, _ := sql.Open("pgx", connStr)
|
||||
//
|
||||
@ -74,6 +86,7 @@ import (
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// Only intrinsic types should be binary format with database/sql.
|
||||
@ -125,14 +138,14 @@ func contains(list []string, y string) bool {
|
||||
type OptionOpenDB func(*connector)
|
||||
|
||||
// OptionBeforeConnect provides a callback for before connect. It is passed a shallow copy of the ConnConfig that will
|
||||
// be used to connect, so only its immediate members should be modified.
|
||||
// be used to connect, so only its immediate members should be modified. Used only if db is opened with *pgx.ConnConfig.
|
||||
func OptionBeforeConnect(bc func(context.Context, *pgx.ConnConfig) error) OptionOpenDB {
|
||||
return func(dc *connector) {
|
||||
dc.BeforeConnect = bc
|
||||
}
|
||||
}
|
||||
|
||||
// OptionAfterConnect provides a callback for after connect.
|
||||
// OptionAfterConnect provides a callback for after connect. Used only if db is opened with *pgx.ConnConfig.
|
||||
func OptionAfterConnect(ac func(context.Context, *pgx.Conn) error) OptionOpenDB {
|
||||
return func(dc *connector) {
|
||||
dc.AfterConnect = ac
|
||||
@ -191,13 +204,42 @@ func GetConnector(config pgx.ConnConfig, opts ...OptionOpenDB) driver.Connector
|
||||
return c
|
||||
}
|
||||
|
||||
// GetPoolConnector creates a new driver.Connector from the given *pgxpool.Pool. By using this be sure to set the
// maximum idle connections of the *sql.DB created with this connector to zero since they must be managed from the
// *pgxpool.Pool. This is required to avoid acquiring all the connections from the pgxpool and starving any direct
// users of the pgxpool.
func GetPoolConnector(pool *pgxpool.Pool, opts ...OptionOpenDB) driver.Connector {
	c := connector{
		pool:         pool,
		ResetSession: func(context.Context, *pgx.Conn) error { return nil }, // noop reset session by default
		driver:       pgxDriver,
	}

	for _, opt := range opts {
		opt(&c)
	}

	return c
}

func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
	c := GetConnector(config, opts...)
	return sql.OpenDB(c)
}

// OpenDBFromPool creates a new *sql.DB from the given *pgxpool.Pool. Note that this method automatically sets the
// maximum number of idle connections in *sql.DB to zero, since they must be managed from the *pgxpool.Pool. This is
// required to avoid acquiring all the connections from the pgxpool and starving any direct users of the pgxpool.
func OpenDBFromPool(pool *pgxpool.Pool, opts ...OptionOpenDB) *sql.DB {
	c := GetPoolConnector(pool, opts...)
	db := sql.OpenDB(c)
	db.SetMaxIdleConns(0)
	return db
}
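A sketch of sharing one pgxpool between pgx-native code and database/sql consumers via OpenDBFromPool; it assumes DATABASE_URL points at a reachable server.

package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/stdlib"
)

func main() {
	ctx := context.Background()

	pool, err := pgxpool.New(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// db borrows connections from pool; idle connections stay managed by pgxpool
	// because OpenDBFromPool sets MaxIdleConns to zero.
	db := stdlib.OpenDBFromPool(pool)
	defer db.Close()

	if err := db.PingContext(ctx); err != nil {
		log.Fatal(err)
	}
}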
|
||||
|
||||
type connector struct {
|
||||
pgx.ConnConfig
|
||||
pool *pgxpool.Pool
|
||||
BeforeConnect func(context.Context, *pgx.ConnConfig) error // function to call before creation of every new connection
|
||||
AfterConnect func(context.Context, *pgx.Conn) error // function to call after creation of every new connection
|
||||
ResetSession func(context.Context, *pgx.Conn) error // function is called before a connection is reused
|
||||
@ -207,25 +249,53 @@ type connector struct {
|
||||
// Connect implement driver.Connector interface
|
||||
func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
var (
|
||||
err error
|
||||
conn *pgx.Conn
|
||||
connConfig pgx.ConnConfig
|
||||
conn *pgx.Conn
|
||||
close func(context.Context) error
|
||||
err error
|
||||
)
|
||||
|
||||
// Create a shallow copy of the config, so that BeforeConnect can safely modify it
|
||||
connConfig := c.ConnConfig
|
||||
if err = c.BeforeConnect(ctx, &connConfig); err != nil {
|
||||
return nil, err
|
||||
if c.pool == nil {
|
||||
// Create a shallow copy of the config, so that BeforeConnect can safely modify it
|
||||
connConfig = c.ConnConfig
|
||||
|
||||
if err = c.BeforeConnect(ctx, &connConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if conn, err = pgx.ConnectConfig(ctx, &connConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = c.AfterConnect(ctx, conn); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
close = conn.Close
|
||||
} else {
|
||||
var pconn *pgxpool.Conn
|
||||
|
||||
pconn, err = c.pool.Acquire(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conn = pconn.Conn()
|
||||
|
||||
close = func(_ context.Context) error {
|
||||
pconn.Release()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if conn, err = pgx.ConnectConfig(ctx, &connConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = c.AfterConnect(ctx, conn); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Conn{conn: conn, driver: c.driver, connConfig: connConfig, resetSessionFunc: c.ResetSession}, nil
|
||||
return &Conn{
|
||||
conn: conn,
|
||||
close: close,
|
||||
driver: c.driver,
|
||||
connConfig: connConfig,
|
||||
resetSessionFunc: c.ResetSession,
|
||||
psRefCounts: make(map[*pgconn.StatementDescription]int),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Driver implement driver.Connector interface
|
||||
@ -302,9 +372,11 @@ func (dc *driverConnector) Connect(ctx context.Context) (driver.Conn, error) {
|
||||
|
||||
c := &Conn{
|
||||
conn: conn,
|
||||
close: conn.Close,
|
||||
driver: dc.driver,
|
||||
connConfig: *connConfig,
|
||||
resetSessionFunc: func(context.Context, *pgx.Conn) error { return nil },
|
||||
psRefCounts: make(map[*pgconn.StatementDescription]int),
|
||||
}
|
||||
|
||||
return c, nil
|
||||
@ -326,11 +398,19 @@ func UnregisterConnConfig(connStr string) {
|
||||
|
||||
type Conn struct {
|
||||
conn *pgx.Conn
|
||||
psCount int64 // Counter used for creating unique prepared statement names
|
||||
close func(context.Context) error
|
||||
driver *Driver
|
||||
connConfig pgx.ConnConfig
|
||||
resetSessionFunc func(context.Context, *pgx.Conn) error // Function is called before a connection is reused
|
||||
lastResetSessionTime time.Time
|
||||
|
||||
// psRefCounts contains reference counts for prepared statements. Prepare uses the underlying pgx logic to generate
|
||||
// deterministic statement names from the statement text. If this query has already been prepared then the existing
|
||||
// *pgconn.StatementDescription will be returned. However, this means that if Close is called on the returned Stmt
|
||||
// then the underlying prepared statement will be closed even when the underlying prepared statement is still in use
|
||||
// by another database/sql Stmt. To prevent this psRefCounts keeps track of how many database/sql statements are using
|
||||
// the same underlying statement and only closes the underlying statement when the reference count reaches 0.
|
||||
psRefCounts map[*pgconn.StatementDescription]int
|
||||
}
|
||||
|
||||
// Conn returns the underlying *pgx.Conn
|
||||
@ -347,13 +427,11 @@ func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, e
|
||||
return nil, driver.ErrBadConn
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("pgx_%d", c.psCount)
|
||||
c.psCount++
|
||||
|
||||
sd, err := c.conn.Prepare(ctx, name, query)
|
||||
sd, err := c.conn.Prepare(ctx, query, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.psRefCounts[sd]++
|
||||
|
||||
return &Stmt{sd: sd, conn: c}, nil
|
||||
}
|
||||
@ -361,7 +439,7 @@ func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, e
|
||||
func (c *Conn) Close() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancel()
|
||||
return c.conn.Close(ctx)
|
||||
return c.close(ctx)
|
||||
}
|
||||
|
||||
func (c *Conn) Begin() (driver.Tx, error) {
|
||||
@ -470,7 +548,7 @@ func (c *Conn) ResetSession(ctx context.Context) error {
|
||||
|
||||
now := time.Now()
|
||||
if now.Sub(c.lastResetSessionTime) > time.Second {
|
||||
if err := c.conn.PgConn().CheckConn(); err != nil {
|
||||
if err := c.conn.PgConn().Ping(ctx); err != nil {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
}
|
||||
@ -487,7 +565,16 @@ type Stmt struct {
|
||||
func (s *Stmt) Close() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancel()
|
||||
return s.conn.conn.Deallocate(ctx, s.sd.Name)
|
||||
|
||||
refCount := s.conn.psRefCounts[s.sd]
|
||||
if refCount == 1 {
|
||||
delete(s.conn.psRefCounts, s.sd)
|
||||
} else {
|
||||
s.conn.psRefCounts[s.sd]--
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.conn.conn.Deallocate(ctx, s.sd.SQL)
|
||||
}
|
||||
|
||||
func (s *Stmt) NumInput() int {
|
||||
@ -499,7 +586,7 @@ func (s *Stmt) Exec(argsV []driver.Value) (driver.Result, error) {
|
||||
}
|
||||
|
||||
func (s *Stmt) ExecContext(ctx context.Context, argsV []driver.NamedValue) (driver.Result, error) {
|
||||
return s.conn.ExecContext(ctx, s.sd.Name, argsV)
|
||||
return s.conn.ExecContext(ctx, s.sd.SQL, argsV)
|
||||
}
|
||||
|
||||
func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
|
||||
@ -507,7 +594,7 @@ func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
|
||||
}
|
||||
|
||||
func (s *Stmt) QueryContext(ctx context.Context, argsV []driver.NamedValue) (driver.Rows, error) {
|
||||
return s.conn.QueryContext(ctx, s.sd.Name, argsV)
|
||||
return s.conn.QueryContext(ctx, s.sd.SQL, argsV)
|
||||
}
|
||||
|
||||
type rowValueFunc func(src []byte) (driver.Value, error)
|
||||
@ -753,7 +840,7 @@ func (r *Rows) Next(dest []driver.Value) error {
|
||||
var err error
|
||||
dest[i], err = r.valueFuncs[i](rv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert field %d failed: %v", i, err)
|
||||
return fmt.Errorf("convert field %d failed: %w", i, err)
|
||||
}
|
||||
} else {
|
||||
dest[i] = nil
|
||||
|
74
vendor/github.com/jackc/puddle/v2/CHANGELOG.md
generated
vendored
Normal file
74
vendor/github.com/jackc/puddle/v2/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
# 2.2.1 (July 15, 2023)
|
||||
|
||||
* Fix: CreateResource cannot overflow pool. This changes documented behavior of CreateResource. Previously,
|
||||
CreateResource could create a resource even if the pool was full. This could cause the pool to overflow. While this
|
||||
was documented, it was documenting incorrect behavior. CreateResource now returns an error if the pool is full.
|
||||
|
||||
# 2.2.0 (February 11, 2023)
|
||||
|
||||
* Use Go 1.19 atomics and drop go.uber.org/atomic dependency
|
||||
|
||||
# 2.1.2 (November 12, 2022)
|
||||
|
||||
* Restore support to Go 1.18 via go.uber.org/atomic
|
||||
|
||||
# 2.1.1 (November 11, 2022)
|
||||
|
||||
* Fix create resource concurrently with Stat call race
|
||||
|
||||
# 2.1.0 (October 28, 2022)
|
||||
|
||||
* Concurrency control is now implemented with a semaphore. This simplifies some internal logic, resolves a few error conditions (including a deadlock), and improves performance. (Jan Dubsky)
|
||||
* Go 1.19 is now required for the improved atomic support.
|
||||
|
||||
# 2.0.1 (October 28, 2022)
|
||||
|
||||
* Fix race condition when Close is called concurrently with multiple constructors
|
||||
|
||||
# 2.0.0 (September 17, 2022)
|
||||
|
||||
* Use generics instead of interface{} (Столяров Владимир Алексеевич)
|
||||
* Add Reset
|
||||
* Do not cancel resource construction when Acquire is canceled
|
||||
* NewPool takes Config
|
||||
|
||||
# 1.3.0 (August 27, 2022)
|
||||
|
||||
* Acquire creates resources in background to allow creation to continue after Acquire is canceled (James Hartig)
|
||||
|
||||
# 1.2.1 (December 2, 2021)
|
||||
|
||||
* TryAcquire now does not block when background constructing resource
|
||||
|
||||
# 1.2.0 (November 20, 2021)
|
||||
|
||||
* Add TryAcquire (A. Jensen)
|
||||
* Fix: remove memory leak / unintentionally pinned memory when shrinking slices (Alexander Staubo)
|
||||
* Fix: Do not leave pool locked after panic from nil context
|
||||
|
||||
# 1.1.4 (September 11, 2021)
|
||||
|
||||
* Fix: Deadlock in CreateResource if pool was closed during resource acquisition (Dmitriy Matrenichev)
|
||||
|
||||
# 1.1.3 (December 3, 2020)
|
||||
|
||||
* Fix: Failed resource creation could cause concurrent Acquire to hang. (Evgeny Vanslov)
|
||||
|
||||
# 1.1.2 (September 26, 2020)
|
||||
|
||||
* Fix: Resource.Destroy no longer removes itself from the pool before its destructor has completed.
|
||||
* Fix: Prevent crash when pool is closed while resource is being created.
|
||||
|
||||
# 1.1.1 (April 2, 2020)
|
||||
|
||||
* Pool.Close can be safely called multiple times
|
||||
* AcquireAllIdle immediately returns nil if pool is closed
|
||||
* CreateResource checks if pool is closed before taking any action
|
||||
* Fix potential race condition when CreateResource and Close are called concurrently. CreateResource now checks if pool is closed before adding newly created resource to pool.
|
||||
|
||||
# 1.1.0 (February 5, 2020)
|
||||
|
||||
* Use runtime.nanotime for faster tracking of acquire time and last usage time.
|
||||
* Track resource idle time to enable client health check logic. (Patrick Ellul)
|
||||
* Add CreateResource to construct a new resource without acquiring it. (Patrick Ellul)
|
||||
* Fix deadlock race when acquire is cancelled. (Michael Tharp)
|
22 vendor/github.com/jackc/puddle/v2/LICENSE generated vendored Normal file
@ -0,0 +1,22 @@
|
||||
Copyright (c) 2018 Jack Christensen
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
80 vendor/github.com/jackc/puddle/v2/README.md generated vendored Normal file
@ -0,0 +1,80 @@
|
||||
[![](https://godoc.org/github.com/jackc/puddle?status.svg)](https://godoc.org/github.com/jackc/puddle)
|
||||
![Build Status](https://github.com/jackc/puddle/actions/workflows/ci.yml/badge.svg)
|
||||
|
||||
# Puddle
|
||||
|
||||
Puddle is a tiny generic resource pool library for Go that uses the standard
|
||||
context library to signal cancellation of acquires. It is designed to contain
|
||||
the minimum functionality required for a resource pool. It can be used directly
|
||||
or it can be used as the base for a domain specific resource pool. For example,
|
||||
a database connection pool may use puddle internally and implement health checks
|
||||
and keep-alive behavior without needing to implement any concurrent code of its
|
||||
own.
|
||||
|
||||
## Features
|
||||
|
||||
* Acquire cancellation via context standard library
|
||||
* Statistics API for monitoring pool pressure
|
||||
* No dependencies outside of standard library and golang.org/x/sync
|
||||
* High performance
|
||||
* 100% test coverage of reachable code
|
||||
|
||||
## Example Usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/jackc/puddle/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
constructor := func(context.Context) (net.Conn, error) {
|
||||
return net.Dial("tcp", "127.0.0.1:8080")
|
||||
}
|
||||
destructor := func(value net.Conn) {
|
||||
value.Close()
|
||||
}
|
||||
maxPoolSize := int32(10)
|
||||
|
||||
pool, err := puddle.NewPool(&puddle.Config[net.Conn]{Constructor: constructor, Destructor: destructor, MaxSize: maxPoolSize})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Acquire resource from the pool.
|
||||
res, err := pool.Acquire(context.Background())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Use resource.
|
||||
_, err = res.Value().Write([]byte{1})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Release when done.
|
||||
res.Release()
|
||||
}
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
Puddle is stable and feature complete.
|
||||
|
||||
* Bug reports and fixes are welcome.
|
||||
* New features will usually not be accepted if they can be feasibly implemented in a wrapper.
|
||||
* Performance optimizations will usually not be accepted unless the performance issue rises to the level of a bug.
|
||||
|
||||
## Supported Go Versions
|
||||
|
||||
puddle supports the same versions of Go that are supported by the Go project. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases. This means puddle supports Go 1.19 and higher.
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
24 vendor/github.com/jackc/puddle/v2/context.go generated vendored Normal file
@ -0,0 +1,24 @@
|
||||
package puddle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// valueCancelCtx combines two contexts into one. One context is used for values and the other is used for cancellation.
|
||||
type valueCancelCtx struct {
|
||||
valueCtx context.Context
|
||||
cancelCtx context.Context
|
||||
}
|
||||
|
||||
func (ctx *valueCancelCtx) Deadline() (time.Time, bool) { return ctx.cancelCtx.Deadline() }
|
||||
func (ctx *valueCancelCtx) Done() <-chan struct{} { return ctx.cancelCtx.Done() }
|
||||
func (ctx *valueCancelCtx) Err() error { return ctx.cancelCtx.Err() }
|
||||
func (ctx *valueCancelCtx) Value(key any) any { return ctx.valueCtx.Value(key) }
|
||||
|
||||
func newValueCancelCtx(valueCtx, cancelContext context.Context) context.Context {
|
||||
return &valueCancelCtx{
|
||||
valueCtx: valueCtx,
|
||||
cancelCtx: cancelContext,
|
||||
}
|
||||
}
|
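The split between the value context and the cancellation context above is what lets the pool return from a canceled Acquire without aborting the constructor. A minimal, self-contained sketch of the same pattern; the vendored type is unexported, so it is re-declared locally here purely for illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// valueCancelCtx mirrors the vendored type: Value() is served by valueCtx,
// Done/Err/Deadline are served by cancelCtx.
type valueCancelCtx struct {
	valueCtx  context.Context
	cancelCtx context.Context
}

func (c *valueCancelCtx) Deadline() (time.Time, bool) { return c.cancelCtx.Deadline() }
func (c *valueCancelCtx) Done() <-chan struct{}       { return c.cancelCtx.Done() }
func (c *valueCancelCtx) Err() error                  { return c.cancelCtx.Err() }
func (c *valueCancelCtx) Value(key any) any           { return c.valueCtx.Value(key) }

type ctxKey string

func main() {
	// Caller's context carries values and may be canceled at any time.
	callerCtx := context.WithValue(context.Background(), ctxKey("trace"), "abc123")
	callerCtx, cancelCaller := context.WithCancel(callerCtx)

	// Pool-lifetime context is the only thing that cancels the combined context.
	poolCtx, cancelPool := context.WithCancel(context.Background())
	defer cancelPool()

	ctx := &valueCancelCtx{valueCtx: callerCtx, cancelCtx: poolCtx}

	cancelCaller()
	fmt.Println(ctx.Err())                  // <nil>: canceling the caller does not cancel ctx
	fmt.Println(ctx.Value(ctxKey("trace"))) // abc123: caller values remain visible
}
```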
11 vendor/github.com/jackc/puddle/v2/doc.go generated vendored Normal file
@ -0,0 +1,11 @@
|
||||
// Package puddle is a generic resource pool with type-parametrized api.
|
||||
/*
|
||||
|
||||
Puddle is a tiny generic resource pool library for Go that uses the standard
|
||||
context library to signal cancellation of acquires. It is designed to contain
|
||||
the minimum functionality a resource pool needs that cannot be implemented
|
||||
without concurrency concerns. For example, a database connection pool may use
|
||||
puddle internally and implement health checks and keep-alive behavior without
|
||||
needing to implement any concurrent code of its own.
|
||||
*/
|
||||
package puddle
|
85 vendor/github.com/jackc/puddle/v2/internal/genstack/gen_stack.go generated vendored Normal file
@ -0,0 +1,85 @@
|
||||
package genstack
|
||||
|
||||
// GenStack implements a generational stack.
|
||||
//
|
||||
// GenStack works as a common stack except for the fact that all elements in the
|
||||
// older generation are guaranteed to be popped before any element in the newer
|
||||
// generation. New elements are always pushed to the current (newest)
|
||||
// generation.
|
||||
//
|
||||
// We could also say that GenStack behaves as a stack in case of a single
|
||||
// generation, but it behaves as a queue of individual generation stacks.
|
||||
type GenStack[T any] struct {
|
||||
// We can represent arbitrary number of generations using 2 stacks. The
|
||||
// new stack stores all new pushes and the old stack serves all reads.
|
||||
// Old stack can represent multiple generations. If old == new, then all
|
||||
// elements pushed in previous (not current) generations have already
|
||||
// been popped.
|
||||
|
||||
old *stack[T]
|
||||
new *stack[T]
|
||||
}
|
||||
|
||||
// NewGenStack creates a new empty GenStack.
|
||||
func NewGenStack[T any]() *GenStack[T] {
|
||||
s := &stack[T]{}
|
||||
return &GenStack[T]{
|
||||
old: s,
|
||||
new: s,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *GenStack[T]) Pop() (T, bool) {
|
||||
// Pushes always append to the new stack, so if the old one becomes
// empty, it will remain empty forever.
|
||||
if s.old.len() == 0 && s.old != s.new {
|
||||
s.old = s.new
|
||||
}
|
||||
|
||||
if s.old.len() == 0 {
|
||||
var zero T
|
||||
return zero, false
|
||||
}
|
||||
|
||||
return s.old.pop(), true
|
||||
}
|
||||
|
||||
// Push pushes a new element at the top of the stack.
|
||||
func (s *GenStack[T]) Push(v T) { s.new.push(v) }
|
||||
|
||||
// NextGen starts a new stack generation.
|
||||
func (s *GenStack[T]) NextGen() {
|
||||
if s.old == s.new {
|
||||
s.new = &stack[T]{}
|
||||
return
|
||||
}
|
||||
|
||||
// We need to pop from the old stack to the top of the new stack. Let's
|
||||
// have an example:
|
||||
//
|
||||
// Old: <bottom> 4 3 2 1
|
||||
// New: <bottom> 8 7 6 5
|
||||
// PopOrder: 1 2 3 4 5 6 7 8
|
||||
//
|
||||
//
|
||||
// To preserve pop order, we have to take all elements from the old
|
||||
// stack and push them to the top of new stack:
|
||||
//
|
||||
// New: 8 7 6 5 4 3 2 1
|
||||
//
|
||||
s.new.push(s.old.takeAll()...)
|
||||
|
||||
// We have the old stack allocated and empty, so why not reuse it as
// the new stack.
|
||||
s.old, s.new = s.new, s.old
|
||||
}
|
||||
|
||||
// Len returns number of elements in the stack.
|
||||
func (s *GenStack[T]) Len() int {
|
||||
l := s.old.len()
|
||||
if s.old != s.new {
|
||||
l += s.new.len()
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
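The pop-order guarantee described in the comments is easiest to see with a tiny example. The package is internal to puddle, so the snippet below is written as if it lived next to gen_stack.go (for instance as an example test in the genstack package); it only uses the Push/NextGen/Pop API shown above:

```go
package genstack

import "fmt"

func ExampleGenStack_popOrder() {
	s := NewGenStack[int]()

	// Generation 1.
	s.Push(1)
	s.Push(2)

	// Start generation 2; 3 and 4 are newer than 1 and 2.
	s.NextGen()
	s.Push(3)
	s.Push(4)

	// Within a generation pops are LIFO, but the whole older generation
	// drains before the newer one: 2, 1, 4, 3.
	for {
		v, ok := s.Pop()
		if !ok {
			break
		}
		fmt.Println(v)
	}
	// Output:
	// 2
	// 1
	// 4
	// 3
}
```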
39 vendor/github.com/jackc/puddle/v2/internal/genstack/stack.go generated vendored Normal file
@ -0,0 +1,39 @@
|
||||
package genstack
|
||||
|
||||
// stack is a wrapper around an array implementing a stack.
|
||||
//
|
||||
// We cannot use a slice to represent the stack because append might change the
// pointer value of the slice. That would be an issue in the GenStack
// implementation.
|
||||
type stack[T any] struct {
|
||||
arr []T
|
||||
}
|
||||
|
||||
// push pushes a new element at the top of a stack.
|
||||
func (s *stack[T]) push(vs ...T) { s.arr = append(s.arr, vs...) }
|
||||
|
||||
// pop pops the stack top-most element.
|
||||
//
|
||||
// If stack length is zero, this method panics.
|
||||
func (s *stack[T]) pop() T {
|
||||
idx := s.len() - 1
|
||||
val := s.arr[idx]
|
||||
|
||||
// Avoid memory leak
|
||||
var zero T
|
||||
s.arr[idx] = zero
|
||||
|
||||
s.arr = s.arr[:idx]
|
||||
return val
|
||||
}
|
||||
|
||||
// takeAll returns all elements in the stack in order as they are stored - i.e.
|
||||
// the top-most stack element is the last one.
|
||||
func (s *stack[T]) takeAll() []T {
|
||||
arr := s.arr
|
||||
s.arr = nil
|
||||
return arr
|
||||
}
|
||||
|
||||
// len returns number of elements in the stack.
|
||||
func (s *stack[T]) len() int { return len(s.arr) }
|
32 vendor/github.com/jackc/puddle/v2/log.go generated vendored Normal file
@ -0,0 +1,32 @@
|
||||
package puddle
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type ints interface {
|
||||
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
|
||||
}
|
||||
|
||||
// log2Int returns the integer (floor) log2 of an integer. This function panics
// if val <= 0.
|
||||
func log2Int[T ints](val T) uint8 {
|
||||
if val <= 0 {
|
||||
panic("log2 of non-positive number does not exist")
|
||||
}
|
||||
|
||||
return log2IntRange(val, 0, uint8(8*unsafe.Sizeof(val)))
|
||||
}
|
||||
|
||||
func log2IntRange[T ints](val T, begin, end uint8) uint8 {
|
||||
length := end - begin
|
||||
if length == 1 {
|
||||
return begin
|
||||
}
|
||||
|
||||
delim := begin + length/2
|
||||
mask := T(1) << delim
|
||||
if mask > val {
|
||||
return log2IntRange(val, begin, delim)
|
||||
} else {
|
||||
return log2IntRange(val, delim, end)
|
||||
}
|
||||
}
|
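log2Int computes the floor of log2 by binary-searching over bit positions. For positive inputs this agrees with the standard library's bit length minus one, which gives a quick standalone check; the sketch below uses math/bits rather than the unexported helper:

```go
package main

import (
	"fmt"
	"math/bits"
)

// floorLog2 reproduces what log2Int computes for positive inputs:
// the position of the highest set bit, i.e. bits.Len - 1.
func floorLog2(val uint64) int {
	if val == 0 {
		panic("log2 of zero is undefined")
	}
	return bits.Len64(val) - 1
}

func main() {
	// acquireSemAll uses this to pick the largest power of two <= num:
	// for num = 24, floorLog2(24) == 4, so it tries 16, 8, 4, 2 and 1.
	for _, n := range []uint64{1, 2, 19, 24, 31, 32} {
		fmt.Printf("floorLog2(%d) = %d\n", n, floorLog2(n))
	}
}
```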
13 vendor/github.com/jackc/puddle/v2/nanotime_time.go generated vendored Normal file
@ -0,0 +1,13 @@
|
||||
//go:build purego || appengine || js
|
||||
|
||||
// This file contains the safe implementation of nanotime using time.Now().
|
||||
|
||||
package puddle
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func nanotime() int64 {
|
||||
return time.Now().UnixNano()
|
||||
}
|
12 vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go generated vendored Normal file
@ -0,0 +1,12 @@
|
||||
//go:build !purego && !appengine && !js
|
||||
|
||||
// This file contains the implementation of nanotime using runtime.nanotime.
|
||||
|
||||
package puddle
|
||||
|
||||
import "unsafe"
|
||||
|
||||
var _ = unsafe.Sizeof(0)
|
||||
|
||||
//go:linkname nanotime runtime.nanotime
|
||||
func nanotime() int64
|
696 vendor/github.com/jackc/puddle/v2/pool.go generated vendored Normal file
@ -0,0 +1,696 @@
|
||||
package puddle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/puddle/v2/internal/genstack"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
const (
|
||||
resourceStatusConstructing = 0
|
||||
resourceStatusIdle = iota
|
||||
resourceStatusAcquired = iota
|
||||
resourceStatusHijacked = iota
|
||||
)
|
||||
|
||||
// ErrClosedPool occurs on an attempt to acquire a connection from a closed pool
|
||||
// or a pool that is closed while the acquire is waiting.
|
||||
var ErrClosedPool = errors.New("closed pool")
|
||||
|
||||
// ErrNotAvailable occurs on an attempt to acquire a resource from a pool
|
||||
// that is at maximum capacity and has no available resources.
|
||||
var ErrNotAvailable = errors.New("resource not available")
|
||||
|
||||
// Constructor is a function called by the pool to construct a resource.
|
||||
type Constructor[T any] func(ctx context.Context) (res T, err error)
|
||||
|
||||
// Destructor is a function called by the pool to destroy a resource.
|
||||
type Destructor[T any] func(res T)
|
||||
|
||||
// Resource is the resource handle returned by acquiring from the pool.
|
||||
type Resource[T any] struct {
|
||||
value T
|
||||
pool *Pool[T]
|
||||
creationTime time.Time
|
||||
lastUsedNano int64
|
||||
poolResetCount int
|
||||
status byte
|
||||
}
|
||||
|
||||
// Value returns the resource value.
|
||||
func (res *Resource[T]) Value() T {
|
||||
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
|
||||
panic("tried to access resource that is not acquired or hijacked")
|
||||
}
|
||||
return res.value
|
||||
}
|
||||
|
||||
// Release returns the resource to the pool. res must not be subsequently used.
|
||||
func (res *Resource[T]) Release() {
|
||||
if res.status != resourceStatusAcquired {
|
||||
panic("tried to release resource that is not acquired")
|
||||
}
|
||||
res.pool.releaseAcquiredResource(res, nanotime())
|
||||
}
|
||||
|
||||
// ReleaseUnused returns the resource to the pool without updating when it was last used, i.e. LastUsedNanotime
|
||||
// will not change. res must not be subsequently used.
|
||||
func (res *Resource[T]) ReleaseUnused() {
|
||||
if res.status != resourceStatusAcquired {
|
||||
panic("tried to release resource that is not acquired")
|
||||
}
|
||||
res.pool.releaseAcquiredResource(res, res.lastUsedNano)
|
||||
}
|
||||
|
||||
// Destroy returns the resource to the pool for destruction. res must not be
|
||||
// subsequently used.
|
||||
func (res *Resource[T]) Destroy() {
|
||||
if res.status != resourceStatusAcquired {
|
||||
panic("tried to destroy resource that is not acquired")
|
||||
}
|
||||
go res.pool.destroyAcquiredResource(res)
|
||||
}
|
||||
|
||||
// Hijack assumes ownership of the resource from the pool. Caller is responsible
|
||||
// for cleanup of resource value.
|
||||
func (res *Resource[T]) Hijack() {
|
||||
if res.status != resourceStatusAcquired {
|
||||
panic("tried to hijack resource that is not acquired")
|
||||
}
|
||||
res.pool.hijackAcquiredResource(res)
|
||||
}
|
||||
|
||||
// CreationTime returns when the resource was created by the pool.
|
||||
func (res *Resource[T]) CreationTime() time.Time {
|
||||
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
|
||||
panic("tried to access resource that is not acquired or hijacked")
|
||||
}
|
||||
return res.creationTime
|
||||
}
|
||||
|
||||
// LastUsedNanotime returns when Release was last called on the resource measured in nanoseconds from an arbitrary time
|
||||
// (a monotonic time). Returns creation time if Release has never been called. This is only useful to compare with
|
||||
// other calls to LastUsedNanotime. In almost all cases, IdleDuration should be used instead.
|
||||
func (res *Resource[T]) LastUsedNanotime() int64 {
|
||||
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
|
||||
panic("tried to access resource that is not acquired or hijacked")
|
||||
}
|
||||
|
||||
return res.lastUsedNano
|
||||
}
|
||||
|
||||
// IdleDuration returns the duration since Release was last called on the resource. This is equivalent to subtracting
|
||||
// LastUsedNanotime from the current nanotime.
|
||||
func (res *Resource[T]) IdleDuration() time.Duration {
|
||||
if !(res.status == resourceStatusAcquired || res.status == resourceStatusHijacked) {
|
||||
panic("tried to access resource that is not acquired or hijacked")
|
||||
}
|
||||
|
||||
return time.Duration(nanotime() - res.lastUsedNano)
|
||||
}
|
||||
|
||||
// Pool is a concurrency-safe resource pool.
|
||||
type Pool[T any] struct {
|
||||
// mux is the pool internal lock. Any modification of shared state of
|
||||
// the pool (except Acquires of acquireSem) must be performed only by
|
||||
// holder of the lock. Long running operations are not allowed when mux
|
||||
// is held.
|
||||
mux sync.Mutex
|
||||
// acquireSem provides an allowance to acquire a resource.
|
||||
//
|
||||
// Releases are allowed only when caller holds mux. Acquires have to
|
||||
// happen before mux is locked (doesn't apply to semaphore.TryAcquire in
|
||||
// AcquireAllIdle).
|
||||
acquireSem *semaphore.Weighted
|
||||
destructWG sync.WaitGroup
|
||||
|
||||
allResources resList[T]
|
||||
idleResources *genstack.GenStack[*Resource[T]]
|
||||
|
||||
constructor Constructor[T]
|
||||
destructor Destructor[T]
|
||||
maxSize int32
|
||||
|
||||
acquireCount int64
|
||||
acquireDuration time.Duration
|
||||
emptyAcquireCount int64
|
||||
canceledAcquireCount atomic.Int64
|
||||
|
||||
resetCount int
|
||||
|
||||
baseAcquireCtx context.Context
|
||||
cancelBaseAcquireCtx context.CancelFunc
|
||||
closed bool
|
||||
}
|
||||
|
||||
type Config[T any] struct {
|
||||
Constructor Constructor[T]
|
||||
Destructor Destructor[T]
|
||||
MaxSize int32
|
||||
}
|
||||
|
||||
// NewPool creates a new pool. Panics if maxSize is less than 1.
|
||||
func NewPool[T any](config *Config[T]) (*Pool[T], error) {
|
||||
if config.MaxSize < 1 {
|
||||
return nil, errors.New("MaxSize must be >= 1")
|
||||
}
|
||||
|
||||
baseAcquireCtx, cancelBaseAcquireCtx := context.WithCancel(context.Background())
|
||||
|
||||
return &Pool[T]{
|
||||
acquireSem: semaphore.NewWeighted(int64(config.MaxSize)),
|
||||
idleResources: genstack.NewGenStack[*Resource[T]](),
|
||||
maxSize: config.MaxSize,
|
||||
constructor: config.Constructor,
|
||||
destructor: config.Destructor,
|
||||
baseAcquireCtx: baseAcquireCtx,
|
||||
cancelBaseAcquireCtx: cancelBaseAcquireCtx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close destroys all resources in the pool and rejects future Acquire calls.
|
||||
// Blocks until all resources are returned to pool and destroyed.
|
||||
func (p *Pool[T]) Close() {
|
||||
defer p.destructWG.Wait()
|
||||
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
|
||||
if p.closed {
|
||||
return
|
||||
}
|
||||
p.closed = true
|
||||
p.cancelBaseAcquireCtx()
|
||||
|
||||
for res, ok := p.idleResources.Pop(); ok; res, ok = p.idleResources.Pop() {
|
||||
p.allResources.remove(res)
|
||||
go p.destructResourceValue(res.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Stat is a snapshot of Pool statistics.
|
||||
type Stat struct {
|
||||
constructingResources int32
|
||||
acquiredResources int32
|
||||
idleResources int32
|
||||
maxResources int32
|
||||
acquireCount int64
|
||||
acquireDuration time.Duration
|
||||
emptyAcquireCount int64
|
||||
canceledAcquireCount int64
|
||||
}
|
||||
|
||||
// TotalResources returns the total number of resources currently in the pool.
|
||||
// The value is the sum of ConstructingResources, AcquiredResources, and
|
||||
// IdleResources.
|
||||
func (s *Stat) TotalResources() int32 {
|
||||
return s.constructingResources + s.acquiredResources + s.idleResources
|
||||
}
|
||||
|
||||
// ConstructingResources returns the number of resources with construction in progress in
|
||||
// the pool.
|
||||
func (s *Stat) ConstructingResources() int32 {
|
||||
return s.constructingResources
|
||||
}
|
||||
|
||||
// AcquiredResources returns the number of currently acquired resources in the pool.
|
||||
func (s *Stat) AcquiredResources() int32 {
|
||||
return s.acquiredResources
|
||||
}
|
||||
|
||||
// IdleResources returns the number of currently idle resources in the pool.
|
||||
func (s *Stat) IdleResources() int32 {
|
||||
return s.idleResources
|
||||
}
|
||||
|
||||
// MaxResources returns the maximum size of the pool.
|
||||
func (s *Stat) MaxResources() int32 {
|
||||
return s.maxResources
|
||||
}
|
||||
|
||||
// AcquireCount returns the cumulative count of successful acquires from the pool.
|
||||
func (s *Stat) AcquireCount() int64 {
|
||||
return s.acquireCount
|
||||
}
|
||||
|
||||
// AcquireDuration returns the total duration of all successful acquires from
|
||||
// the pool.
|
||||
func (s *Stat) AcquireDuration() time.Duration {
|
||||
return s.acquireDuration
|
||||
}
|
||||
|
||||
// EmptyAcquireCount returns the cumulative count of successful acquires from the pool
|
||||
// that waited for a resource to be released or constructed because the pool was
|
||||
// empty.
|
||||
func (s *Stat) EmptyAcquireCount() int64 {
|
||||
return s.emptyAcquireCount
|
||||
}
|
||||
|
||||
// CanceledAcquireCount returns the cumulative count of acquires from the pool
|
||||
// that were canceled by a context.
|
||||
func (s *Stat) CanceledAcquireCount() int64 {
|
||||
return s.canceledAcquireCount
|
||||
}
|
||||
|
||||
// Stat returns the current pool statistics.
|
||||
func (p *Pool[T]) Stat() *Stat {
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
|
||||
s := &Stat{
|
||||
maxResources: p.maxSize,
|
||||
acquireCount: p.acquireCount,
|
||||
emptyAcquireCount: p.emptyAcquireCount,
|
||||
canceledAcquireCount: p.canceledAcquireCount.Load(),
|
||||
acquireDuration: p.acquireDuration,
|
||||
}
|
||||
|
||||
for _, res := range p.allResources {
|
||||
switch res.status {
|
||||
case resourceStatusConstructing:
|
||||
s.constructingResources += 1
|
||||
case resourceStatusIdle:
|
||||
s.idleResources += 1
|
||||
case resourceStatusAcquired:
|
||||
s.acquiredResources += 1
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// tryAcquireIdleResource checks if there is any idle resource. If there is
|
||||
// some, this method removes it from idle list and returns it. If the idle pool
|
||||
// is empty, this method returns nil and doesn't modify the idleResources slice.
|
||||
//
|
||||
// WARNING: Caller of this method must hold the pool mutex!
|
||||
func (p *Pool[T]) tryAcquireIdleResource() *Resource[T] {
|
||||
res, ok := p.idleResources.Pop()
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
res.status = resourceStatusAcquired
|
||||
return res
|
||||
}
|
||||
|
||||
// createNewResource creates a new resource and inserts it into list of pool
|
||||
// resources.
|
||||
//
|
||||
// WARNING: Caller of this method must hold the pool mutex!
|
||||
func (p *Pool[T]) createNewResource() *Resource[T] {
|
||||
res := &Resource[T]{
|
||||
pool: p,
|
||||
creationTime: time.Now(),
|
||||
lastUsedNano: nanotime(),
|
||||
poolResetCount: p.resetCount,
|
||||
status: resourceStatusConstructing,
|
||||
}
|
||||
|
||||
p.allResources.append(res)
|
||||
p.destructWG.Add(1)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// Acquire gets a resource from the pool. If no resources are available and the pool is not at maximum capacity it will
|
||||
// create a new resource. If the pool is at maximum capacity it will block until a resource is available. ctx can be
|
||||
// used to cancel the Acquire.
|
||||
//
|
||||
// If Acquire creates a new resource the resource constructor function will receive a context that delegates Value() to
|
||||
// ctx. Canceling ctx will cause Acquire to return immediately but it will not cancel the resource creation. This avoids
|
||||
// the problem of it being impossible to create resources when the time to create a resource is greater than any one
|
||||
// caller of Acquire is willing to wait.
|
||||
func (p *Pool[T]) Acquire(ctx context.Context) (_ *Resource[T], err error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
p.canceledAcquireCount.Add(1)
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
return p.acquire(ctx)
|
||||
}
|
||||
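In practice the behavior documented above means an Acquire with a short deadline can give up while the constructor keeps running for a later caller. A hedged sketch using puddle's public API; the slow constructor and the dial address are made up for illustration:

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/jackc/puddle/v2"
)

func main() {
	pool, err := puddle.NewPool(&puddle.Config[net.Conn]{
		// A potentially slow constructor; the address is illustrative only.
		Constructor: func(ctx context.Context) (net.Conn, error) {
			d := net.Dialer{}
			return d.DialContext(ctx, "tcp", "127.0.0.1:8080")
		},
		Destructor: func(c net.Conn) { c.Close() },
		MaxSize:    4,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	// If the dial takes longer than 50ms, Acquire returns ctx.Err() here,
	// but the connection keeps being established in the background and
	// lands in the pool for the next caller.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	res, err := pool.Acquire(ctx)
	if err != nil {
		log.Printf("acquire gave up: %v", err)
		return
	}
	defer res.Release()
	log.Println("got a connection:", res.Value().RemoteAddr())
}
```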
|
||||
// acquire is a continuation of Acquire function that doesn't check context
|
||||
// validity.
|
||||
//
|
||||
// This function exists solely for benchmarking purposes.
|
||||
func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
|
||||
startNano := nanotime()
|
||||
|
||||
var waitedForLock bool
|
||||
if !p.acquireSem.TryAcquire(1) {
|
||||
waitedForLock = true
|
||||
err := p.acquireSem.Acquire(ctx, 1)
|
||||
if err != nil {
|
||||
p.canceledAcquireCount.Add(1)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
p.mux.Lock()
|
||||
if p.closed {
|
||||
p.acquireSem.Release(1)
|
||||
p.mux.Unlock()
|
||||
return nil, ErrClosedPool
|
||||
}
|
||||
|
||||
// If a resource is available in the pool.
|
||||
if res := p.tryAcquireIdleResource(); res != nil {
|
||||
if waitedForLock {
|
||||
p.emptyAcquireCount += 1
|
||||
}
|
||||
p.acquireCount += 1
|
||||
p.acquireDuration += time.Duration(nanotime() - startNano)
|
||||
p.mux.Unlock()
|
||||
return res, nil
|
||||
}
|
||||
|
||||
if len(p.allResources) >= int(p.maxSize) {
|
||||
// Unreachable code.
|
||||
panic("bug: semaphore allowed more acquires than pool allows")
|
||||
}
|
||||
|
||||
// The resource is not idle, but there is enough space to create one.
|
||||
res := p.createNewResource()
|
||||
p.mux.Unlock()
|
||||
|
||||
res, err := p.initResourceValue(ctx, res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
|
||||
p.emptyAcquireCount += 1
|
||||
p.acquireCount += 1
|
||||
p.acquireDuration += time.Duration(nanotime() - startNano)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (p *Pool[T]) initResourceValue(ctx context.Context, res *Resource[T]) (*Resource[T], error) {
|
||||
// Create the resource in a goroutine to immediately return from Acquire
|
||||
// if ctx is canceled without also canceling the constructor.
|
||||
//
|
||||
// See:
|
||||
// - https://github.com/jackc/pgx/issues/1287
|
||||
// - https://github.com/jackc/pgx/issues/1259
|
||||
constructErrChan := make(chan error)
|
||||
go func() {
|
||||
constructorCtx := newValueCancelCtx(ctx, p.baseAcquireCtx)
|
||||
value, err := p.constructor(constructorCtx)
|
||||
if err != nil {
|
||||
p.mux.Lock()
|
||||
p.allResources.remove(res)
|
||||
p.destructWG.Done()
|
||||
|
||||
// The resource won't be acquired because its
|
||||
// construction failed. We have to allow someone else to
|
||||
// take that resource.
|
||||
p.acquireSem.Release(1)
|
||||
p.mux.Unlock()
|
||||
|
||||
select {
|
||||
case constructErrChan <- err:
|
||||
case <-ctx.Done():
|
||||
// The caller is cancelled, so no-one awaits the
|
||||
// error. This branch avoids a goroutine leak.
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// The resource is already in p.allResources where it might be read. So we need to acquire the lock to update its
|
||||
// status.
|
||||
p.mux.Lock()
|
||||
res.value = value
|
||||
res.status = resourceStatusAcquired
|
||||
p.mux.Unlock()
|
||||
|
||||
// This select works because the channel is unbuffered.
|
||||
select {
|
||||
case constructErrChan <- nil:
|
||||
case <-ctx.Done():
|
||||
p.releaseAcquiredResource(res, res.lastUsedNano)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
p.canceledAcquireCount.Add(1)
|
||||
return nil, ctx.Err()
|
||||
case err := <-constructErrChan:
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
// TryAcquire gets a resource from the pool if one is immediately available. If not, it returns ErrNotAvailable. If no
|
||||
// resources are available but the pool has room to grow, a resource will be created in the background. ctx is only
|
||||
// used to cancel the background creation.
|
||||
func (p *Pool[T]) TryAcquire(ctx context.Context) (*Resource[T], error) {
|
||||
if !p.acquireSem.TryAcquire(1) {
|
||||
return nil, ErrNotAvailable
|
||||
}
|
||||
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
|
||||
if p.closed {
|
||||
p.acquireSem.Release(1)
|
||||
return nil, ErrClosedPool
|
||||
}
|
||||
|
||||
// If a resource is available now
|
||||
if res := p.tryAcquireIdleResource(); res != nil {
|
||||
p.acquireCount += 1
|
||||
return res, nil
|
||||
}
|
||||
|
||||
if len(p.allResources) >= int(p.maxSize) {
|
||||
// Unreachable code.
|
||||
panic("bug: semaphore allowed more acquires than pool allows")
|
||||
}
|
||||
|
||||
res := p.createNewResource()
|
||||
go func() {
|
||||
value, err := p.constructor(ctx)
|
||||
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
// We have to create the resource and only then release the
|
||||
// semaphore - For the time being there is no resource that
|
||||
// someone could acquire.
|
||||
defer p.acquireSem.Release(1)
|
||||
|
||||
if err != nil {
|
||||
p.allResources.remove(res)
|
||||
p.destructWG.Done()
|
||||
return
|
||||
}
|
||||
|
||||
res.value = value
|
||||
res.status = resourceStatusIdle
|
||||
p.idleResources.Push(res)
|
||||
}()
|
||||
|
||||
return nil, ErrNotAvailable
|
||||
}
|
||||
|
||||
// acquireSemAll tries to acquire num free tokens from sem. This function is
|
||||
// guaranteed to acquire at least the lowest number of tokens that has been
|
||||
// available in the semaphore during runtime of this function.
|
||||
//
|
||||
// For the time being, semaphore doesn't allow to acquire all tokens atomically
|
||||
// (see https://github.com/golang/sync/pull/19). We simulate this by trying all
|
||||
// powers of 2 that are less or equal to num.
|
||||
//
|
||||
// For example, let's imagine we have 19 free tokens in the semaphore which in
|
||||
// total has 24 tokens (i.e. the maxSize of the pool is 24 resources). Then if
|
||||
// num is 24, log2Int(24) is 4 and we try to acquire 16, 8, 4, 2 and 1
|
||||
// tokens. Out of those, the acquire of 16, 2 and 1 tokens will succeed.
|
||||
//
|
||||
// Naturally, Acquires and Releases of the semaphore might take place
|
||||
// concurrently. For this reason, it's not guaranteed that absolutely all free
|
||||
// tokens in the semaphore will be acquired. But it's guaranteed that at least
|
||||
// the minimal number of tokens that has been present over the whole process
|
||||
// will be acquired. This is sufficient for the use-case we have in this
|
||||
// package.
|
||||
//
|
||||
// TODO: Replace this with acquireSem.TryAcquireAll() if it gets to
|
||||
// upstream. https://github.com/golang/sync/pull/19
|
||||
func acquireSemAll(sem *semaphore.Weighted, num int) int {
|
||||
if sem.TryAcquire(int64(num)) {
|
||||
return num
|
||||
}
|
||||
|
||||
var acquired int
|
||||
for i := int(log2Int(num)); i >= 0; i-- {
|
||||
val := 1 << i
|
||||
if sem.TryAcquire(int64(val)) {
|
||||
acquired += val
|
||||
}
|
||||
}
|
||||
|
||||
return acquired
|
||||
}
|
||||
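The 19-of-24 example from the comment can be reproduced directly against golang.org/x/sync/semaphore. The sketch below mirrors the loop in acquireSemAll rather than calling the unexported function:

```go
package main

import (
	"fmt"
	"math/bits"

	"golang.org/x/sync/semaphore"
)

func main() {
	// A semaphore of 24 tokens with 5 already acquired leaves 19 free.
	sem := semaphore.NewWeighted(24)
	_ = sem.TryAcquire(5)

	num := 24 // how many tokens we would like (maxSize in the pool)
	acquired := 0
	if sem.TryAcquire(int64(num)) {
		acquired = num
	} else {
		// Try decreasing powers of two: 16, 8, 4, 2, 1.
		for i := bits.Len(uint(num)) - 1; i >= 0; i-- {
			val := 1 << i
			if sem.TryAcquire(int64(val)) {
				acquired += val
			}
		}
	}

	fmt.Println(acquired) // 19: the 16, 2 and 1 acquisitions succeed
}
```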
|
||||
// AcquireAllIdle acquires all currently idle resources. Its intended use is for
|
||||
// health check and keep-alive functionality. It does not update pool
|
||||
// statistics.
|
||||
func (p *Pool[T]) AcquireAllIdle() []*Resource[T] {
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
|
||||
if p.closed {
|
||||
return nil
|
||||
}
|
||||
|
||||
numIdle := p.idleResources.Len()
|
||||
if numIdle == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// In acquireSemAll we use only TryAcquire and not Acquire. Because
|
||||
// TryAcquire cannot block, the fact that we hold mutex locked and try
|
||||
// to acquire semaphore cannot result in dead-lock.
|
||||
//
|
||||
// Because the mutex is locked, no parallel Release can run. This
|
||||
// implies that the number of tokens can only decrease because some
|
||||
// Acquire/TryAcquire call can consume the semaphore token. Consequently
|
||||
// acquired is always less or equal to numIdle. Moreover if acquired <
|
||||
// numIdle, then there are some parallel Acquire/TryAcquire calls that
|
||||
// will take the remaining idle connections.
|
||||
acquired := acquireSemAll(p.acquireSem, numIdle)
|
||||
|
||||
idle := make([]*Resource[T], acquired)
|
||||
for i := range idle {
|
||||
res, _ := p.idleResources.Pop()
|
||||
res.status = resourceStatusAcquired
|
||||
idle[i] = res
|
||||
}
|
||||
|
||||
// We have to bump the generation to ensure that Acquire/TryAcquire
|
||||
// calls running in parallel (those which caused acquired < numIdle)
|
||||
// will consume old connections and not freshly released connections
|
||||
// instead.
|
||||
p.idleResources.NextGen()
|
||||
|
||||
return idle
|
||||
}
|
||||
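A brief sketch of the health-check loop the comment has in mind, assuming the pooled resource is a net.Conn; HealthCheckIdle and the ping callback are hypothetical names introduced only for this example:

```go
package poolutil

import (
	"net"

	"github.com/jackc/puddle/v2"
)

// HealthCheckIdle pings every currently idle connection and destroys the ones
// that fail; healthy ones go back to the pool without touching their idle
// timestamp. ping is a stand-in for whatever liveness probe applies.
func HealthCheckIdle(pool *puddle.Pool[net.Conn], ping func(net.Conn) error) {
	for _, res := range pool.AcquireAllIdle() {
		if err := ping(res.Value()); err != nil {
			res.Destroy()
			continue
		}
		res.ReleaseUnused()
	}
}
```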
|
||||
// CreateResource constructs a new resource without acquiring it. It goes straight in the IdlePool. If the pool is full
|
||||
// it returns an error. It can be useful to maintain warm resources under little load.
|
||||
func (p *Pool[T]) CreateResource(ctx context.Context) error {
|
||||
if !p.acquireSem.TryAcquire(1) {
|
||||
return ErrNotAvailable
|
||||
}
|
||||
|
||||
p.mux.Lock()
|
||||
if p.closed {
|
||||
p.acquireSem.Release(1)
|
||||
p.mux.Unlock()
|
||||
return ErrClosedPool
|
||||
}
|
||||
|
||||
if len(p.allResources) >= int(p.maxSize) {
|
||||
p.acquireSem.Release(1)
|
||||
p.mux.Unlock()
|
||||
return ErrNotAvailable
|
||||
}
|
||||
|
||||
res := p.createNewResource()
|
||||
p.mux.Unlock()
|
||||
|
||||
value, err := p.constructor(ctx)
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
defer p.acquireSem.Release(1)
|
||||
if err != nil {
|
||||
p.allResources.remove(res)
|
||||
p.destructWG.Done()
|
||||
return err
|
||||
}
|
||||
|
||||
res.value = value
|
||||
res.status = resourceStatusIdle
|
||||
|
||||
// If closed while constructing resource then destroy it and return an error
|
||||
if p.closed {
|
||||
go p.destructResourceValue(res.value)
|
||||
return ErrClosedPool
|
||||
}
|
||||
|
||||
p.idleResources.Push(res)
|
||||
|
||||
return nil
|
||||
}
|
||||
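CreateResource is what makes it straightforward to pre-warm a pool at startup. A hedged sketch against the public API; warmUp and the trivial int constructor are made up for illustration:

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/jackc/puddle/v2"
)

// warmUp fills the pool with up to n idle resources. puddle.ErrNotAvailable
// just means the pool is already full, so it is not treated as a failure.
func warmUp[T any](ctx context.Context, pool *puddle.Pool[T], n int) error {
	for i := 0; i < n; i++ {
		if err := pool.CreateResource(ctx); err != nil {
			if errors.Is(err, puddle.ErrNotAvailable) {
				return nil
			}
			return err
		}
	}
	return nil
}

func main() {
	pool, err := puddle.NewPool(&puddle.Config[int]{
		Constructor: func(ctx context.Context) (int, error) { return 42, nil },
		Destructor:  func(int) {},
		MaxSize:     4,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	if err := warmUp(context.Background(), pool, 4); err != nil {
		log.Fatal(err)
	}
	log.Println("idle resources:", pool.Stat().IdleResources())
}
```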
|
||||
// Reset destroys all resources, but leaves the pool open. It is intended for use when an error is detected that would
|
||||
// disrupt all resources (such as a network interruption or a server state change).
|
||||
//
|
||||
// It is safe to reset a pool while resources are checked out. Those resources will be destroyed when they are returned
|
||||
// to the pool.
|
||||
func (p *Pool[T]) Reset() {
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
|
||||
p.resetCount++
|
||||
|
||||
for res, ok := p.idleResources.Pop(); ok; res, ok = p.idleResources.Pop() {
|
||||
p.allResources.remove(res)
|
||||
go p.destructResourceValue(res.value)
|
||||
}
|
||||
}
|
||||
|
||||
// releaseAcquiredResource returns res to the pool.
|
||||
func (p *Pool[T]) releaseAcquiredResource(res *Resource[T], lastUsedNano int64) {
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
defer p.acquireSem.Release(1)
|
||||
|
||||
if p.closed || res.poolResetCount != p.resetCount {
|
||||
p.allResources.remove(res)
|
||||
go p.destructResourceValue(res.value)
|
||||
} else {
|
||||
res.lastUsedNano = lastUsedNano
|
||||
res.status = resourceStatusIdle
|
||||
p.idleResources.Push(res)
|
||||
}
|
||||
}
|
||||
|
||||
// destroyAcquiredResource destructs res's value and removes res from the pool.
|
||||
func (p *Pool[T]) destroyAcquiredResource(res *Resource[T]) {
|
||||
p.destructResourceValue(res.value)
|
||||
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
defer p.acquireSem.Release(1)
|
||||
|
||||
p.allResources.remove(res)
|
||||
}
|
||||
|
||||
func (p *Pool[T]) hijackAcquiredResource(res *Resource[T]) {
|
||||
p.mux.Lock()
|
||||
defer p.mux.Unlock()
|
||||
defer p.acquireSem.Release(1)
|
||||
|
||||
p.allResources.remove(res)
|
||||
res.status = resourceStatusHijacked
|
||||
p.destructWG.Done() // not responsible for destructing hijacked resources
|
||||
}
|
||||
|
||||
func (p *Pool[T]) destructResourceValue(value T) {
|
||||
p.destructor(value)
|
||||
p.destructWG.Done()
|
||||
}
|
28 vendor/github.com/jackc/puddle/v2/resource_list.go generated vendored Normal file
@ -0,0 +1,28 @@
|
||||
package puddle
|
||||
|
||||
type resList[T any] []*Resource[T]
|
||||
|
||||
func (l *resList[T]) append(val *Resource[T]) { *l = append(*l, val) }
|
||||
|
||||
func (l *resList[T]) popBack() *Resource[T] {
|
||||
idx := len(*l) - 1
|
||||
val := (*l)[idx]
|
||||
(*l)[idx] = nil // Avoid memory leak
|
||||
*l = (*l)[:idx]
|
||||
|
||||
return val
|
||||
}
|
||||
|
||||
func (l *resList[T]) remove(val *Resource[T]) {
|
||||
for i, elem := range *l {
|
||||
if elem == val {
|
||||
lastIdx := len(*l) - 1
|
||||
(*l)[i] = (*l)[lastIdx]
|
||||
(*l)[lastIdx] = nil // Avoid memory leak
|
||||
(*l) = (*l)[:lastIdx]
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
panic("BUG: removeResource could not find res in slice")
|
||||
}
|
27 vendor/golang.org/x/sync/LICENSE generated vendored Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22 vendor/golang.org/x/sync/PATENTS generated vendored Normal file
@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
136 vendor/golang.org/x/sync/semaphore/semaphore.go generated vendored Normal file
@ -0,0 +1,136 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package semaphore provides a weighted semaphore implementation.
|
||||
package semaphore // import "golang.org/x/sync/semaphore"
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type waiter struct {
|
||||
n int64
|
||||
ready chan<- struct{} // Closed when semaphore acquired.
|
||||
}
|
||||
|
||||
// NewWeighted creates a new weighted semaphore with the given
|
||||
// maximum combined weight for concurrent access.
|
||||
func NewWeighted(n int64) *Weighted {
|
||||
w := &Weighted{size: n}
|
||||
return w
|
||||
}
|
||||
|
||||
// Weighted provides a way to bound concurrent access to a resource.
|
||||
// The callers can request access with a given weight.
|
||||
type Weighted struct {
|
||||
size int64
|
||||
cur int64
|
||||
mu sync.Mutex
|
||||
waiters list.List
|
||||
}
|
||||
|
||||
// Acquire acquires the semaphore with a weight of n, blocking until resources
|
||||
// are available or ctx is done. On success, returns nil. On failure, returns
|
||||
// ctx.Err() and leaves the semaphore unchanged.
|
||||
//
|
||||
// If ctx is already done, Acquire may still succeed without blocking.
|
||||
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
|
||||
s.mu.Lock()
|
||||
if s.size-s.cur >= n && s.waiters.Len() == 0 {
|
||||
s.cur += n
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
if n > s.size {
|
||||
// Don't make other Acquire calls block on one that's doomed to fail.
|
||||
s.mu.Unlock()
|
||||
<-ctx.Done()
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
ready := make(chan struct{})
|
||||
w := waiter{n: n, ready: ready}
|
||||
elem := s.waiters.PushBack(w)
|
||||
s.mu.Unlock()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err := ctx.Err()
|
||||
s.mu.Lock()
|
||||
select {
|
||||
case <-ready:
|
||||
// Acquired the semaphore after we were canceled. Rather than trying to
|
||||
// fix up the queue, just pretend we didn't notice the cancelation.
|
||||
err = nil
|
||||
default:
|
||||
isFront := s.waiters.Front() == elem
|
||||
s.waiters.Remove(elem)
|
||||
// If we're at the front and there're extra tokens left, notify other waiters.
|
||||
if isFront && s.size > s.cur {
|
||||
s.notifyWaiters()
|
||||
}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return err
|
||||
|
||||
case <-ready:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// TryAcquire acquires the semaphore with a weight of n without blocking.
|
||||
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
|
||||
func (s *Weighted) TryAcquire(n int64) bool {
|
||||
s.mu.Lock()
|
||||
success := s.size-s.cur >= n && s.waiters.Len() == 0
|
||||
if success {
|
||||
s.cur += n
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return success
|
||||
}
|
||||
|
||||
// Release releases the semaphore with a weight of n.
|
||||
func (s *Weighted) Release(n int64) {
|
||||
s.mu.Lock()
|
||||
s.cur -= n
|
||||
if s.cur < 0 {
|
||||
s.mu.Unlock()
|
||||
panic("semaphore: released more than held")
|
||||
}
|
||||
s.notifyWaiters()
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Weighted) notifyWaiters() {
|
||||
for {
|
||||
next := s.waiters.Front()
|
||||
if next == nil {
|
||||
break // No more waiters blocked.
|
||||
}
|
||||
|
||||
w := next.Value.(waiter)
|
||||
if s.size-s.cur < w.n {
|
||||
// Not enough tokens for the next waiter. We could keep going (to try to
|
||||
// find a waiter with a smaller request), but under load that could cause
|
||||
// starvation for large requests; instead, we leave all remaining waiters
|
||||
// blocked.
|
||||
//
|
||||
// Consider a semaphore used as a read-write lock, with N tokens, N
|
||||
// readers, and one writer. Each reader can Acquire(1) to obtain a read
|
||||
// lock. The writer can Acquire(N) to obtain a write lock, excluding all
|
||||
// of the readers. If we allow the readers to jump ahead in the queue,
|
||||
// the writer will starve — there is always one token available for every
|
||||
// reader.
|
||||
break
|
||||
}
|
||||
|
||||
s.cur += w.n
|
||||
s.waiters.Remove(next)
|
||||
close(w.ready)
|
||||
}
|
||||
}
|
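The read-write-lock scenario in the comment above is a standard use of Weighted: readers acquire weight 1, a writer acquires the full weight. A brief sketch of that pattern, with the pool size and the work inside the locks invented for illustration:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	const maxReaders = 8
	rw := semaphore.NewWeighted(maxReaders)
	ctx := context.Background()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Read lock: weight 1, so up to maxReaders run concurrently.
			if err := rw.Acquire(ctx, 1); err != nil {
				return
			}
			defer rw.Release(1)
			fmt.Println("reader", id)
		}(i)
	}
	wg.Wait()

	// Write lock: the full weight excludes all readers. Because waiters are
	// served in FIFO order, late readers cannot starve this writer.
	if err := rw.Acquire(ctx, maxReaders); err == nil {
		fmt.Println("writer has exclusive access")
		rw.Release(maxReaders)
	}
}
```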
30 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go generated vendored
@ -1,30 +0,0 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package unsafeheader contains header declarations for the Go runtime's
|
||||
// slice and string implementations.
|
||||
//
|
||||
// This package allows x/sys to use types equivalent to
|
||||
// reflect.SliceHeader and reflect.StringHeader without introducing
|
||||
// a dependency on the (relatively heavy) "reflect" package.
|
||||
package unsafeheader
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Slice is the runtime representation of a slice.
|
||||
// It cannot be used safely or portably and its representation may change in a later release.
|
||||
type Slice struct {
|
||||
Data unsafe.Pointer
|
||||
Len int
|
||||
Cap int
|
||||
}
|
||||
|
||||
// String is the runtime representation of a string.
|
||||
// It cannot be used safely or portably and its representation may change in a later release.
|
||||
type String struct {
|
||||
Data unsafe.Pointer
|
||||
Len int
|
||||
}
|
2 vendor/golang.org/x/sys/unix/aliases.go generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
|
||||
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
|
||||
// +build go1.9
|
||||
|
||||
package unix
|
||||
|
||||
|
1 vendor/golang.org/x/sys/unix/asm_aix_ppc64.s generated vendored
@ -3,7 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_bsd_386.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (freebsd || netbsd || openbsd) && gc
|
||||
// +build freebsd netbsd openbsd
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_bsd_amd64.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
|
||||
// +build darwin dragonfly freebsd netbsd openbsd
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_bsd_arm.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (freebsd || netbsd || openbsd) && gc
|
||||
// +build freebsd netbsd openbsd
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_bsd_arm64.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (darwin || freebsd || netbsd || openbsd) && gc
|
||||
// +build darwin freebsd netbsd openbsd
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (darwin || freebsd || netbsd || openbsd) && gc
|
||||
// +build darwin freebsd netbsd openbsd
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (darwin || freebsd || netbsd || openbsd) && gc
|
||||
// +build darwin freebsd netbsd openbsd
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
1 vendor/golang.org/x/sys/unix/asm_linux_386.s generated vendored
@ -3,7 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
1 vendor/golang.org/x/sys/unix/asm_linux_amd64.s generated vendored
@ -3,7 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
1 vendor/golang.org/x/sys/unix/asm_linux_arm.s generated vendored
@ -3,7 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
3 vendor/golang.org/x/sys/unix/asm_linux_arm64.s generated vendored
@ -3,9 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && arm64 && gc
|
||||
// +build linux
|
||||
// +build arm64
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
3 vendor/golang.org/x/sys/unix/asm_linux_loong64.s generated vendored
@ -3,9 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && loong64 && gc
|
||||
// +build linux
|
||||
// +build loong64
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
3 vendor/golang.org/x/sys/unix/asm_linux_mips64x.s generated vendored
@ -3,9 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && (mips64 || mips64le) && gc
|
||||
// +build linux
|
||||
// +build mips64 mips64le
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
3 vendor/golang.org/x/sys/unix/asm_linux_mipsx.s generated vendored
@ -3,9 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && (mips || mipsle) && gc
|
||||
// +build linux
|
||||
// +build mips mipsle
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
3 vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s generated vendored
@ -3,9 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux && (ppc64 || ppc64le) && gc
|
||||
// +build linux
|
||||
// +build ppc64 ppc64le
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
2 vendor/golang.org/x/sys/unix/asm_linux_riscv64.s generated vendored
@ -3,8 +3,6 @@
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build riscv64 && gc
|
||||
// +build riscv64
|
||||
// +build gc
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.