mirror of https://github.com/ManyakRus/crud_generator.git synced 2025-07-14 14:44:48 +02:00

set NEED_USE_DB_VIEWS=false

This commit is contained in:
Nikitin Aleksandr
2024-10-10 17:42:37 +03:00
parent cbc3ff089f
commit 93ec6abf6f
242 changed files with 15760 additions and 5287 deletions


@ -15,7 +15,7 @@ run:
mod:
clear
go get -u ./internal/...
go mod tidy -compat=1.18
go mod tidy -compat=1.22
go mod vendor
go fmt ./...
build:


@ -0,0 +1,3 @@
{
}


@ -0,0 +1,3 @@
{
"contracts_organizations_lawsuits":["Договор_id"]
}
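Judging by the settings and loader code later in this commit, this new file is the primary_keys.json template: it maps a table name to the list of columns that should be treated as that table's primary key. A minimal standalone sketch of reading such a file (the local file path is an assumption for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// table name -> manually assigned primary key columns
	var primaryKeys map[string][]string

	// "primary_keys.json" is an assumed local path for this sketch
	data, err := os.ReadFile("primary_keys.json")
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(data, &primaryKeys); err != nil {
		panic(err)
	}

	fmt.Println(primaryKeys["contracts_organizations_lawsuits"]) // [Договор_id]
}
```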


@ -486,3 +486,6 @@ SERVER_GRPC_TABLE_CACHE_TEST_FILENAME = "server_grpc_table_cache_test.go_"
#TEXT_READALL - "ReadAll" function name
TEXT_READALL = "ReadAll"
#TEMPLATES_NAME_PRIMARYKEYS_FILENAME - filename of "primary_keys.json" file
TEMPLATES_NAME_PRIMARYKEYS_FILENAME = "primary_keys.json"

go.mod

@ -1,9 +1,11 @@
module github.com/ManyakRus/crud_generator
go 1.20
go 1.22.0
toolchain go1.22.1
require (
github.com/ManyakRus/starter v1.0.22
github.com/ManyakRus/starter v1.0.58
github.com/bxcodec/faker/v3 v3.8.1
github.com/davecgh/go-spew v1.1.1
github.com/iancoleman/strcase v0.3.0
@ -13,22 +15,24 @@ require (
github.com/ompluscator/dynamic-struct v1.4.0
github.com/otiai10/copy v1.14.0
github.com/serenize/snaker v0.0.0-20201027110005-a7ad2135616e
golang.org/x/tools v0.20.0
gorm.io/gorm v1.25.9
golang.org/x/tools v0.26.0
gorm.io/gorm v1.25.12
)
require (
github.com/ManyakRus/logrus v0.0.0-20231019115155-9e6fede0d792 // indirect
github.com/gobeam/stringy v0.0.7 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/pgx/v5 v5.5.5 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.7.1 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/joho/godotenv v1.5.1 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
gorm.io/driver/postgres v1.5.7 // indirect
golang.org/x/crypto v0.28.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
gorm.io/driver/postgres v1.5.9 // indirect
)

go.sum

@ -3,8 +3,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJc
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
github.com/ManyakRus/logrus v0.0.0-20231019115155-9e6fede0d792 h1:bxwxD0H3kSUAH3uNk/b74gkImcUiP7dyibmMoVwk338=
github.com/ManyakRus/logrus v0.0.0-20231019115155-9e6fede0d792/go.mod h1:OUyxCVbPW/2lC1e6cM7Am941SJiC88BhNnb24x2R3a8=
github.com/ManyakRus/starter v1.0.22 h1:M8XCr1pbTS/azHDSLp9H7HowgxlU1ONp/nfamHs42R4=
github.com/ManyakRus/starter v1.0.22/go.mod h1:E9SpMjRb4E/ycq7xUj5BC/+ab4TNCFnHMtiM1qzvCBI=
github.com/ManyakRus/starter v1.0.58 h1:/Sm97vILgqj8cmuqIPxcnM4EiROKG1eXLHf5isgkHHk=
github.com/ManyakRus/starter v1.0.58/go.mod h1:U9WyHWnavyxu4+J5D6VpF3G2yZ4KagfSNM2Soo9O+7c=
github.com/bxcodec/faker/v3 v3.8.1 h1:qO/Xq19V6uHt2xujwpaetgKhraGCapqY2CRWGD/SqcM=
github.com/bxcodec/faker/v3 v3.8.1/go.mod h1:DdSDccxF5msjFo5aO4vrobRQ8nIApg8kq3QWPEQD6+o=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@ -27,6 +27,8 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobeam/stringy v0.0.7 h1:TD8SfhedUoiANhW88JlJqfrMsihskIRpU/VTsHGnAps=
github.com/gobeam/stringy v0.0.7/go.mod h1:W3620X9dJHf2FSZF5fRnWekHcHQjwmCz8ZQ2d1qloqE=
github.com/godror/godror v0.36.0 h1:4kymETiaTOJcyF5+47JSUs44Pi0R9bTwsWtBTWqAVRs=
github.com/godror/godror v0.36.0/go.mod h1:jW1+pN+z/V0h28p9XZXVNtEvfZP/2EBfaSjKJLp3E4g=
github.com/godror/knownpb v0.1.0 h1:dJPK8s/I3PQzGGaGcUStL2zIaaICNzKKAK8BzP1uLio=
@ -53,19 +55,22 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jimsmart/schema v0.2.1 h1:MsSsqq0i86bUskhJJZ6RnrgscbDeBMalLZym6Hx9l3U=
github.com/jimsmart/schema v0.2.1/go.mod h1:4O5InKFd6Fv1xsegHVRLW/Zzm6U2iOqfE8PaI/f+wMU=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
@ -76,10 +81,12 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0=
github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@ -113,6 +120,7 @@ github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWg
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -124,6 +132,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@ -135,15 +144,15 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -162,7 +171,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -170,8 +180,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -197,8 +207,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -212,8 +222,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -221,8 +231,8 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -238,7 +248,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@ -252,8 +263,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM=
gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA=
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/gorm v1.25.4/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8=
gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=


@ -48,6 +48,7 @@ type SettingsINI struct {
TEMPLATES_CRUD_FUNCTIONS_RENAME_FILENAME string
TEMPLATES_MODEL_CRUD_DELETE_FUNCTIONS_FILENAME string
TEMPLATES_NAME_REPLACE_FILENAME string
TEMPLATES_NAME_PRIMARYKEYS_FILENAME string
TEMPLATES_NULLABLE_FILENAME string
TEMPLATES_CRUD_TABLE_UPDATE_FILENAME string
TEMPLATES_CRUD_TABLE_UPDATE_TEST_FILENAME string
@ -1041,6 +1042,11 @@ func FillSettings() {
s = Getenv(Name, true)
Settings.NEED_USE_DB_VIEWS = BoolFromString(s)
//
Name = "TEMPLATES_NAME_PRIMARYKEYS_FILENAME"
s = Getenv(Name, true)
Settings.TEMPLATES_NAME_PRIMARYKEYS_FILENAME = s
}
// CurrentDirectory - returns the current OS directory


@ -8,7 +8,7 @@ import (
"github.com/ManyakRus/crud_generator/internal/types"
"github.com/ManyakRus/starter/log"
"github.com/ManyakRus/starter/micro"
"github.com/iancoleman/strcase"
"github.com/gobeam/stringy"
"github.com/jinzhu/inflection"
"sort"
"strconv"
@ -43,7 +43,10 @@ func FormatName(Name string) string {
return Otvet
}
Otvet = strcase.ToCamel(Otvet)
//Otvet = strcase.ToCamel(Otvet) //does not handle Russian letters
str := stringy.New(Otvet)
Otvet = str.PascalCase("?", "").Get()
//replace a trailing _id with ID
lenName := len(Name)


@ -25,6 +25,7 @@ func LoadConfigsAll() {
LoadCrudFunctionsRename()
LoadFindBy()
LoadFindMassBy()
Load_MapPrimaryKeys()
}
// LoadMappings - loads the DBType = GolangType mapping from a .json file
@ -292,3 +293,25 @@ func LoadReadAll(MapAll map[string]*types.Table) map[*types.Table]bool {
return Otvet
}
// Load_MapPrimaryKeys - loads the map of manually assigned primary keys from a .json file
func Load_MapPrimaryKeys() {
dir := micro.ProgramDir_bin()
FileName := dir + config.Settings.TEMPLATE_FOLDERNAME + micro.SeparatorFile() + constants.CONFIG_FOLDER_NAME + micro.SeparatorFile() + config.Settings.TEMPLATES_NAME_PRIMARYKEYS_FILENAME
var err error
//read the file
bytes, err := os.ReadFile(FileName)
if err != nil {
TextError := fmt.Sprint("ReadFile() error: ", err)
log.Panic(TextError)
}
//unmarshal json into the map
err = json.Unmarshal(bytes, &types.MapPrimaryKeys)
if err != nil {
log.Panic("Unmarshal() error: ", err)
}
}


@ -45,6 +45,7 @@ func StartFillAll() error {
SettingsFillFromDatabase.INCLUDE_TABLES = config.Settings.INCLUDE_TABLES
SettingsFillFromDatabase.EXCLUDE_TABLES = config.Settings.EXCLUDE_TABLES
SettingsFillFromDatabase.NEED_USE_DB_VIEWS = config.Settings.NEED_USE_DB_VIEWS
SettingsFillFromDatabase.MapPrimaryKeys = types.MapPrimaryKeys
MapAll, err := postgres.FillMapTable(SettingsFillFromDatabase)
if err != nil {
log.Error("FillMapTable() error: ", err)


@ -26,6 +26,7 @@ type TableColumn struct {
ColumnColumnKey string `json:"column_key" gorm:"column:column_key;default:''"`
TableComment string `json:"table_comment" gorm:"column:table_comment;default:''"`
IsPrimaryKey bool `json:"is_primary_key" gorm:"column:is_primary_key;default:false"`
TableIsView bool `json:"table_is_view" gorm:"column:table_is_view;default:false"`
}
type TableRowsStruct struct {
@ -131,8 +132,12 @@ SELECT
WHEN tpk.table_name is not null
THEN true
ELSE false END
as is_primary_key
as is_primary_key,
CASE
WHEN v.table_name is not null
THEN true
ELSE false END
as is_view
FROM
information_schema.columns c
@ -312,6 +317,8 @@ order by
//Type_go := SQLMapping1.GoType
//Column1.TypeGo = Type_go
IsPrimaryKey_manual := Find_IsPrimaryKey_manual(SettingsFill, v.TableName, v.ColumnName)
IsPrimaryKey := v.IsPrimaryKey || IsPrimaryKey_manual
//
if v.ColumnIsIdentity == "YES" {
Column1.IsIdentity = true
@ -323,8 +330,8 @@ order by
Column1.OrderNumber = OrderNumberColumn
Column1.TableKey = v.ColumnTableKey
Column1.ColumnKey = v.ColumnColumnKey
Column1.IsPrimaryKey = v.IsPrimaryKey
if v.IsPrimaryKey == true {
Column1.IsPrimaryKey = IsPrimaryKey
if IsPrimaryKey == true {
PrimaryKeyColumnsCount++
}
@ -663,3 +670,21 @@ func FillTypeGo(SettingsFill types.SettingsFillFromDatabase, Column1 *types.Colu
return
}
// Find_IsPrimaryKey_manual - returns true if this column is a PrimaryKey (filled in manually)
func Find_IsPrimaryKey_manual(SettingsFill types.SettingsFillFromDatabase, TableName string, ColumnName string) bool {
Otvet := false
MassColumnStrings, ok := SettingsFill.MapPrimaryKeys[TableName]
if ok == false {
return Otvet
}
for _, ColumnString := range MassColumnStrings {
if ColumnString == ColumnName {
Otvet = true
}
}
return Otvet
}


@ -28,6 +28,7 @@ type Table struct {
Comment string `json:"table_comment" gorm:"column:table_comment;default:''"`
RowsCount int64
PrimaryKeyColumnsCount int
IsView bool
}
type ReplaceStruct struct {
@ -87,4 +88,8 @@ type SettingsFillFromDatabase struct {
EXCLUDE_TABLES string
NEED_USE_DB_VIEWS bool
MapDBTypes map[string]*dbmeta.SQLMapping //map of database types to golang types
MapPrimaryKeys map[string][]string
}
// MapPrimaryKeys - map of primary keys added manually in the primary_keys.json file
var MapPrimaryKeys = make(map[string][]string, 0)


@ -3,12 +3,16 @@
package micro
import (
"bytes"
"context"
"encoding/gob"
"errors"
"fmt"
"github.com/google/uuid"
"hash/fnv"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"unicode"
@ -87,6 +91,17 @@ func Pause(ms int) {
Sleep(ms)
}
// Pause_ctx - pauses the program for the given number of milliseconds, taking the passed context into account
func Pause_ctx(ctx context.Context, ms int) {
Duration := time.Duration(ms) * time.Millisecond
select {
case <-ctx.Done():
case <-time.After(Duration):
}
}
// FindDirUp - returns the name of the directory one level up
func FindDirUp(dir string) string {
otvet := dir
@ -827,5 +842,165 @@ func StringFloat32_Dimension2(f float32) string {
// usage:
// defer micro.ShowTimePassed(time.Now())
func ShowTimePassed(StartAt time.Time) {
fmt.Print("Time passed: ", time.Since(StartAt))
fmt.Printf("Time passed: %s\n", time.Since(StartAt))
}
// ShowTimePassedSeconds - prints the time in seconds elapsed since start
// usage:
// defer micro.ShowTimePassedSeconds(time.Now())
func ShowTimePassedSeconds(StartAt time.Time) {
fmt.Printf("Time passed: %s\n", time.Since(StartAt).Round(time.Second))
}
// ShowTimePassedMilliSeconds - prints the time in milliseconds elapsed since start
// usage:
// defer micro.ShowTimePassedMilliSeconds(time.Now())
func ShowTimePassedMilliSeconds(StartAt time.Time) {
fmt.Printf("Time passed: %s\n", time.Since(StartAt).Round(time.Millisecond))
}
// StructDeepCopy - copies the structure from src into dist
// dist must be passed as a pointer (&)
func StructDeepCopy(src, dist interface{}) (err error) {
buf := bytes.Buffer{}
if err = gob.NewEncoder(&buf).Encode(src); err != nil {
return
}
return gob.NewDecoder(&buf).Decode(dist)
}
// IsEmptyValue - returns true if the value is the default (0, empty string, empty slice)
func IsEmptyValue(v any) bool {
rv := reflect.ValueOf(v)
Otvet := !rv.IsValid() || reflect.DeepEqual(rv.Interface(), reflect.Zero(rv.Type()).Interface())
return Otvet
}
// StringIdentifierFromUUID - returns a string made from a UUID
func StringIdentifierFromUUID() string {
Otvet := uuid.New().String()
Otvet = strings.ReplaceAll(Otvet, "-", "")
return Otvet
}
// IndexSubstringMin - returns the index of the earliest occurrence of any of the substrings in the string
func IndexSubstringMin(s string, MassSubstr ...string) int {
Otvet := -1
for _, v := range MassSubstr {
Otvet1 := -1
if v != "" {
Otvet1 = strings.Index(s, v)
}
if Otvet1 != -1 && (Otvet1 < Otvet || Otvet == -1) {
Otvet = Otvet1
}
}
return Otvet
}
// IndexSubstringMin2 - returns the index of the earliest occurrence of either substring in the string
func IndexSubstringMin2(s string, substr1, substr2 string) int {
Otvet := -1
Otvet1 := -1
Otvet2 := -1
if substr1 != "" {
Otvet1 = strings.Index(s, substr1)
}
if substr2 != "" {
Otvet2 = strings.Index(s, substr2)
}
if Otvet1 != -1 && (Otvet1 < Otvet2 || Otvet2 == -1) {
Otvet = Otvet1
} else {
Otvet = Otvet2
}
return Otvet
}
// SortMapStringInt_Desc - sorts the map by value, descending
func SortMapStringInt_Desc(values map[string]int) []string {
type kv struct {
Key string
Value int
}
var ss []kv
for k, v := range values {
ss = append(ss, kv{k, v})
}
sort.Slice(ss, func(i, j int) bool {
return ss[i].Value > ss[j].Value
})
ranked := make([]string, len(values))
for i, kv := range ss {
ranked[i] = kv.Key
}
return ranked
}
// IsNilInterface - checks an interface value for nil
func IsNilInterface(i any) bool {
iv := reflect.ValueOf(i)
if !iv.IsValid() {
return true
}
switch iv.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Func, reflect.Interface:
return iv.IsNil()
default:
return false
}
}
// StringFromMassInt64 - converts a slice of int64 into a string
func StringFromMassInt64(A []int64, delim string) string {
var buffer bytes.Buffer
for i := 0; i < len(A); i++ {
s1 := StringFromInt64(A[i])
buffer.WriteString(s1)
if i != len(A)-1 {
buffer.WriteString(delim)
}
}
return buffer.String()
}
// IsInt - checks whether the string is an integer
func IsInt(s string) bool {
Otvet := false
if s == "" {
return Otvet
}
for _, c := range s {
if !unicode.IsDigit(c) {
return Otvet
}
}
Otvet = true
return Otvet
}
// Int32FromString - returns an int32 parsed from a string
func Int32FromString(s string) (int32, error) {
var Otvet int32
var err error
Otvet64, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return Otvet, err
}
Otvet = int32(Otvet64)
return Otvet, err
}
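A minimal usage sketch for a few of the helpers added to the vendored micro package in this commit; the import path matches how the generator's own code imports this package earlier in the diff, and the expected outputs follow directly from the code shown above:

```go
package main

import (
	"fmt"

	"github.com/ManyakRus/starter/micro"
)

func main() {
	// earliest index of any of the given substrings
	fmt.Println(micro.IndexSubstringMin("hello world", "world", "o")) // 4

	// map keys sorted by value, descending
	fmt.Println(micro.SortMapStringInt_Desc(map[string]int{"a": 1, "b": 3, "c": 2})) // [b c a]

	// digit check and int32 parsing
	fmt.Println(micro.IsInt("12345")) // true
	n, err := micro.Int32FromString("42")
	fmt.Println(n, err) // 42 <nil>
}
```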


@ -12,17 +12,11 @@ import (
"strings"
"time"
//"github.com/jackc/pgconn"
"os"
"sync"
//"time"
//"github.com/jmoiron/sqlx"
//_ "github.com/lib/pq"
"github.com/ManyakRus/starter/contextmain"
"github.com/ManyakRus/starter/micro"
"github.com/ManyakRus/starter/stopapp"
"os"
"sync"
"gorm.io/driver/postgres"
"gorm.io/gorm"
@ -55,6 +49,9 @@ type SettingsINI struct {
DB_PASSWORD string
}
// NamingStrategy - structure that stores table naming settings
var NamingStrategy = schema.NamingStrategy{}
// Connect_err - connects to the database
func Connect() {
@ -65,6 +62,12 @@ func Connect() {
port_checker.CheckPort(Settings.DB_HOST, Settings.DB_PORT)
err := Connect_err()
LogInfo_Connected(err)
}
// LogInfo_Connected - writes a message to the log, or panics on error
func LogInfo_Connected(err error) {
if err != nil {
log.Panicln("POSTGRES gorm Connect() to database host: ", Settings.DB_HOST, ", Error: ", err)
} else {
@ -81,6 +84,33 @@ func Connect_err() error {
return err
}
// Connect_WithApplicationName_SingularTableName - connects to the database with the given application name, without renaming table names
func Connect_WithApplicationName_SingularTableName(ApplicationName string) {
err := Connect_WithApplicationName_SingularTableName_err(ApplicationName)
if err != nil {
log.Panicln("POSTGRES gorm Connect_WithApplicationName_SingularTableName_err() to database host: ", Settings.DB_HOST, ", Error: ", err)
} else {
log.Info("POSTGRES gorm Connected. host: ", Settings.DB_HOST, ", base name: ", Settings.DB_NAME, ", schema: ", Settings.DB_SCHEMA)
}
}
// Connect_WithApplicationName_SingularTableName_err - connects to the database with the given application name, without renaming table names
func Connect_WithApplicationName_SingularTableName_err(ApplicationName string) error {
SetSingularTableNames(true)
err := Connect_WithApplicationName_err(ApplicationName)
return err
}
// Connect_WithApplicationName - connects to the database with the given application name
func Connect_WithApplicationName(ApplicationName string) {
err := Connect_WithApplicationName_err(ApplicationName)
if err != nil {
log.Panicln("POSTGRES gorm Connect_WithApplicationName_err() to database host: ", Settings.DB_HOST, ", Error: ", err)
} else {
log.Info("POSTGRES gorm Connected. host: ", Settings.DB_HOST, ", base name: ", Settings.DB_NAME, ", schema: ", Settings.DB_SCHEMA)
}
}
// Connect_WithApplicationName_err - connects to the database with the given application name
func Connect_WithApplicationName_err(ApplicationName string) error {
var err error
@ -89,29 +119,24 @@ func Connect_WithApplicationName_err(ApplicationName string) error {
FillSettings()
}
//ctxMain := context.Background()
//ctxMain := contextmain.GetContext()
//ctx, cancel := context.WithTimeout(ctxMain, 5*time.Second)
//defer cancel()
// get the database connection URL.
//get the database connection URL.
dsn := GetDSN(ApplicationName)
//
conf := &gorm.Config{
Logger: gormlogger.Default.LogMode(gormlogger.Silent),
}
//conn := postgres.Open(dsn)
conf.NamingStrategy = NamingStrategy
dialect := postgres.New(postgres.Config{
//
config := postgres.Config{
DSN: dsn,
PreferSimpleProtocol: true, //needed to run multi-statement queries
})
Conn, err = gorm.Open(dialect, conf)
}
//Conn, err = gorm.Open(conn, conf)
Conn.Config.NamingStrategy = schema.NamingStrategy{TablePrefix: Settings.DB_SCHEMA + "."}
//Conn.Config.Logger = gormlogger.Default.LogMode(gormlogger.Error)
//
dialect := postgres.New(config)
Conn, err = gorm.Open(dialect, conf)
if err == nil {
DB, err := Conn.DB()
@ -259,7 +284,54 @@ func WaitStop() {
// StartDB - establishes the database connection, handles shutdown, etc.
func StartDB() {
Connect()
var err error
ctx := contextmain.GetContext()
WaitGroup := stopapp.GetWaitGroup_Main()
err = Start_ctx(&ctx, WaitGroup)
LogInfo_Connected(err)
}
// Start_ctx - the procedures required to connect to the database server
// Your own context and WaitGroup are needed for Graceful shutdown of the service
// If you use this repository to start and stop the service, you can simply call StartDB()
func Start_ctx(ctx *context.Context, WaitGroup *sync.WaitGroup) error {
var err error
//remember the context
contextmain.Ctx = ctx
if ctx == nil {
contextmain.GetContext()
}
//remember the WaitGroup
stopapp.SetWaitGroup_Main(WaitGroup)
if WaitGroup == nil {
stopapp.StartWaitStop()
}
//
err = Connect_err()
if err != nil {
return err
}
stopapp.GetWaitGroup_Main().Add(1)
go WaitStop()
stopapp.GetWaitGroup_Main().Add(1)
go ping_go()
return err
}
// Start - establishes the database connection, handles shutdown, etc.
func Start(ApplicationName string) {
err := Connect_WithApplicationName_err(ApplicationName)
if err != nil {
log.Panic("Postgres gorm Start() error: ", err)
}
stopapp.GetWaitGroup_Main().Add(1)
go WaitStop()
@ -269,15 +341,10 @@ func StartDB() {
}
// Start - establishes the database connection, handles shutdown, etc.
func Start(ApplicationName string) {
Connect_WithApplicationName_err(ApplicationName)
stopapp.GetWaitGroup_Main().Add(1)
go WaitStop()
stopapp.GetWaitGroup_Main().Add(1)
go ping_go()
// Start_SingularTableName - establishes the database connection, handles shutdown, etc. Without renaming table names to the plural form
func Start_SingularTableName(ApplicationName string) {
SetSingularTableNames(true)
Start(ApplicationName)
}
@ -327,6 +394,8 @@ func FillSettings() {
}
//
NamingStrategy.SchemaName(Settings.DB_SCHEMA)
NamingStrategy.TablePrefix = Settings.DB_SCHEMA + "."
}
// GetDSN - returns the database connection string
@ -356,7 +425,10 @@ func GetConnection() *gorm.DB {
// GetConnection_WithApplicationName - returns a connection to the required database, with the given application name
func GetConnection_WithApplicationName(ApplicationName string) *gorm.DB {
if Conn == nil {
Connect_WithApplicationName_err(ApplicationName)
err := Connect_WithApplicationName_err(ApplicationName)
if err != nil {
log.Panic("GetConnection_WithApplicationName() error: ", err)
}
}
return Conn
@ -441,8 +513,8 @@ func RawMultipleSQL(db *gorm.DB, TextSQL string) *gorm.DB {
}
//start a transaction
//tx = tx.Begin()
//defer tx.Commit()
//tx0 := tx.Begin()
//defer tx0.Commit()
//
TextSQL1 := ""
@ -473,3 +545,86 @@ func RawMultipleSQL(db *gorm.DB, TextSQL string) *gorm.DB {
return tx
}
// ReplaceSchema - replaces "public." with Settings.DB_SCHEMA
func ReplaceSchema(TextSQL string) string {
Otvet := TextSQL
if Settings.DB_SCHEMA == "" {
return Otvet
}
Otvet = strings.ReplaceAll(Otvet, "\tpublic.", "\t"+Settings.DB_SCHEMA+".")
Otvet = strings.ReplaceAll(Otvet, "\npublic.", "\n"+Settings.DB_SCHEMA+".")
Otvet = strings.ReplaceAll(Otvet, " public.", " "+Settings.DB_SCHEMA+".")
return Otvet
}
// ReplaceTemporaryTableNamesToUnique - replaces temporary table names like "TableName" with "TableName_UUID"
func ReplaceTemporaryTableNamesToUnique(TextSQL string) string {
Otvet := TextSQL
sUUID := micro.StringIdentifierFromUUID()
map1 := make(map[string]int)
//find all temporary tables and record them in map1
s0 := Otvet
for {
sFind := "CREATE TEMPORARY TABLE "
sFind2 := "create temporary table "
pos1 := micro.IndexSubstringMin2(s0, sFind, sFind2)
if pos1 < 0 {
break
}
s2 := s0[pos1+len(sFind):]
pos2 := micro.IndexSubstringMin2(s2, " ", "\n")
if pos2 <= 0 {
break
}
name1 := s0[pos1+len(sFind) : pos1+len(sFind)+pos2]
if name1 == "" {
break
}
s0 = s0[pos1+len(sFind)+pos2:]
map1[name1] = len(name1)
}
//replace all temporary tables with unique names
MassNames := micro.SortMapStringInt_Desc(map1)
for _, v := range MassNames {
sFirst := " "
Otvet = strings.ReplaceAll(Otvet, sFirst+v+" ", " "+v+"_"+sUUID+" ")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"\n", " "+v+"_"+sUUID+"\n")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"\t", " "+v+"_"+sUUID+"\t")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+";", " "+v+"_"+sUUID+";")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"(", " "+v+"_"+sUUID+"(")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+")", " "+v+"_"+sUUID+")")
sFirst = "\t"
Otvet = strings.ReplaceAll(Otvet, sFirst+v+" ", " "+v+"_"+sUUID+" ")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"\n", " "+v+"_"+sUUID+"\n")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"\t", " "+v+"_"+sUUID+"\t")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+";", " "+v+"_"+sUUID+";")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"(", " "+v+"_"+sUUID+"(")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+")", " "+v+"_"+sUUID+")")
sFirst = "\n"
Otvet = strings.ReplaceAll(Otvet, sFirst+v+" ", " "+v+"_"+sUUID+" ")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"\n", " "+v+"_"+sUUID+"\n")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"\t", " "+v+"_"+sUUID+"\t")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+";", " "+v+"_"+sUUID+";")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+"(", " "+v+"_"+sUUID+"(")
Otvet = strings.ReplaceAll(Otvet, sFirst+v+")", " "+v+"_"+sUUID+")")
}
return Otvet
}
// SetSingularTableNames - changes the "SingularTable" setting: whether table names should NOT be renamed to the plural form
// true = do not rename
func SetSingularTableNames(IsSingular bool) {
NamingStrategy.SingularTable = IsSingular
}
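To illustrate what the new ReplaceTemporaryTableNamesToUnique helper does with multi-statement SQL, here is a hedged sketch; the postgres_gorm import path is an assumption based on the other starter sub-packages imported in this file, and the UUID suffix is different on every call:

```go
package main

import (
	"fmt"

	"github.com/ManyakRus/starter/postgres_gorm" // assumed package path
)

func main() {
	TextSQL := `CREATE TEMPORARY TABLE temp_users (id int);
SELECT * FROM temp_users;`

	// Every temporary table name receives a per-call UUID suffix so that
	// concurrent multi-statement queries do not clash, roughly:
	//   CREATE TEMPORARY TABLE temp_users_<uuid> (id int);
	//   SELECT * FROM temp_users_<uuid>;
	fmt.Println(postgres_gorm.ReplaceTemporaryTableNamesToUnique(TextSQL))
}
```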


@ -94,7 +94,8 @@ func StopAppAndWait() {
if contextmain.CancelContext != nil {
contextmain.CancelContext()
} else {
os.Exit(0)
//os.Exit(0)
log.Warn("Context = nil")
}
GetWaitGroup_Main().Wait()
@ -106,7 +107,9 @@ func WaitStop() {
select {
case <-SignalInterrupt:
log.Warn("Interrupt clean shutdown.")
contextmain.CancelContext()
if contextmain.CancelContext != nil {
contextmain.CancelContext()
}
case <-contextmain.GetContext().Done():
log.Warn("Context app is canceled.")
}

vendor/github.com/gobeam/stringy/.coverprofile (generated, vendored, new file)

@ -0,0 +1,86 @@
mode: set
github.com/gobeam/stringy/helper.go:10.70,11.14 1 1
github.com/gobeam/stringy/helper.go:15.2,16.39 2 1
github.com/gobeam/stringy/helper.go:19.2,24.14 5 1
github.com/gobeam/stringy/helper.go:11.14,14.3 2 1
github.com/gobeam/stringy/helper.go:16.39,17.30 1 1
github.com/gobeam/stringy/helper.go:27.49,29.26 2 1
github.com/gobeam/stringy/helper.go:32.2,33.11 2 1
github.com/gobeam/stringy/helper.go:29.26,31.3 1 1
github.com/gobeam/stringy/helper.go:36.39,37.20 1 1
github.com/gobeam/stringy/helper.go:42.2,42.8 1 1
github.com/gobeam/stringy/helper.go:37.20,39.3 1 1
github.com/gobeam/stringy/helper.go:39.8,41.3 1 1
github.com/gobeam/stringy/helper.go:45.62,48.57 3 1
github.com/gobeam/stringy/helper.go:51.2,52.21 2 1
github.com/gobeam/stringy/helper.go:57.2,58.46 2 1
github.com/gobeam/stringy/helper.go:48.57,50.3 1 1
github.com/gobeam/stringy/helper.go:52.21,54.3 1 1
github.com/gobeam/stringy/helper.go:54.8,56.3 1 1
github.com/gobeam/stringy/helper.go:61.33,62.24 1 1
github.com/gobeam/stringy/helper.go:65.2,65.11 1 1
github.com/gobeam/stringy/helper.go:62.24,64.3 1 1
github.com/gobeam/stringy/stringy.go:47.41,49.2 1 1
github.com/gobeam/stringy/stringy.go:55.63,56.49 1 1
github.com/gobeam/stringy/stringy.go:60.2,65.56 5 1
github.com/gobeam/stringy/stringy.go:68.2,68.52 1 1
github.com/gobeam/stringy/stringy.go:73.2,74.10 2 1
github.com/gobeam/stringy/stringy.go:56.49,58.3 1 1
github.com/gobeam/stringy/stringy.go:65.56,67.3 1 1
github.com/gobeam/stringy/stringy.go:68.52,70.3 1 1
github.com/gobeam/stringy/stringy.go:70.8,70.27 1 1
github.com/gobeam/stringy/stringy.go:70.27,72.3 1 1
github.com/gobeam/stringy/stringy.go:80.32,84.9 4 1
github.com/gobeam/stringy/stringy.go:87.2,88.8 2 1
github.com/gobeam/stringy/stringy.go:91.2,91.41 1 1
github.com/gobeam/stringy/stringy.go:84.9,86.3 1 1
github.com/gobeam/stringy/stringy.go:88.8,90.3 1 1
github.com/gobeam/stringy/stringy.go:100.50,104.33 3 1
github.com/gobeam/stringy/stringy.go:107.2,107.36 1 1
github.com/gobeam/stringy/stringy.go:104.33,106.3 1 1
github.com/gobeam/stringy/stringy.go:112.51,114.29 2 1
github.com/gobeam/stringy/stringy.go:119.2,119.13 1 1
github.com/gobeam/stringy/stringy.go:114.29,115.37 1 1
github.com/gobeam/stringy/stringy.go:115.37,117.4 1 1
github.com/gobeam/stringy/stringy.go:126.80,128.40 2 1
github.com/gobeam/stringy/stringy.go:131.2,133.10 3 1
github.com/gobeam/stringy/stringy.go:128.40,130.3 1 1
github.com/gobeam/stringy/stringy.go:137.42,140.25 3 1
github.com/gobeam/stringy/stringy.go:143.2,143.24 1 1
github.com/gobeam/stringy/stringy.go:140.25,141.33 1 1
github.com/gobeam/stringy/stringy.go:148.30,150.2 1 1
github.com/gobeam/stringy/stringy.go:158.62,163.2 4 1
github.com/gobeam/stringy/stringy.go:166.41,170.25 4 1
github.com/gobeam/stringy/stringy.go:173.2,174.30 2 1
github.com/gobeam/stringy/stringy.go:170.25,171.33 1 1
github.com/gobeam/stringy/stringy.go:179.34,181.26 2 1
github.com/gobeam/stringy/stringy.go:184.2,184.14 1 1
github.com/gobeam/stringy/stringy.go:181.26,183.3 1 1
github.com/gobeam/stringy/stringy.go:188.34,193.2 4 1
github.com/gobeam/stringy/stringy.go:199.62,203.27 4 1
github.com/gobeam/stringy/stringy.go:206.2,206.17 1 1
github.com/gobeam/stringy/stringy.go:203.27,205.3 1 1
github.com/gobeam/stringy/stringy.go:207.13,210.25 3 1
github.com/gobeam/stringy/stringy.go:211.12,214.41 3 1
github.com/gobeam/stringy/stringy.go:215.12,218.156 3 1
github.com/gobeam/stringy/stringy.go:219.10,220.15 1 1
github.com/gobeam/stringy/stringy.go:226.49,229.34 3 1
github.com/gobeam/stringy/stringy.go:238.2,238.24 1 1
github.com/gobeam/stringy/stringy.go:229.34,234.13 2 1
github.com/gobeam/stringy/stringy.go:234.13,236.4 1 1
github.com/gobeam/stringy/stringy.go:244.61,247.2 2 1
github.com/gobeam/stringy/stringy.go:253.60,256.2 2 1
github.com/gobeam/stringy/stringy.go:260.34,263.57 3 1
github.com/gobeam/stringy/stringy.go:266.2,266.18 1 1
github.com/gobeam/stringy/stringy.go:263.57,265.3 1 1
github.com/gobeam/stringy/stringy.go:271.34,276.43 4 1
github.com/gobeam/stringy/stringy.go:279.2,279.23 1 1
github.com/gobeam/stringy/stringy.go:276.43,278.3 1 1
github.com/gobeam/stringy/stringy.go:287.62,292.2 4 1
github.com/gobeam/stringy/stringy.go:296.46,299.2 2 1
github.com/gobeam/stringy/stringy.go:304.60,306.40 2 1
github.com/gobeam/stringy/stringy.go:309.2,309.35 1 1
github.com/gobeam/stringy/stringy.go:306.40,308.3 1 1
github.com/gobeam/stringy/stringy.go:314.43,317.2 2 1
github.com/gobeam/stringy/stringy.go:321.34,324.2 2 1
github.com/gobeam/stringy/stringy.go:328.34,331.2 2 1

vendor/github.com/gobeam/stringy/.gitignore (generated, vendored, new file)

@ -0,0 +1,83 @@
# directory structure
bin/*
pkg/*
!.gitkeep
# goop - go package manager
.vendor
.vendor/*
vendor/*
example/vendor/*
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm
/*.iml
## Directory-based project format:
.idea/
## File-based project format:
*.ipr
*.iws
## Plugin-specific files:
# IntelliJ
out/
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
private.pem
public.pem
# Thumbnails
._*
# Files that might appear on external disk
.Spotlight-V100
.Trashes
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
tmp/*
config/config.ini
main
gin-bin

vendor/github.com/gobeam/stringy/.travis.yml (generated, vendored, new file)

@ -0,0 +1,12 @@
language: go
sudo: false
go_import_path: github.com/gobeam/stringy
go:
- 1.13
before_install:
- go get github.com/mattn/goveralls
script:
- go test -cover -coverprofile=.coverprofile $(go list .)
- $GOPATH/bin/goveralls -service=travis-ci -coverprofile=.coverprofile -repotoken=$REPO_TOKEN

vendor/github.com/gobeam/stringy/CODE_OF_CONDUCT.md (generated, vendored, new file)

@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at roshanranabhat11@gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

vendor/github.com/gobeam/stringy/CONTRIBUTING.md (generated, vendored, new file)

@ -0,0 +1,58 @@
# Contributing to Transcriptase
We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:
- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing new features
- Becoming a maintainer
## Any contributions you make will be under the MIT Software License
In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern.
## Report bugs using Github's [issues](https://github.com/gobeam/stringy/issues)
A relevant coding style guideline is the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments).
Documentation
-------------
If you contribute anything that changes the behavior of the application,
document it in the follow places as applicable:
* the code itself, through clear comments and unit tests
* [README](README.md)
This includes new features, additional variants of behavior, and breaking
changes.
Testing
-------
Tests are written using golang's standard testing tools, and are run prior to
the PR being accepted.
Issues
------
For creating an issue:
* **Bugs:** please be as thorough as possible, with steps to recreate the issue
and any other relevant information.
* **Feature Requests:** please include functionality and use cases. If this is
an extension of a current feature, please include whether or not this would
be a breaking change or how to extend the feature with backwards
compatibility.
* **Security Vulnerability:** please report it at
https://my.xfinity.com/vulnerabilityreport and contact the [maintainers](MAINTAINERS.md).
If you wish to work on an issue, please assign it to yourself. If you have any
questions regarding implementation, feel free to ask clarifying questions on
the issue itself.
Pull Requests
-------------
* should be narrowly focused with no more than 3 or 4 logical commits
* when possible, address no more than one issue
* should be reviewable in the GitHub code review tool
* should be linked to any issues it relates to (i.e. issue number after (#) in commit messages or pull request message)
* should conform to idiomatic golang code formatting

vendor/github.com/gobeam/stringy/LICENSE (generated, vendored, new file)

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,32 @@
# Description
Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change.
Fixes # (issue)
## Type of change
Please delete options that are not relevant.
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
# How Has This Been Tested?
Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration
- [ ] Test A
- [ ] Test B
# Checklist:
- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged and published in downstream modules

vendor/github.com/gobeam/stringy/README.md (generated, vendored, new file)

@ -0,0 +1,421 @@
# Golang String manipulation helper package
![Workflow](https://github.com/gobeam/stringy/actions/workflows/ci.yml/badge.svg) [![Build][Build-Status-Image]][Build-Status-Url] [![Go Report Card](https://goreportcard.com/badge/github.com/gobeam/stringy?branch=master&kill_cache=1)](https://goreportcard.com/report/github.com/gobeam/Stringy) [![GoDoc][godoc-image]][godoc-url]
[![Coverage Status](https://coveralls.io/repos/github/gobeam/stringy/badge.svg)](https://coveralls.io/github/gobeam/stringy)
Convert strings to camel case, snake case, kebab case / slugify, custom delimiter, pad strings, tease strings and much more with the help of the Stringy package. You can convert camelcase to snakecase or kebabcase, or snakecase to camelcase and kebabcase, and vice versa. This package was inspired by the PHP package [danielstjules/Stringy](https://github.com/danielstjules/Stringy).
* [Why?](#why)
* [Installation](#installation)
* [Functions](#functions)
* [Running the tests](#running-the-tests)
* [Contributing](#contributing)
* [License](#license)
<table>
<tr>
<td><a href="#betweenstart-end-string-stringmanipulation">Between</a></td>
<td><a href="#boolean-bool">Boolean</a></td>
<td><a href="#camelcaserule-string-string">CamelCase</a></td>
</tr>
<tr>
<td><a href="#containsallcheck-string-bool">ContainsAll</a></td>
<td><a href="#delimiteddelimiter-string-rule-string-stringmanipulation">Delimited</a></td>
<td><a href="#get-string">Get</a></td>
</tr>
<tr>
<td><a href="#kebabcaserule-string-stringmanipulation">KebabCase</a></td>
<td><a href="#lcfirst-string">LcFirst</a></td>
<td><a href="#lines-string">Lines</a></td>
</tr>
<tr>
<td><a href="#padlength-int-with-padtype-string-string">Pad</a></td>
<td><a href="#removespecialcharacter-string">RemoveSpecialCharacter</a></td>
<td><a href="#replacefirstsearch-replace-string-string">ReplaceFirst</a></td>
</tr>
<tr>
<td><a href="#replacelastsearch-replace-string-string">ReplaceLast</a></td>
<td><a href="#reverse-string">Reverse</a></td>
<td><a href="#shuffle-string">Shuffle</a></td>
</tr>
<tr>
<td><a href="#surroundwith-string-string">Surround</a></td>
<td><a href="#snakecaserule-string-stringmanipulation">SnakeCase</a></td>
<td><a href="#teaselength-int-indicator-string-string">Tease</a></td>
</tr>
<tr>
<td><a href="#tolower-string">ToLower</a></td>
<td><a href="#toupper-string">ToUpper</a></td>
<td><a href="#ucfirst-string">UcFirst</a></td>
</tr>
<tr>
<td><a href="#firstlength-int-string">First</a></td>
<td><a href="#lastlength-int-string">Last</a></td>
<td><a href="#prefixstring-string">Prefix</a></td>
</tr>
<tr>
<td><a href="#suffixstring-string">Suffix</a></td>
<td><a href="#acronym-string">Acronym</a></td>
<td><a href="#title-string">Title</a></td>
</tr>
<tr>
<td><a href="#pascalcaserule-string-string">PascalCase</a></td>
<td></td>
<td></td>
</tr>
</table>
## Why?
Golang has a very rich strings core package, but some extra helper functions are not available, and this stringy package is here to fill that void. There are other packages in golang with the same functionality, but in some edge cases they fail to produce correct output. This package's cross-flexibility is its main advantage: you can convert camelcase to snakecase or kebabcase, or vice versa.
```go
package main
import (
"fmt"
"github.com/gobeam/stringy"
)
func main() {
str := stringy.New("hello__man how-Are you??")
result := str.CamelCase("?", "")
fmt.Println(result) // HelloManHowAreYou
snakeStr := str.SnakeCase("?", "")
fmt.Println(snakeStr.ToLower()) // hello_man_how_are_you
kebabStr := str.KebabCase("?", "")
fmt.Println(kebabStr.ToUpper()) // HELLO-MAN-HOW-ARE-YOU
}
```
## Installation
``` bash
$ go get -u -v github.com/gobeam/stringy
```
or with dep
``` bash
$ dep ensure -add github.com/gobeam/stringy
```
## Functions
#### Between(start, end string) StringManipulation
Between takes two string params, start and end, and returns the value that sits between the start and end parts of the input. You can chain ToUpper, which makes the result all uppercase, ToLower, which makes it all lowercase, or Get, which returns the result as is.
```go
strBetween := stringy.New("HelloMyName")
fmt.Println(strBetween.Between("hello", "name").ToUpper()) // MY
```
#### Boolean() bool
Boolean func returns boolean value of string value like on, off, 0, 1, yes, no returns boolean value of string input. You can chain this function on other function which returns implemented StringManipulation interface.
```go
boolString := stringy.New("off")
fmt.Println(boolString.Boolean()) // false
```
#### CamelCase(rule ...string) string
CamelCase is variadic function which takes one Param rule i.e slice of strings and it returns input type string in camel case form and rule helps to omit character you want to omit from string. By default special characters like "_", "-","."," " are treated like word separator and treated accordingly by default and you dont have to worry about it.
```go
camelCase := stringy.New("ThisIsOne___messed up string. Can we Really camel-case It ?##")
fmt.Println(camelCase.CamelCase("?", "", "#", "")) // thisIsOneMessedUpStringCanWeReallyCamelCaseIt
```
Note how it omitted ?## from the string. If you don't want to omit anything, no rule is required; and since CamelCase returns a plain string, you can't upper-case or lower-case the whole camelcase result by chaining.
```go
camelCase := stringy.New("ThisIsOne___messed up string. Can we Really camel-case It ?##")
fmt.Println(camelCase.CamelCase()) // thisIsOneMessedUpStringCanWeReallyCamelCaseIt?##
```
#### ContainsAll(check ...string) bool
ContainsAll is a variadic function that takes a slice of strings as its param, checks whether all of them are present in the input, and returns a boolean value accordingly.
```go
contains := stringy.New("hello mam how are you??")
fmt.Println(contains.ContainsAll("mam", "?")) // true
```
#### Delimited(delimiter string, rule ...string) StringManipulation
Delimited is a variadic function that takes two params: delimiter and a slice of strings named rule. It joins the string with the passed delimiter. The rule param helps to omit characters you want removed from the string. By default special characters like "_", "-", "." and " " are treated as word separators, so you don't have to worry about them. If you don't want to omit any character, pass an empty string.
```go
delimiterString := stringy.New("ThisIsOne___messed up string. Can we Really delimeter-case It?")
fmt.Println(delimiterString.Delimited("?").Get())
```
You can chain ToUpper, which will make the result all uppercase, ToLower, which will make it all lowercase, or Get, which will return the result as is.
#### First(length int) string
First returns first n characters from provided input. It removes all spaces in string before doing so.
```go
fcn := stringy.New("4111 1111 1111 1111")
first := fcn.First(4)
fmt.Println(first) // 4111
```
#### Get() string
Get simply returns the result and can be chained onto any function that returns the StringManipulation interface; see the examples above and the short sketch below.
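For instance, a minimal sketch (based on the vendored implementation above) of ending a chain with Get:
```go
str := stringy.New("hello man")
fmt.Println(str.CamelCase().Get()) // helloMan
```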
#### KebabCase(rule ...string) StringManipulation
KebabCase/slugify is a variadic function that takes one param, a slice of strings named rule, and returns the passed string in kebab-case (slugified) form. The rule param helps to omit characters you want removed from the string. By default special characters like "_", "-", "." and " " are treated as word separators, so you don't have to worry about them. If you don't want to omit any character, pass nothing.
```go
str := stringy.New("hello__man how-Are you??")
kebabStr := str.KebabCase("?","")
fmt.Println(kebabStr.ToUpper()) // HELLO-MAN-HOW-ARE-YOU
fmt.Println(kebabStr.Get()) // hello-man-how-Are-you
```
You can chain ToUpper, which will make the result all uppercase, ToLower, which will make it all lowercase, or Get, which will return the result as is.
#### Last(length int) string
Last returns last n characters from provided input. It removes all spaces in string before doing so.
```go
lcn := stringy.New("4111 1111 1111 1348")
last := lcn.Last(4)
fmt.Println(last) // 1348
```
#### LcFirst() string
LcFirst returns the result with the first letter of the string lower-cased, and it can be chained onto any function that returns the StringManipulation interface.
```go
contains := stringy.New("Hello roshan")
fmt.Println(contains.LcFirst()) // hello roshan
```
#### Lines() []string
Lines splits the input on whitespace and returns the resulting pieces as a slice of strings.
```go
lines := stringy.New("fòô\r\nbàř\nyolo123")
fmt.Println(lines.Lines()) // [fòô bàř yolo123]
```
#### Pad(length int, with, padType string) string
Pad takes three params: length, i.e. the total length the string should have after padding; with, i.e. what to pad with; and padType, which can be "both", "left" or "right". It returns the string padded up to length with the with param according to padType, and it can be chained onto any function that returns the StringManipulation interface.
```go
pad := stringy.New("Roshan")
fmt.Println(pad.Pad(10, "0", "both")) // 00Roshan00
fmt.Println(pad.Pad(10, "0", "left")) // 0000Roshan
fmt.Println(pad.Pad(10, "0", "right")) // Roshan0000
```
#### RemoveSpecialCharacter() string
RemoveSpecialCharacter removes all special characters and returns the string. It can be chained onto any function that returns the StringManipulation interface.
```go
cleanString := stringy.New("special@#remove%%%%")
fmt.Println(cleanString.RemoveSpecialCharacter()) // specialremove
```
#### ReplaceFirst(search, replace string) string
ReplaceFirst takes two params, search and replace. It returns a string in which the first occurrence of the search substring is replaced with the replace substring, and it can be chained onto any function that returns the StringManipulation interface.
```go
replaceFirst := stringy.New("Hello My name is Roshan and his name is Alis.")
fmt.Println(replaceFirst.ReplaceFirst("name", "nombre")) // Hello My nombre is Roshan and his name is Alis.
```
#### ReplaceLast(search, replace string) string
ReplaceLast takes two params, search and replace. It returns a string in which the last occurrence of the search substring is replaced with the replace substring, and it can be chained onto any function that returns the StringManipulation interface.
```go
replaceLast := stringy.New("Hello My name is Roshan and his name is Alis.")
fmt.Println(replaceLast.ReplaceLast("name", "nombre")) // Hello My name is Roshan and his nombre is Alis.
```
#### Reverse() string
Reverse reverses the passed string, and it can be chained onto any function that returns the StringManipulation interface.
```go
reverse := stringy.New("This is only test")
fmt.Println(reverse.Reverse()) // tset ylno si sihT
```
#### Shuffle() string
Shuffle shuffles the given string randomly, and it can be chained onto any function that returns the StringManipulation interface.
```go
shuffleString := stringy.New("roshan")
fmt.Println(shuffleString.Shuffle()) // nhasro
```
#### Surround(with string) string
Surround takes one param, with, which is used to surround the user input, and it can be chained onto any function that returns the StringManipulation interface.
```go
surroundStr := stringy.New("__")
fmt.Println(surroundStr.Surround("-")) // -__-
```
#### SnakeCase(rule ...string) StringManipulation
SnakeCase is a variadic function that takes one param, a slice of strings named rule, and returns the passed string in snake-case form. The rule param helps to omit characters you want removed from the string. By default special characters like "_", "-", "." and " " are treated as word separators, so you don't have to worry about them. If you don't want to omit any character, pass nothing.
```go
snakeCase := stringy.New("ThisIsOne___messed up string. Can we Really Snake Case It?")
fmt.Println(snakeCase.SnakeCase("?", "").Get()) // This_Is_One_messed_up_string_Can_we_Really_Snake_Case_It
fmt.Println(snakeCase.SnakeCase("?", "").ToUpper()) // THIS_IS_ONE_MESSED_UP_STRING_CAN_WE_REALLY_SNAKE_CASE_IT
```
You can chain ToUpper, which will make the result all uppercase, ToLower, which will make it all lowercase, or Get, which will return the result as is.
#### Tease(length int, indicator string) string
Tease takes two params, length and indicator. It shortens the given string to the passed length and appends the indicator to the end, and it can be chained onto any function that returns the StringManipulation interface.
```go
teaseString := stringy.New("Hello My name is Roshan. I am full stack developer")
fmt.Println(teaseString.Tease(20, "...")) // Hello My name is Ros...
```
#### Title() string
Title returns the string with the first letter of each word in uppercase, and it can be chained onto any function that returns the StringManipulation interface.
```go
title := stringy.New("hello roshan")
fmt.Println(title.Title()) // Hello Roshan
```
#### ToLower() string
ToLower converts the whole user input to lowercase, and it can be chained onto any function that returns the StringManipulation interface.
```go
snakeCase := stringy.New("ThisIsOne___messed up string. Can we Really Snake Case It?")
fmt.Println(snakeCase.SnakeCase("?", "").ToLower()) // this_is_one_messed_up_string_can_we_really_snake_case_it
```
#### ToUpper() string
ToUpper converts the whole user input to uppercase, and it can be chained onto any function that returns the StringManipulation interface.
```go
snakeCase := stringy.New("ThisIsOne___messed up string. Can we Really Snake Case It?")
fmt.Println(snakeCase.SnakeCase("?", "").ToUpper()) // THIS_IS_ONE_MESSED_UP_STRING_CAN_WE_REALLY_SNAKE_CASE_IT
```
#### UcFirst() string
UcFirst returns the result with the first letter of the string upper-cased, and it can be chained onto any function that returns the StringManipulation interface.
```go
contains := stringy.New("hello roshan")
fmt.Println(contains.UcFirst()) // Hello roshan
```
#### Prefix(string) string
Prefix makes sure the string is prefixed with the given string, and avoids adding the prefix again if it is already there.
```go
ufo := stringy.New("known flying object")
fmt.Println(ufo.Prefix("un")) // unknown flying object
```
#### Suffix(string) string
Suffix makes sure the string is suffixed with the given string, and avoids adding the suffix again if it is already there.
```go
pun := stringy.New("this really is a cliff")
fmt.Println(pun.Suffix("hanger")) // this really is a cliffhanger
```
#### Acronym() string
Acronym returns the acronym of the input string. You can chain ToUpper(), which will make the result all uppercase, ToLower(), which will make it all lowercase, or Get, which will return the result as is.
```go
acronym := stringy.New("Laugh Out Loud")
fmt.Println(acronym.Acronym().ToLower()) // lol
```
#### PascalCase(rule ...string) string
PascalCase is a variadic function that takes one param, rule, i.e. a slice of strings, and returns the input string in Pascal-case form; the rule helps to omit characters you want removed from the string. By default special characters like "_", "-", "." and " " are treated as word separators, so you don't have to worry about them.
```go
pascalCase := stringy.New("ThisIsOne___messed up string. Can we Really pascal-case It ?##")
fmt.Println(pascalCase.PascalCase("?", "", "#", "")) // ThisIsOneMessedUpStringCanWeReallyPascalCaseIt
```
Notice how it omitted `?##` from the string. If you don't want to omit anything, call it without a rule; since the result is returned as a plain string (and upper- or lower-casing an entire Pascal-case string is rarely useful), no rule is required:
```go
pascalCase := stringy.New("ThisIsOne___messed up string. Can we Really camel-case It ?##")
fmt.Println(pascalCase.PascalCase()) // ThisIsOneMessedUpStringCanWeReallyCamelCaseIt?##
```
## Running the tests
``` bash
$ go test
```
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate; see `CONTRIBUTING.md` for details.
## License
Released under the MIT License - see `LICENSE.txt` for details.
[Build-Status-Url]: https://travis-ci.com/gobeam/stringy
[Build-Status-Image]: https://travis-ci.com/gobeam/stringy.svg?branch=master
[godoc-url]: https://pkg.go.dev/github.com/gobeam/stringy?tab=doc
[godoc-image]: https://godoc.org/github.com/gobeam/stringy?status.svg

67
vendor/github.com/gobeam/stringy/helper.go generated vendored Normal file
View File

@ -0,0 +1,67 @@
package stringy
import (
"errors"
"regexp"
"strings"
"unicode"
)
var selectCapitalRegexp = regexp.MustCompile(SelectCapital)
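// caseHelper splits input into words: for non-camel input it first breaks
// camelCase boundaries apart, then applies the rule replacements (plus the
// default ".", "_", "-" and space separators) and splits on whitespace.
// It panics if an odd number of rule elements is passed.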
func caseHelper(input string, isCamel bool, rule ...string) []string {
if !isCamel {
input = selectCapitalRegexp.ReplaceAllString(input, ReplaceCapital)
}
input = strings.Join(strings.Fields(strings.TrimSpace(input)), " ")
if len(rule) > 0 && len(rule)%2 != 0 {
panic(errors.New(OddError))
}
rule = append(rule, ".", " ", "_", " ", "-", " ")
replacer := strings.NewReplacer(rule...)
input = replacer.Replace(input)
words := strings.Fields(input)
return words
}
func contains(slice []string, item string) bool {
set := make(map[string]struct{}, len(slice))
for _, s := range slice {
set[s] = struct{}{}
}
_, ok := set[item]
return ok
}
func getInput(i input) (input string) {
if i.Result != "" {
input = i.Result
} else {
input = i.Input
}
return
}
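// replaceStr locates search in input case-insensitively and replaces its
// first or last occurrence (depending on types) with replace; if search is
// not found, the input is returned unchanged.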
func replaceStr(input, search, replace, types string) string {
lcInput := strings.ToLower(input)
lcSearch := strings.ToLower(search)
if input == "" || !strings.Contains(lcInput, lcSearch) {
return input
}
var start int
if types == "last" {
start = strings.LastIndex(lcInput, lcSearch)
} else {
start = strings.Index(lcInput, lcSearch)
}
end := start + len(search)
return input[:start] + replace + input[end:]
}
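// ucfirst upper-cases the first rune of val; ranging over the string yields
// only the first rune, so multi-byte UTF-8 characters are handled correctly.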
func ucfirst(val string) string {
for _, v := range val {
return string(unicode.ToUpper(v)) + val[len(string(v)):]
}
return ""
}

21
vendor/github.com/gobeam/stringy/message.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package stringy
// const below are used in packages
const (
First = "first"
Last = "last"
Left = "left"
Right = "right"
Both = "both"
OddError = "odd number rule provided please provide in even count"
SelectCapital = "([a-z])([A-Z])"
ReplaceCapital = "$1 $2"
LengthError = "passed length cannot be greater than input length"
InvalidLogicalString = "invalid string value to test boolean value"
)
// False is slice of array for false logical representation in string
var False = []string{"off", "no", "0", "false"}
// True is slice of array for true logical representation in string
var True = []string{"on", "yes", "1", "true"}

405
vendor/github.com/gobeam/stringy/stringy.go generated vendored Normal file
View File

@ -0,0 +1,405 @@
package stringy
import (
"errors"
"math"
"math/rand"
"regexp"
"strings"
"time"
"unicode"
)
var matchWordRegexp = regexp.MustCompile(`[\s]*[\W]\pN`)
// input is struct that holds input from user and result
type input struct {
Input string
Result string
}
// StringManipulation is an interface that holds all abstract methods to manipulate strings
type StringManipulation interface {
Acronym() StringManipulation
Between(start, end string) StringManipulation
Boolean() bool
PascalCase(rule ...string) StringManipulation
CamelCase(rule ...string) StringManipulation
ContainsAll(check ...string) bool
Delimited(delimiter string, rule ...string) StringManipulation
First(length int) string
Get() string
KebabCase(rule ...string) StringManipulation
Last(length int) string
LcFirst() string
Lines() []string
Pad(length int, with, padType string) string
RemoveSpecialCharacter() string
ReplaceFirst(search, replace string) string
ReplaceLast(search, replace string) string
Reverse() string
Shuffle() string
Surround(with string) string
SnakeCase(rule ...string) StringManipulation
Tease(length int, indicator string) string
Title() string
ToLower() string
ToUpper() string
UcFirst() string
Prefix(with string) string
Suffix(with string) string
}
// New func returns pointer to input struct
func New(val string) StringManipulation {
return &input{Input: val}
}
// Acronym func returns acronym of input string.
// You can chain to upper which with make result all upercase or ToLower
// which will make result all lower case or Get which will return result as it is
func (i *input) Acronym() StringManipulation {
input := getInput(*i)
words := strings.Fields(input)
var acronym string
for _, word := range words {
acronym += string(word[0])
}
i.Result = acronym
return i
}
// Between takes two string params start and end which and returns
// value which is in middle of start and end part of input. You can
// chain to upper which with make result all upercase or ToLower which
// will make result all lower case or Get which will return result as it is
func (i *input) Between(start, end string) StringManipulation {
if (start == "" && end == "") || i.Input == "" {
return i
}
input := strings.ToLower(i.Input)
lcStart := strings.ToLower(start)
lcEnd := strings.ToLower(end)
var startIndex, endIndex int
if len(start) > 0 && strings.Contains(input, lcStart) {
startIndex = len(start)
}
if len(end) > 0 && strings.Contains(input, lcEnd) {
endIndex = strings.Index(input, lcEnd)
} else if len(input) > 0 {
endIndex = len(input)
}
i.Result = strings.TrimSpace(i.Input[startIndex:endIndex])
return i
}
// Boolean func returns boolean value of string value like on, off, 0, 1, yes, no
// returns boolean value of string input. You can chain this function on other function
// which returns implemented StringManipulation interface
func (i *input) Boolean() bool {
input := getInput(*i)
inputLower := strings.ToLower(input)
off := contains(False, inputLower)
if off {
return false
}
on := contains(True, inputLower)
if on {
return true
}
panic(errors.New(InvalidLogicalString))
}
// CamelCase is variadic function which takes one Param rule i.e slice of strings and it returns
// input type string in camel case form and rule helps to omit character you want to omit from string.
// By default special characters like "_", "-","."," " are treated like word separator and treated
// accordingly by default and you don't have to worry about it
// First letter will be lowercase.
// Example input: hello user
// Result : helloUser
func (i *input) CamelCase(rule ...string) StringManipulation {
input := getInput(*i)
// removing excess space
wordArray := caseHelper(input, true, rule...)
for i, word := range wordArray {
if i == 0 {
wordArray[i] = strings.ToLower(word)
} else {
wordArray[i] = ucfirst(word)
}
}
i.Result = strings.Join(wordArray, "")
return i
}
// PascalCase is variadic function which takes one Param rule i.e slice of strings and it returns
// input type string in pascal case form and rule helps to omit character you want to omit from string.
// By default special characters like "_", "-","."," " are treated like word separator and treated
// accordingly by default and you don't have to worry about it
// Example input: hello user
// Result : HelloUser
func (i *input) PascalCase(rule ...string) StringManipulation {
input := getInput(*i)
// removing excess space
wordArray := caseHelper(input, true, rule...)
for i, word := range wordArray {
wordArray[i] = ucfirst(word)
}
i.Result = strings.Join(wordArray, "")
return i
}
// ContainsAll is variadic function which takes slice of strings as param and checks if they are present
// in input and returns boolean value accordingly
func (i *input) ContainsAll(check ...string) bool {
input := getInput(*i)
for _, item := range check {
if !strings.Contains(input, item) {
return false
}
}
return true
}
// Delimited is variadic function that takes two params delimiter and slice of strings i.e rule. It joins
// the string by passed delimeter. Rule param helps to omit character you want to omit from string. By
// default special characters like "_", "-","."," " are treated like word separator and treated accordingly
// by default and you dont have to worry about it.
func (i *input) Delimited(delimiter string, rule ...string) StringManipulation {
input := getInput(*i)
if strings.TrimSpace(delimiter) == "" {
delimiter = "."
}
wordArray := caseHelper(input, false, rule...)
i.Result = strings.Join(wordArray, delimiter)
return i
}
// First returns first n characters from provided input. It removes all spaces in string before doing so.
func (i *input) First(length int) string {
input := getInput(*i)
input = strings.ReplaceAll(input, " ", "")
if len(input) < length {
panic(errors.New(LengthError))
}
return input[0:length]
}
// Get simply returns result and can be chained on function which
// returns StringManipulation interface
func (i *input) Get() string {
return getInput(*i)
}
// KebabCase is variadic function that takes one Param slice of strings named rule and it returns passed string
// in kebab case form. Rule param helps to omit character you want to omit from string. By default special characters
// like "_", "-","."," " are l\treated like word separator and treated accordingly by default and you dont have to worry
// about it. If you don't want to omit any character pass nothing.
// Example input: hello user
// Result : hello-user
func (i *input) KebabCase(rule ...string) StringManipulation {
input := getInput(*i)
wordArray := caseHelper(input, false, rule...)
i.Result = strings.Join(wordArray, "-")
return i
}
// Last returns last n characters from provided input. It removes all spaces in string before doing so.
func (i *input) Last(length int) string {
input := getInput(*i)
input = strings.ReplaceAll(input, " ", "")
inputLen := len(input)
if len(input) < length {
panic(errors.New(LengthError))
}
start := inputLen - length
return input[start:inputLen]
}
// LcFirst simply returns result by lower casing first letter of string and it can be chained on
// function which return StringManipulation interface
func (i *input) LcFirst() string {
input := getInput(*i)
for _, v := range input {
return string(unicode.ToLower(v)) + input[len(string(v)):]
}
return ""
}
// Lines returns slice of strings by removing white space characters
func (i *input) Lines() []string {
input := getInput(*i)
result := matchWordRegexp.ReplaceAllString(input, " ")
return strings.Fields(strings.TrimSpace(result))
}
// Pad takes three param length i.e total length to be after padding, with i.e what to pad
// with and pad type which can be ("both" or "left" or "right") it return string after padding
// upto length by with param and on padType type it can be chained on function which return
// StringManipulation interface
func (i *input) Pad(length int, with, padType string) string {
input := getInput(*i)
inputLength := len(input)
padLength := len(with)
if inputLength >= length {
return input
}
switch padType {
case Right:
var count = 1 + ((length - padLength) / padLength)
var result = input + strings.Repeat(with, count)
return result[:length]
case Left:
var count = 1 + ((length - padLength) / padLength)
var result = strings.Repeat(with, count) + input
return result[(len(result) - length):]
case Both:
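// split the remaining padding between the two sides: the left side gets the
// floor of the half-length and the right side gets the ceil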
length := (float64(length - inputLength)) / float64(2)
repeat := math.Ceil(length / float64(padLength))
return strings.Repeat(with, int(repeat))[:int(math.Floor(float64(length)))] + input + strings.Repeat(with, int(repeat))[:int(math.Ceil(float64(length)))]
default:
return input
}
}
// RemoveSpecialCharacter removes all special characters and returns the string
// it can be chained on function which return StringManipulation interface
func (i *input) RemoveSpecialCharacter() string {
input := getInput(*i)
var result strings.Builder
for i := 0; i < len(input); i++ {
b := input[i]
if ('a' <= b && b <= 'z') ||
('A' <= b && b <= 'Z') ||
('0' <= b && b <= '9') ||
b == ' ' {
result.WriteByte(b)
}
}
return result.String()
}
// ReplaceFirst takes two param search and replace. It returns string by searching search
// sub string and replacing it with replace substring on first occurrence it can be chained
// on function which return StringManipulation interface.
func (i *input) ReplaceFirst(search, replace string) string {
input := getInput(*i)
return replaceStr(input, search, replace, First)
}
// ReplaceLast takes two param search and replace
// it return string by searching search sub string and replacing it
// with replace substring on last occurrence
// it can be chained on function which return StringManipulation interface
func (i *input) ReplaceLast(search, replace string) string {
input := getInput(*i)
return replaceStr(input, search, replace, Last)
}
// Reverse reverses the passed strings
// it can be chained on function which return StringManipulation interface
func (i *input) Reverse() string {
input := getInput(*i)
r := []rune(input)
for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
r[i], r[j] = r[j], r[i]
}
return string(r)
}
// Shuffle shuffles the given string randomly
// it can be chained on function which return StringManipulation interface
func (i *input) Shuffle() string {
input := getInput(*i)
rand.Seed(time.Now().Unix())
inRune := []rune(input)
rand.Shuffle(len(inRune), func(i, j int) {
inRune[i], inRune[j] = inRune[j], inRune[i]
})
return string(inRune)
}
// SnakeCase is variadic function that takes one Param slice of strings named rule
// and it returns passed string in snake case form. Rule param helps to omit character
// you want to omit from string. By default special characters like "_", "-","."," " are treated
// like word separator and treated accordingly by default and you don't have to worry about it.
// If you don't want to omit any character pass nothing.
func (i *input) SnakeCase(rule ...string) StringManipulation {
input := getInput(*i)
wordArray := caseHelper(input, false, rule...)
i.Result = strings.Join(wordArray, "_")
return i
}
// Surround takes one param with which is used to surround user input and it
// can be chained on function which return StringManipulation interface.
func (i *input) Surround(with string) string {
input := getInput(*i)
return with + input + with
}
// Tease takes two params length and indicator and it shortens given string
// on passed length and adds indicator on end
// it can be chained on function which return StringManipulation interface
func (i *input) Tease(length int, indicator string) string {
input := getInput(*i)
if input == "" || len(input) < length {
return input
}
return input[:length] + indicator
}
// ToLower makes all string of user input to lowercase
// it can be chained on function which return StringManipulation interface
func (i *input) ToLower() (result string) {
input := getInput(*i)
return strings.ToLower(input)
}
// Title makes first letter of each word of user input to uppercase
// it can be chained on function which return StringManipulation interface
func (i *input) Title() (result string) {
input := getInput(*i)
wordArray := strings.Split(input, " ")
for i, word := range wordArray {
wordArray[i] = strings.ToUpper(string(word[0])) + strings.ToLower(word[1:])
}
return strings.Join(wordArray, " ")
}
// ToUpper makes all string of user input to uppercase
// it can be chained on function which return StringManipulation interface
func (i *input) ToUpper() string {
input := getInput(*i)
return strings.ToUpper(input)
}
// UcFirst makes first word of user input to uppercase
// it can be chained on function which return StringManipulation interface
func (i *input) UcFirst() string {
input := getInput(*i)
return ucfirst(input)
}
// Prefix makes sure that string is prefixed with a given string
func (i *input) Prefix(with string) string {
input := getInput(*i)
if strings.HasPrefix(input, with) {
return input
}
return with + input
}
// Suffix makes sure that string is suffixed with a given string
func (i *input) Suffix(with string) string {
input := getInput(*i)
if strings.HasSuffix(input, with) {
return input
}
return input + with
}

41
vendor/github.com/google/uuid/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,41 @@
# Changelog
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
### Features
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
### Bug Fixes
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
### Features
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
### Features
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
### Fixes
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
### Bug Fixes
* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
## Changelog

26
vendor/github.com/google/uuid/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,26 @@
# How to contribute
We definitely welcome patches and contribution to this project!
### Tips
Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
Always try to include a test case! If it is not possible or not necessary,
please explain why in the pull request description.
### Releasing
Commits that would precipitate a SemVer change, as described in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please`
will create a release.
For tips on how to work with `release-please`, see its documentation.
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
[Contributor License Agreement](https://cla.developers.google.com/clas).
You may have already signed it for other Google projects.

9
vendor/github.com/google/uuid/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,9 @@
Paul Borman <borman@google.com>
bmatsuo
shawnps
theory
jboverfelt
dsymonds
cd1
wallclockbuilder
dansouza

27
vendor/github.com/google/uuid/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009,2014 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

21
vendor/github.com/google/uuid/README.md generated vendored Normal file
View File

@ -0,0 +1,21 @@
# uuid
The uuid package generates and inspects UUIDs based on
[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
code.google.com/p/go-uuid). It differs from these earlier packages in that
a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
```sh
go get github.com/google/uuid
```
###### Documentation
[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
http://pkg.go.dev/github.com/google/uuid
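A minimal usage sketch (illustrative only; `uuid.New` is defined in version4.go, which ships with this package but is not shown in this diff):
```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New() // random (Version 4) UUID
	fmt.Println(id)  // e.g. 0b4fa2e6-6f6c-4b44-9d62-0b2f0e9b6c3a

	parsed, err := uuid.Parse(id.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == id) // true
}
```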

80
vendor/github.com/google/uuid/dce.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the users UID for the Person
// domain and the users GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
uuid, err := NewUUID()
if err == nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid, err
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID. Domains are only defined
// for Version 2 UUIDs.
func (uuid UUID) Domain() Domain {
return Domain(uuid[9])
}
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
// UUIDs.
func (uuid UUID) ID() uint32 {
return binary.BigEndian.Uint32(uuid[0:4])
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}

12
vendor/github.com/google/uuid/doc.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uuid generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
// Services.
//
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
// maps or compared directly.
package uuid

59
vendor/github.com/google/uuid/hash.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known namespace IDs and UUIDs
var (
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
Max = UUID{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space[:]) //nolint:errcheck
h.Write(data) //nolint:errcheck
s := h.Sum(nil)
var uuid UUID
copy(uuid[:], s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}

38
vendor/github.com/google/uuid/marshal.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "fmt"
// MarshalText implements encoding.TextMarshaler.
func (uuid UUID) MarshalText() ([]byte, error) {
var js [36]byte
encodeHex(js[:], uuid)
return js[:], nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (uuid *UUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
return err
}
*uuid = id
return nil
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (uuid UUID) MarshalBinary() ([]byte, error) {
return uuid[:], nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (uuid *UUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(uuid[:], data)
return nil
}

90
vendor/github.com/google/uuid/node.go generated vendored Normal file
View File

@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"sync"
)
var (
nodeMu sync.Mutex
ifname string // name of interface being used
nodeID [6]byte // hardware for version 1 UUIDs
zeroID [6]byte // nodeID with only 0's
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
iname, addr := getHardwareInterface(name) // null implementation for js
if iname != "" && addr != nil {
ifname = iname
copy(nodeID[:], addr)
return true
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
ifname = "random"
randomBits(nodeID[:])
return true
}
return false
}
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
nid := nodeID
return nid[:]
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
defer nodeMu.Unlock()
nodeMu.Lock()
copy(nodeID[:], id)
ifname = "user"
return true
}
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
var node [6]byte
copy(node[:], uuid[10:])
return node[:]
}

12
vendor/github.com/google/uuid/node_js.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build js
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

33
vendor/github.com/google/uuid/node_net.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !js
package uuid
import "net"
var interfaces []net.Interface // cached list of interfaces
// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil {
return "", nil
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
return ifs.Name, ifs.HardwareAddr
}
}
return "", nil
}

118
vendor/github.com/google/uuid/null.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright 2021 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
)
var jsonNull = []byte("null")
// NullUUID represents a UUID that may be null.
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
//
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL
}
// Scan implements the SQL driver.Scanner interface.
func (nu *NullUUID) Scan(value interface{}) error {
if value == nil {
nu.UUID, nu.Valid = Nil, false
return nil
}
err := nu.UUID.Scan(value)
if err != nil {
nu.Valid = false
return err
}
nu.Valid = true
return nil
}
// Value implements the driver Valuer interface.
func (nu NullUUID) Value() (driver.Value, error) {
if !nu.Valid {
return nil, nil
}
// Delegate to UUID Value function
return nu.UUID.Value()
}
// MarshalBinary implements encoding.BinaryMarshaler.
func (nu NullUUID) MarshalBinary() ([]byte, error) {
if nu.Valid {
return nu.UUID[:], nil
}
return []byte(nil), nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
func (nu *NullUUID) UnmarshalBinary(data []byte) error {
if len(data) != 16 {
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
}
copy(nu.UUID[:], data)
nu.Valid = true
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (nu NullUUID) MarshalText() ([]byte, error) {
if nu.Valid {
return nu.UUID.MarshalText()
}
return jsonNull, nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (nu *NullUUID) UnmarshalText(data []byte) error {
id, err := ParseBytes(data)
if err != nil {
nu.Valid = false
return err
}
nu.UUID = id
nu.Valid = true
return nil
}
// MarshalJSON implements json.Marshaler.
func (nu NullUUID) MarshalJSON() ([]byte, error) {
if nu.Valid {
return json.Marshal(nu.UUID)
}
return jsonNull, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (nu *NullUUID) UnmarshalJSON(data []byte) error {
if bytes.Equal(data, jsonNull) {
*nu = NullUUID{}
return nil // valid null UUID
}
err := json.Unmarshal(data, &nu.UUID)
nu.Valid = err == nil
return err
}

59
vendor/github.com/google/uuid/sql.go generated vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case nil:
return nil
case string:
// if an empty UUID comes from a table, we return a null UUID
if src == "" {
return nil
}
// see Parse for required string format
u, err := Parse(src)
if err != nil {
return fmt.Errorf("Scan: %v", err)
}
*uuid = u
case []byte:
// if an empty UUID comes from a table, we return a null UUID
if len(src) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(src) != 16 {
return uuid.Scan(string(src))
}
copy((*uuid)[:], src)
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}

134
vendor/github.com/google/uuid/time.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
oldSeq := clockSeq
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
var t Time
switch uuid.Version() {
case 6:
time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
t = Time(time)
case 7:
time := binary.BigEndian.Uint64(uuid[:8])
t = Time((time>>16)*10000 + g1582ns100)
default: // forward compatible
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
t = Time(time)
}
return t
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}

43
vendor/github.com/google/uuid/util.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts hex characters x1 and x2 into a byte.
func xtob(x1, x2 byte) (byte, bool) {
b1 := xvalues[x1]
b2 := xvalues[x2]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

365
vendor/github.com/google/uuid/uuid.go generated vendored Normal file
View File

@ -0,0 +1,365 @@
// Copyright 2018 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
"sync"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID [16]byte
// A Version represents a UUID's version.
type Version byte
// A Variant represents a UUID's variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
const randPoolSize = 16 * 16
var (
rander = rand.Reader // random function
poolEnabled = false
poolMu sync.Mutex
poolPos = randPoolSize // protected with poolMu
pool [randPoolSize]byte // protected with poolMu
)
type invalidLengthError struct{ len int }
func (err invalidLengthError) Error() string {
return fmt.Sprintf("invalid UUID length: %d", err.len)
}
// IsInvalidLengthError is matcher function for custom error invalidLengthError
func IsInvalidLengthError(err error) bool {
_, ok := err.(invalidLengthError)
return ok
}
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
// the standard UUID forms defined in RFC 4122
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
// Parse accepts non-standard strings such as the raw hex encoding
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
// examined in the latter case. Parse should not be used to validate strings as
// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36:
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
case 36 + 2:
s = s[1:]
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
case 32:
var ok bool
for i := range uuid {
uuid[i], ok = xtob(s[i*2], s[i*2+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, invalidLengthError{len(s)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34,
} {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
func ParseBytes(b []byte) (UUID, error) {
var uuid UUID
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
b = b[1:]
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
var ok bool
for i := 0; i < 32; i += 2 {
uuid[i/2], ok = xtob(b[i], b[i+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
}
return uuid, nil
default:
return uuid, invalidLengthError{len(b)}
}
// s is now at least 36 bytes long
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
return uuid, errors.New("invalid UUID format")
}
for i, x := range [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34,
} {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
}
uuid[i] = v
}
return uuid, nil
}
// MustParse is like Parse but panics if the string cannot be parsed.
// It simplifies safe initialization of global variables holding compiled UUIDs.
func MustParse(s string) UUID {
uuid, err := Parse(s)
if err != nil {
panic(`uuid: Parse(` + s + `): ` + err.Error())
}
return uuid
}
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
// does not have a length of 16. The bytes are copied from the slice.
func FromBytes(b []byte) (uuid UUID, err error) {
err = uuid.UnmarshalBinary(b)
return uuid, err
}
// Must returns uuid if err is nil and panics otherwise.
func Must(uuid UUID, err error) UUID {
if err != nil {
panic(err)
}
return uuid
}
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {
// Standard UUID format
case 36:
// UUID with "urn:uuid:" prefix
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// UUID enclosed in braces
case 36 + 2:
if s[0] != '{' || s[len(s)-1] != '}' {
return fmt.Errorf("invalid bracketed UUID format")
}
s = s[1 : len(s)-1]
// UUID without hyphens
case 32:
for i := 0; i < len(s); i += 2 {
_, ok := xtob(s[i], s[i+1])
if !ok {
return errors.New("invalid UUID format")
}
}
default:
return invalidLengthError{len(s)}
}
// Check for standard UUID format
if len(s) == 36 {
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return errors.New("invalid UUID format")
}
for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
if _, ok := xtob(s[x], s[x+1]); !ok {
return errors.New("invalid UUID format")
}
}
}
return nil
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst, uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
}
// Variant returns the variant encoded in uuid.
func (uuid UUID) Variant() Variant {
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid.
func (uuid UUID) Version() Version {
return Version(uuid[6] >> 4)
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}
// EnableRandPool enables internal randomness pool used for Random
// (Version 4) UUID generation. The pool contains random bytes read from
// the random number generator on demand in batches. Enabling the pool
// may improve the UUID generation throughput significantly.
//
// Since the pool is stored on the Go heap, this feature may be a bad fit
// for security sensitive applications.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func EnableRandPool() {
poolEnabled = true
}
// DisableRandPool disables the randomness pool if it was previously
// enabled with EnableRandPool.
//
// Both EnableRandPool and DisableRandPool are not thread-safe and should
// only be called when there is no possibility that New or any other
// UUID Version 4 generation function will be called concurrently.
func DisableRandPool() {
poolEnabled = false
defer poolMu.Unlock()
poolMu.Lock()
poolPos = randPoolSize
}
// UUIDs is a slice of UUID types.
type UUIDs []UUID
// Strings returns a string slice containing the string form of each UUID in uuids.
func (uuids UUIDs) Strings() []string {
var uuidStrs = make([]string, len(uuids))
for i, uuid := range uuids {
uuidStrs[i] = uuid.String()
}
return uuidStrs
}
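A minimal usage sketch of the parsing helpers above, assuming the google/uuid API as vendored here; the UUID literal is only an example value:
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parse accepts the canonical, urn:uuid:, braced and 32-hex-digit forms.
	id, err := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-8567-0e02b2c3d479")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.String())  // f47ac10b-58cc-4372-8567-0e02b2c3d479
	fmt.Println(id.Version()) // VERSION_4
	fmt.Println(id.Variant()) // RFC4122

	// Validate checks the same formats without allocating a UUID value.
	fmt.Println(uuid.Validate("f47ac10b58cc437285670e02b2c3d479")) // <nil>
}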

44
vendor/github.com/google/uuid/version1.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns Nil. If the clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewUUID returns Nil and an error.
//
// In most cases, New should be used.
func NewUUID() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
timeLow := uint32(now & 0xffffffff)
timeMid := uint16((now >> 32) & 0xffff)
timeHi := uint16((now >> 48) & 0x0fff)
timeHi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], timeLow)
binary.BigEndian.PutUint16(uuid[4:], timeMid)
binary.BigEndian.PutUint16(uuid[6:], timeHi)
binary.BigEndian.PutUint16(uuid[8:], seq)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
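A short sketch of the Version 1 constructor above, under the same vendored API; the printed value is illustrative:
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewUUID embeds the current time, clock sequence and node ID into the value.
	id, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	fmt.Println(id, id.Version()) // e.g. 1ef872aa-....-....-....-............ VERSION_1
}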

76
vendor/github.com/google/uuid/version4.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// Uses the randomness pool if it was enabled with EnableRandPool.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)
}
return newRandomFromPool()
}
// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
var uuid UUID
_, err := io.ReadFull(r, uuid[:])
if err != nil {
return Nil, err
}
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
func newRandomFromPool() (UUID, error) {
var uuid UUID
poolMu.Lock()
if poolPos == randPoolSize {
_, err := io.ReadFull(rander, pool[:])
if err != nil {
poolMu.Unlock()
return Nil, err
}
poolPos = 0
}
copy(uuid[:], pool[poolPos:(poolPos+16)])
poolPos += 16
poolMu.Unlock()
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid, nil
}
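A sketch of the Version 4 helpers and the optional randomness pool, assuming the vendored API:
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// EnableRandPool batches reads from crypto/rand for throughput; per the comment
	// above it must be toggled before any concurrent UUID generation starts.
	uuid.EnableRandPool()

	fmt.Println(uuid.New())       // random (Version 4) UUID; panics if crypto/rand fails
	fmt.Println(uuid.NewString()) // the same, already rendered as a string
}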

56
vendor/github.com/google/uuid/version6.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "encoding/binary"
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
//
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set, NewV6 falls back to random NodeID bits automatically. If the clock sequence has not been
// set by SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewV6 returns Nil and an error.
func NewV6() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_high |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_mid | time_low_and_version |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|clk_seq_hi_res | clk_seq_low | node (0-1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| node (2-5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
binary.BigEndian.PutUint16(uuid[8:], seq)
uuid[6] = 0x60 | (uuid[6] & 0x0F)
uuid[8] = 0x80 | (uuid[8] & 0x3F)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
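And a corresponding sketch for NewV6; as the comment above notes, systems without legacy v1 values would normally prefer NewV7:
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV6()
	if err != nil {
		panic(err) // GetTime failed; id is the zero (Nil) UUID
	}
	fmt.Println(id, id.Version()) // e.g. 1ef87c2a-....-....-....-............ VERSION_6
}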

104
vendor/github.com/google/uuid/version7.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source,
// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
// as well as improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error.
func NewV7() (UUID, error) {
uuid, err := NewRandom()
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
uuid, err := NewRandomFromReader(r)
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
// uuid[8] already has the right variant bits set (Variant is 10).
// See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
_ = uuid[15] // bounds check
t, s := getV7Time()
uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)
uuid[2] = byte(t >> 24)
uuid[3] = byte(t >> 16)
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)
uuid[6] = 0x70 | (0x0F & byte(s>>8))
uuid[7] = byte(s)
}
// lastV7time is the last time we returned stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64
const nanoPerMilli = 1000000
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()
nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
}
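A sketch showing the time-ordering property that getV7Time provides, assuming the vendored API:
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// getV7Time guarantees a strictly increasing (milli << 12 + seq) value, so two
	// UUIDs created back to back sort by creation order even within one millisecond.
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()
	fmt.Println(a.String() < b.String()) // true
}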

View File

@ -57,7 +57,7 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
} else if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
service = &Service{Name: line[1 : len(line)-1], Settings: make(map[string]string)}
servicefile.Services = append(servicefile.Services, service)
} else {
} else if service != nil {
parts := strings.SplitN(line, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("unable to parse line %d", lineNum)
@ -67,6 +67,8 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
value := strings.TrimSpace(parts[1])
service.Settings[key] = value
} else {
return nil, fmt.Errorf("line %d is not in a section", lineNum)
}
}

View File

@ -1,3 +1,44 @@
# 5.7.1 (September 10, 2024)
* Fix data race in tracelog.TraceLog
* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
* Update golang.org/x/crypto and golang.org/x/text
# 5.7.0 (September 7, 2024)
* Add support for sslrootcert=system (Yann Soubeyrand)
* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
* Add MultiTrace (Stepan Rabotkin)
* Add TraceLogConfig with customizable TimeKey (stringintech)
* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
* Update pgservicefile - fixes panic when parsing invalid file
* Better error message when reading past end of batch
* Don't print url when url.Parse returns an error (Kevin Biju)
* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
* Fix: Scan and encode types with underlying types of arrays
# 5.6.0 (May 25, 2024)
* Add StrictNamedArgs (Tomas Zahradnicek)
* Add support for macaddr8 type (Carlos Pérez-Aradros Herce)
* Add SeverityUnlocalized field to PgError / Notice
* Performance optimization of RowToStructByPos/Name (Zach Olstein)
* Allow customizing context canceled behavior for pgconn
* Add ScanLocation to pgtype.Timestamp[tz]Codec
* Add custom data to pgconn.PgConn
* Fix ResultReader.Read() to handle nil values
* Do not encode interval microseconds when they are 0 (Carlos Pérez-Aradros Herce)
* pgconn.SafeToRetry checks for wrapped errors (tjasko)
* Failed connection attempts include all errors
* Optimize LargeObject.Read (Mitar)
* Add tracing for connection acquire and release from pool (ngavinsir)
* Fix encode driver.Valuer not called when nil
* Add support for custom JSON marshal and unmarshal (Mitar)
* Use Go default keepalive for TCP connections (Hans-Joachim Kliemeck)
# 5.5.5 (March 9, 2024)
Use spaces instead of parentheses for SQL sanitization.

View File

@ -29,6 +29,7 @@ Create and setup a test database:
export PGDATABASE=pgx_test
createdb
psql -c 'create extension hstore;'
psql -c 'create extension ltree;'
psql -c 'create domain uint64 as numeric(20,0);'
```

View File

@ -92,7 +92,7 @@ See the presentation at Golang Estonia, [PGX Top to Bottom](https://www.youtube.
## Supported Go and PostgreSQL Versions
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.20 and higher and PostgreSQL 12 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
pgx supports the same versions of Go and PostgreSQL that are supported by their respective teams. For [Go](https://golang.org/doc/devel/release.html#policy) that is the two most recent major releases and for [PostgreSQL](https://www.postgresql.org/support/versioning/) the major releases in the last 5 years. This means pgx supports Go 1.21 and higher and PostgreSQL 12 and higher. pgx also is tested against the latest version of [CockroachDB](https://www.cockroachlabs.com/product/).
## Version Policy

View File

@ -12,7 +12,7 @@ import (
type QueuedQuery struct {
SQL string
Arguments []any
fn batchItemFunc
Fn batchItemFunc
sd *pgconn.StatementDescription
}
@ -20,7 +20,7 @@ type batchItemFunc func(br BatchResults) error
// Query sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
qq.fn = func(br BatchResults) error {
qq.Fn = func(br BatchResults) error {
rows, _ := br.Query()
defer rows.Close()
@ -36,7 +36,7 @@ func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
// QueryRow sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
qq.fn = func(br BatchResults) error {
qq.Fn = func(br BatchResults) error {
row := br.QueryRow()
return fn(row)
}
@ -44,7 +44,7 @@ func (qq *QueuedQuery) QueryRow(fn func(row Row) error) {
// Exec sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) Exec(fn func(ct pgconn.CommandTag) error) {
qq.fn = func(br BatchResults) error {
qq.Fn = func(br BatchResults) error {
ct, err := br.Exec()
if err != nil {
return err
@ -60,9 +60,13 @@ type Batch struct {
QueuedQueries []*QueuedQuery
}
// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
// The only pgx option argument that is supported is QueryRewriter. Queries are executed using the
// connection's DefaultQueryExecMode.
// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
//
// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryModeSimple, this should
// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
// include the current query may reference the wrong query.
func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
qq := &QueuedQuery{
SQL: query,
@ -128,7 +132,7 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
if !br.mrr.NextResult() {
err := br.mrr.Close()
if err == nil {
err = errors.New("no result")
err = errors.New("no more results in batch")
}
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
@ -180,7 +184,7 @@ func (br *batchResults) Query() (Rows, error) {
if !br.mrr.NextResult() {
rows.err = br.mrr.Close()
if rows.err == nil {
rows.err = errors.New("no result")
rows.err = errors.New("no more results in batch")
}
rows.closed = true
@ -228,8 +232,8 @@ func (br *batchResults) Close() error {
// Read and run fn for all remaining items
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
if br.b.QueuedQueries[br.qqIdx].fn != nil {
err := br.b.QueuedQueries[br.qqIdx].fn(br)
if br.b.QueuedQueries[br.qqIdx].Fn != nil {
err := br.b.QueuedQueries[br.qqIdx].Fn(br)
if err != nil {
br.err = err
}
@ -287,7 +291,10 @@ func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
return pgconn.CommandTag{}, br.err
}
query, arguments, _ := br.nextQueryAndArgs()
query, arguments, err := br.nextQueryAndArgs()
if err != nil {
return pgconn.CommandTag{}, err
}
results, err := br.pipeline.GetResults()
if err != nil {
@ -330,9 +337,9 @@ func (br *pipelineBatchResults) Query() (Rows, error) {
return &baseRows{err: br.err, closed: true}, br.err
}
query, arguments, ok := br.nextQueryAndArgs()
if !ok {
query = "batch query"
query, arguments, err := br.nextQueryAndArgs()
if err != nil {
return &baseRows{err: err, closed: true}, err
}
rows := br.conn.getRows(br.ctx, query, arguments)
@ -397,8 +404,8 @@ func (br *pipelineBatchResults) Close() error {
// Read and run fn for all remaining items
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
if br.b.QueuedQueries[br.qqIdx].fn != nil {
err := br.b.QueuedQueries[br.qqIdx].fn(br)
if br.b.QueuedQueries[br.qqIdx].Fn != nil {
err := br.b.QueuedQueries[br.qqIdx].Fn(br)
if err != nil {
br.err = err
}
@ -421,13 +428,16 @@ func (br *pipelineBatchResults) earlyError() error {
return br.err
}
func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
bi := br.b.QueuedQueries[br.qqIdx]
query = bi.SQL
args = bi.Arguments
ok = true
br.qqIdx++
func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, err error) {
if br.b == nil {
return "", nil, errors.New("no reference to batch")
}
return
if br.qqIdx >= len(br.b.QueuedQueries) {
return "", nil, errors.New("no more results in batch")
}
bi := br.b.QueuedQueries[br.qqIdx]
br.qqIdx++
return bi.SQL, bi.Arguments, nil
}
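A rough usage sketch of the queued-callback API touched above; the connection string comes from the environment and the table is illustrative:
package main

import (
	"context"
	"log"
	"os"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	batch := &pgx.Batch{}
	batch.Queue("create temporary table widgets(name text)")
	batch.Queue("insert into widgets(name) values($1)", "sprocket")
	batch.Queue("select count(*) from widgets").QueryRow(func(row pgx.Row) error {
		var n int64
		return row.Scan(&n)
	})

	// Close sends the batch and runs each QueuedQuery's Fn callback in order;
	// reading past the end now yields "no more results in batch".
	if err := conn.SendBatch(ctx, batch).Close(); err != nil {
		log.Fatal(err)
	}
}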

View File

@ -3,6 +3,7 @@ package pgx
import (
"context"
"crypto/sha256"
"database/sql"
"encoding/hex"
"errors"
"fmt"
@ -10,7 +11,6 @@ import (
"strings"
"time"
"github.com/jackc/pgx/v5/internal/anynil"
"github.com/jackc/pgx/v5/internal/sanitize"
"github.com/jackc/pgx/v5/internal/stmtcache"
"github.com/jackc/pgx/v5/pgconn"
@ -103,13 +103,31 @@ func (ident Identifier) Sanitize() string {
var (
// ErrNoRows occurs when rows are expected but none are returned.
ErrNoRows = errors.New("no rows in result set")
ErrNoRows = newProxyErr(sql.ErrNoRows, "no rows in result set")
// ErrTooManyRows occurs when more rows than expected are returned.
ErrTooManyRows = errors.New("too many rows in result set")
)
var errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
var errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
func newProxyErr(background error, msg string) error {
return &proxyError{
msg: msg,
background: background,
}
}
type proxyError struct {
msg string
background error
}
func (err *proxyError) Error() string { return err.msg }
func (err *proxyError) Unwrap() error { return err.background }
var (
errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
)
// Connect establishes a connection with a PostgreSQL server with a connection string. See
// pgconn.Connect for details.
@ -624,7 +642,7 @@ const (
// to execute. It does not use named prepared statements. But it does use the unnamed prepared statement to get the
// statement description on the first round trip and then uses it to execute the query on the second round trip. This
// may cause problems with connection poolers that switch the underlying connection between round trips. It is safe
// even when the the database schema is modified concurrently.
// even when the database schema is modified concurrently.
QueryExecModeDescribeExec
// Assume the PostgreSQL query parameter types based on the Go type of the arguments. This uses the extended protocol
@ -755,7 +773,6 @@ optionLoop:
}
c.eqb.reset()
anynil.NormalizeSlice(args)
rows := c.getRows(ctx, sql, args)
var err error
@ -845,7 +862,6 @@ func (c *Conn) getStatementDescription(
mode QueryExecMode,
sql string,
) (sd *pgconn.StatementDescription, err error) {
switch mode {
case QueryExecModeCacheStatement:
if c.statementCache == nil {

262
vendor/github.com/jackc/pgx/v5/derived_types.go generated vendored Normal file
View File

@ -0,0 +1,262 @@
package pgx
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/jackc/pgx/v5/pgtype"
)
/*
buildLoadDerivedTypesSQL generates the correct query for retrieving type information.
pgVersion: the major version of the PostgreSQL server
typeNames: the names of the types to load. If nil, load all types.
*/
func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
supportsMultirange := (pgVersion >= 14)
var typeNamesClause string
if typeNames == nil {
// This should not occur; this will not return any types
typeNamesClause = "= ''"
} else {
typeNamesClause = "= ANY($1)"
}
parts := make([]string, 0, 10)
// Each of the type names provided might be found in pg_class or pg_type.
// Additionally, it may or may not include a schema portion.
parts = append(parts, `
WITH RECURSIVE
-- find the OIDs in pg_class which match one of the provided type names
selected_classes(oid,reltype) AS (
-- this query uses the namespace search path, so will match type names without a schema prefix
SELECT pg_class.oid, pg_class.reltype
FROM pg_catalog.pg_class
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
AND relname `, typeNamesClause, `
UNION ALL
-- this query will only match type names which include the schema prefix
SELECT pg_class.oid, pg_class.reltype
FROM pg_class
INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
WHERE nspname || '.' || relname `, typeNamesClause, `
),
selected_types(oid) AS (
-- collect the OIDs from pg_types which correspond to the selected classes
SELECT reltype AS oid
FROM selected_classes
UNION ALL
-- as well as any other type names which match our criteria
SELECT pg_type.oid
FROM pg_type
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
WHERE typname `, typeNamesClause, `
OR nspname || '.' || typname `, typeNamesClause, `
),
-- this builds a parent/child mapping of objects, allowing us to know
-- all the child (ie: dependent) types that a parent (type) requires
-- As can be seen, there are 3 ways this can occur (the last of which
-- is due to being a composite class, where the composite fields are children)
pc(parent, child) AS (
SELECT parent.oid, parent.typelem
FROM pg_type parent
WHERE parent.typtype = 'b' AND parent.typelem != 0
UNION ALL
SELECT parent.oid, parent.typbasetype
FROM pg_type parent
WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
UNION ALL
SELECT pg_type.oid, atttypid
FROM pg_attribute
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
WHERE NOT attisdropped
AND attnum > 0
),
-- Now construct a recursive query which includes a 'depth' element.
-- This is used to ensure that the "youngest" children are registered before
-- their parents.
relationships(parent, child, depth) AS (
SELECT DISTINCT 0::OID, selected_types.oid, 0
FROM selected_types
UNION ALL
SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
FROM selected_classes c
inner join pg_type ON (c.reltype = pg_type.oid)
inner join pg_attribute on (c.oid = pg_attribute.attrelid)
UNION ALL
SELECT pc.parent, pc.child, relationships.depth + 1
FROM pc
INNER JOIN relationships ON (pc.parent = relationships.child)
),
-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
composite AS (
SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
FROM pg_attribute
INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
WHERE NOT attisdropped
AND attnum > 0
GROUP BY pg_type.oid
)
-- Bring together this information, showing all the information which might possibly be required
-- to complete the registration, applying filters to only show the items which relate to the selected
-- types/classes.
SELECT typname,
pg_namespace.nspname,
typtype,
typbasetype,
typelem,
pg_type.oid,`)
if supportsMultirange {
parts = append(parts, `
COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
} else {
parts = append(parts, `
0 AS rngtypid,`)
}
parts = append(parts, `
COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
attnames, atttypids
FROM relationships
INNER JOIN pg_type ON (pg_type.oid = relationships.child)
LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
if supportsMultirange {
parts = append(parts, `
LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
}
parts = append(parts, `
LEFT OUTER JOIN composite USING (oid)
LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
WHERE NOT (typtype = 'b' AND typelem = 0)`)
parts = append(parts, `
GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
if supportsMultirange {
parts = append(parts, `
multirange.rngtypid,`)
}
parts = append(parts, `
attnames, atttypids
ORDER BY MAX(depth) desc, typname;`)
return strings.Join(parts, "")
}
type derivedTypeInfo struct {
Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
TypeName, Typtype, NspName string
Attnames []string
Atttypids []uint32
}
// LoadTypes performs a single (complex) query, returning all the required
// information to register the named types, as well as any other types directly
// or indirectly required to complete the registration.
// The result of this call can be passed into RegisterTypes to complete the process.
func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
m := c.TypeMap()
if typeNames == nil || len(typeNames) == 0 {
return nil, fmt.Errorf("No type names were supplied.")
}
// Disregard server version errors. This will result in
// the SQL not support recent structures such as multirange
serverVersion, _ := serverVersion(c)
sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
var rows Rows
var err error
if typeNames == nil {
rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol)
} else {
rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
}
if err != nil {
return nil, fmt.Errorf("While generating load types query: %w", err)
}
defer rows.Close()
result := make([]*pgtype.Type, 0, 100)
for rows.Next() {
ti := derivedTypeInfo{}
err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
if err != nil {
return nil, fmt.Errorf("While scanning type information: %w", err)
}
var type_ *pgtype.Type
switch ti.Typtype {
case "b": // array
dt, ok := m.TypeForOID(ti.Typelem)
if !ok {
return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
case "c": // composite
var fields []pgtype.CompositeCodecField
for i, fieldName := range ti.Attnames {
dt, ok := m.TypeForOID(ti.Atttypids[i])
if !ok {
return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
}
fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
case "d": // domain
dt, ok := m.TypeForOID(ti.Typbasetype)
if !ok {
return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
case "e": // enum
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}}
case "r": // range
dt, ok := m.TypeForOID(ti.Rngsubtype)
if !ok {
return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
case "m": // multirange
dt, ok := m.TypeForOID(ti.Rngtypid)
if !ok {
return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
default:
return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
}
if type_ != nil {
m.RegisterType(type_)
if ti.NspName != "" {
nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec}
m.RegisterType(nspType)
result = append(result, nspType)
}
result = append(result, type_)
}
}
return result, nil
}
// serverVersion returns the postgresql server version.
func serverVersion(c *Conn) (int64, error) {
serverVersionStr := c.PgConn().ParameterStatus("server_version")
serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
// if not PostgreSQL do nothing
if serverVersionStr == "" {
return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
}
version, err := strconv.ParseInt(serverVersionStr, 10, 64)
if err != nil {
return 0, fmt.Errorf("postgres version parsing failed: %w", err)
}
return version, nil
}
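A sketch of how the new LoadTypes helper is meant to be used, assuming an open *pgx.Conn named conn, a context ctx, the usual imports, and hypothetical type names:
// Load the named types plus everything they depend on, youngest children first,
// then register them with the connection's type map.
types, err := conn.LoadTypes(ctx, []string{"my_enum", "public.my_composite"})
if err != nil {
	log.Fatal(err)
}
for _, t := range types {
	conn.TypeMap().RegisterType(t)
}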

View File

@ -11,9 +11,10 @@ The primary way of establishing a connection is with [pgx.Connect]:
conn, err := pgx.Connect(context.Background(), os.Getenv("DATABASE_URL"))
The database connection string can be in URL or DSN format. Both PostgreSQL settings and pgx settings can be specified
here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the connection with
[ConnectConfig] to configure settings such as tracing that cannot be configured with a connection string.
The database connection string can be in URL or key/value format. Both PostgreSQL settings and pgx settings can be
specified here. In addition, a config struct can be created by [ParseConfig] and modified before establishing the
connection with [ConnectConfig] to configure settings such as tracing that cannot be configured with a connection
string.
Connection Pool
@ -23,8 +24,8 @@ github.com/jackc/pgx/v5/pgxpool for a concurrency safe connection pool.
Query Interface
pgx implements Query in the familiar database/sql style. However, pgx provides generic functions such as CollectRows and
ForEachRow that are a simpler and safer way of processing rows than manually calling rows.Next(), rows.Scan, and
rows.Err().
ForEachRow that are a simpler and safer way of processing rows than manually calling defer rows.Close(), rows.Next(),
rows.Scan, and rows.Err().
CollectRows can be used to collect all returned rows into a slice.
@ -174,7 +175,7 @@ notification is received or the context is canceled.
Tracing and Logging
pgx supports tracing by setting ConnConfig.Tracer.
pgx supports tracing by setting ConnConfig.Tracer. To combine several tracers you can use the multitracer.Tracer.
In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.

View File

@ -1,10 +1,8 @@
package pgx
import (
"database/sql/driver"
"fmt"
"github.com/jackc/pgx/v5/internal/anynil"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgtype"
)
@ -23,10 +21,15 @@ type ExtendedQueryBuilder struct {
func (eqb *ExtendedQueryBuilder) Build(m *pgtype.Map, sd *pgconn.StatementDescription, args []any) error {
eqb.reset()
anynil.NormalizeSlice(args)
if sd == nil {
return eqb.appendParamsForQueryExecModeExec(m, args)
for i := range args {
err := eqb.appendParam(m, 0, pgtype.TextFormatCode, args[i])
if err != nil {
err = fmt.Errorf("failed to encode args[%d]: %w", i, err)
return err
}
}
return nil
}
if len(sd.ParamOIDs) != len(args) {
@ -113,10 +116,6 @@ func (eqb *ExtendedQueryBuilder) reset() {
}
func (eqb *ExtendedQueryBuilder) encodeExtendedParamValue(m *pgtype.Map, oid uint32, formatCode int16, arg any) ([]byte, error) {
if anynil.Is(arg) {
return nil, nil
}
if eqb.paramValueBytes == nil {
eqb.paramValueBytes = make([]byte, 0, 128)
}
@ -145,74 +144,3 @@ func (eqb *ExtendedQueryBuilder) chooseParameterFormatCode(m *pgtype.Map, oid ui
return m.FormatCodeForOID(oid)
}
// appendParamsForQueryExecModeExec appends the args to eqb.
//
// Parameters must be encoded in the text format because of differences in type conversion between timestamps and
// dates. In QueryExecModeExec we don't know what the actual PostgreSQL type is. To determine the type we use the
// Go type to OID type mapping registered by RegisterDefaultPgType. However, the Go time.Time represents both
// PostgreSQL timestamp[tz] and date. To use the binary format we would need to also specify what the PostgreSQL
// type OID is. But that would mean telling PostgreSQL that we have sent a timestamp[tz] when what is needed is a date.
// This means that the value is converted from text to timestamp[tz] to date. This means it does a time zone conversion
// before converting it to date. This means that dates can be shifted by one day. In text format without that double
// type conversion it takes the date directly and ignores time zone (i.e. it works).
//
// Given that the whole point of QueryExecModeExec is to operate without having to know the PostgreSQL types there is
// no way to safely use binary or to specify the parameter OIDs.
func (eqb *ExtendedQueryBuilder) appendParamsForQueryExecModeExec(m *pgtype.Map, args []any) error {
for _, arg := range args {
if arg == nil {
err := eqb.appendParam(m, 0, TextFormatCode, arg)
if err != nil {
return err
}
} else {
dt, ok := m.TypeForValue(arg)
if !ok {
var tv pgtype.TextValuer
if tv, ok = arg.(pgtype.TextValuer); ok {
t, err := tv.TextValue()
if err != nil {
return err
}
dt, ok = m.TypeForOID(pgtype.TextOID)
if ok {
arg = t
}
}
}
if !ok {
var dv driver.Valuer
if dv, ok = arg.(driver.Valuer); ok {
v, err := dv.Value()
if err != nil {
return err
}
dt, ok = m.TypeForValue(v)
if ok {
arg = v
}
}
}
if !ok {
var str fmt.Stringer
if str, ok = arg.(fmt.Stringer); ok {
dt, ok = m.TypeForOID(pgtype.TextOID)
if ok {
arg = str.String()
}
}
}
if !ok {
return &unknownArgumentTypeQueryExecModeExecError{arg: arg}
}
err := eqb.appendParam(m, dt.OID, TextFormatCode, arg)
if err != nil {
return err
}
}
}
return nil
}

View File

@ -1,36 +0,0 @@
package anynil
import "reflect"
// Is returns true if value is any type of nil. e.g. nil or []byte(nil).
func Is(value any) bool {
if value == nil {
return true
}
refVal := reflect.ValueOf(value)
switch refVal.Kind() {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
return refVal.IsNil()
default:
return false
}
}
// Normalize converts typed nils (e.g. []byte(nil)) into untyped nil. Other values are returned unmodified.
func Normalize(v any) any {
if Is(v) {
return nil
}
return v
}
// NormalizeSlice converts all typed nils (e.g. []byte(nil)) in s into untyped nils. Other values are unmodified. s is
// mutated in place.
func NormalizeSlice(s []any) {
for i := range s {
if Is(s[i]) {
s[i] = nil
}
}
}

View File

@ -4,6 +4,8 @@ import (
"context"
"errors"
"io"
"github.com/jackc/pgx/v5/pgtype"
)
// The PostgreSQL wire protocol has a limit of 1 GB - 1 per message. See definition of
@ -115,9 +117,10 @@ func (o *LargeObject) Read(p []byte) (int, error) {
expected = maxLargeObjectMessageLength
}
var res []byte
res := pgtype.PreallocBytes(p[nTotal:])
err := o.tx.QueryRow(o.ctx, "select loread($1, $2)", o.fd, expected).Scan(&res)
copy(p[nTotal:], res)
// We compute expected so that it always fits into p, so it should never happen
// that PreallocBytes's ScanBytes had to allocate a new slice.
nTotal += len(res)
if err != nil {
return nTotal, err

View File

@ -2,6 +2,7 @@ package pgx
import (
"context"
"fmt"
"strconv"
"strings"
"unicode/utf8"
@ -21,6 +22,34 @@ type NamedArgs map[string]any
// RewriteQuery implements the QueryRewriter interface.
func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
return rewriteQuery(na, sql, false)
}
// StrictNamedArgs can be used in the same way as NamedArgs, but provided arguments are also checked to include all
// named arguments that the sql query uses, and no extra arguments.
type StrictNamedArgs map[string]any
// RewriteQuery implements the QueryRewriter interface.
func (sna StrictNamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
return rewriteQuery(sna, sql, true)
}
type namedArg string
type sqlLexer struct {
src string
start int
pos int
nested int // multiline comment nesting level.
stateFn stateFn
parts []any
nameToOrdinal map[namedArg]int
}
type stateFn func(*sqlLexer) stateFn
func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string, newArgs []any, err error) {
l := &sqlLexer{
src: sql,
stateFn: rawState,
@ -44,27 +73,24 @@ func (na NamedArgs) RewriteQuery(ctx context.Context, conn *Conn, sql string, ar
newArgs = make([]any, len(l.nameToOrdinal))
for name, ordinal := range l.nameToOrdinal {
newArgs[ordinal-1] = na[string(name)]
var found bool
newArgs[ordinal-1], found = na[string(name)]
if isStrict && !found {
return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name)
}
}
if isStrict {
for name := range na {
if _, found := l.nameToOrdinal[namedArg(name)]; !found {
return "", nil, fmt.Errorf("argument %s of StrictNamedArgs not found in sql query", name)
}
}
}
return sb.String(), newArgs, nil
}
type namedArg string
type sqlLexer struct {
src string
start int
pos int
nested int // multiline comment nesting level.
stateFn stateFn
parts []any
nameToOrdinal map[namedArg]int
}
type stateFn func(*sqlLexer) stateFn
func rawState(l *sqlLexer) stateFn {
for {
r, width := utf8.DecodeRuneInString(l.src[l.pos:])

View File

@ -19,6 +19,7 @@ import (
"github.com/jackc/pgpassfile"
"github.com/jackc/pgservicefile"
"github.com/jackc/pgx/v5/pgconn/ctxwatch"
"github.com/jackc/pgx/v5/pgproto3"
)
@ -39,7 +40,12 @@ type Config struct {
DialFunc DialFunc // e.g. net.Dialer.DialContext
LookupFunc LookupFunc // e.g. net.Resolver.LookupHost
BuildFrontend BuildFrontendFunc
RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
// BuildContextWatcherHandler is called to create a ContextWatcherHandler for a connection. The handler is called
// when a context passed to a PgConn method is canceled.
BuildContextWatcherHandler func(*PgConn) ctxwatch.Handler
RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
KerberosSrvName string
KerberosSpn string
@ -70,7 +76,7 @@ type Config struct {
// ParseConfigOptions contains options that control how a config is built such as GetSSLPassword.
type ParseConfigOptions struct {
// GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the the libpq function
// GetSSLPassword gets the password to decrypt a SSL client certificate. This is analogous to the libpq function
// PQsetSSLKeyPassHook_OpenSSL.
GetSSLPassword GetSSLPasswordFunc
}
@ -112,6 +118,14 @@ type FallbackConfig struct {
TLSConfig *tls.Config // nil disables TLS
}
// connectOneConfig is the configuration for a single attempt to connect to a single host.
type connectOneConfig struct {
network string
address string
originalHostname string // original hostname before resolving
tlsConfig *tls.Config // nil disables TLS
}
// isAbsolutePath checks if the provided value is an absolute path either
// beginning with a forward slash (as on Linux-based systems) or with a capital
// letter A-Z followed by a colon and a backslash, e.g., "C:\", (as on Windows).
@ -146,11 +160,11 @@ func NetworkAddress(host string, port uint16) (network, address string) {
// ParseConfig builds a *Config from connString with similar behavior to the PostgreSQL standard C library libpq. It
// uses the same defaults as libpq (e.g. port=5432) and understands most PG* environment variables. ParseConfig closely
// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format (DSN style).
// See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be
// empty to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file.
// matches the parsing behavior of libpq. connString may either be in URL format or keyword = value format. See
// https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING for details. connString also may be empty
// to only read from the environment. If a password is not supplied it will attempt to read the .pgpass file.
//
// # Example DSN
// # Example Keyword/Value
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca
//
// # Example URL
@ -169,7 +183,7 @@ func NetworkAddress(host string, port uint16) (network, address string) {
// postgres://jack:secret@foo.example.com:5432,bar.example.com:5432/mydb
//
// ParseConfig currently recognizes the following environment variable and their parameter key word equivalents passed
// via database URL or DSN:
// via database URL or keyword/value:
//
// PGHOST
// PGPORT
@ -233,16 +247,16 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
connStringSettings := make(map[string]string)
if connString != "" {
var err error
// connString may be a database URL or a DSN
// connString may be a database URL or in PostgreSQL keyword/value format
if strings.HasPrefix(connString, "postgres://") || strings.HasPrefix(connString, "postgresql://") {
connStringSettings, err = parseURLSettings(connString)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as URL", err: err}
}
} else {
connStringSettings, err = parseDSNSettings(connString)
connStringSettings, err = parseKeywordValueSettings(connString)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as DSN", err: err}
return nil, &ParseConfigError{ConnString: connString, msg: "failed to parse as keyword/value", err: err}
}
}
}
@ -266,6 +280,9 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend {
return pgproto3.NewFrontend(r, w)
},
BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler {
return &DeadlineContextWatcherHandler{Conn: pgConn.conn}
},
OnPgError: func(_ *PgConn, pgErr *PgError) bool {
// we want to automatically close any fatal errors
if strings.EqualFold(pgErr.Severity, "FATAL") {
@ -450,14 +467,17 @@ func parseEnvSettings() map[string]string {
func parseURLSettings(connString string) (map[string]string, error) {
settings := make(map[string]string)
url, err := url.Parse(connString)
parsedURL, err := url.Parse(connString)
if err != nil {
if urlErr := new(url.Error); errors.As(err, &urlErr) {
return nil, urlErr.Err
}
return nil, err
}
if url.User != nil {
settings["user"] = url.User.Username()
if password, present := url.User.Password(); present {
if parsedURL.User != nil {
settings["user"] = parsedURL.User.Username()
if password, present := parsedURL.User.Password(); present {
settings["password"] = password
}
}
@ -465,7 +485,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
// Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port.
var hosts []string
var ports []string
for _, host := range strings.Split(url.Host, ",") {
for _, host := range strings.Split(parsedURL.Host, ",") {
if host == "" {
continue
}
@ -491,7 +511,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
settings["port"] = strings.Join(ports, ",")
}
database := strings.TrimLeft(url.Path, "/")
database := strings.TrimLeft(parsedURL.Path, "/")
if database != "" {
settings["database"] = database
}
@ -500,7 +520,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
"dbname": "database",
}
for k, v := range url.Query() {
for k, v := range parsedURL.Query() {
if k2, present := nameMap[k]; present {
k = k2
}
@ -517,7 +537,7 @@ func isIPOnly(host string) bool {
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
func parseDSNSettings(s string) (map[string]string, error) {
func parseKeywordValueSettings(s string) (map[string]string, error) {
settings := make(map[string]string)
nameMap := map[string]string{
@ -528,7 +548,7 @@ func parseDSNSettings(s string) (map[string]string, error) {
var key, val string
eqIdx := strings.IndexRune(s, '=')
if eqIdx < 0 {
return nil, errors.New("invalid dsn")
return nil, errors.New("invalid keyword/value")
}
key = strings.Trim(s[:eqIdx], " \t\n\r\v\f")
@ -580,7 +600,7 @@ func parseDSNSettings(s string) (map[string]string, error) {
}
if key == "" {
return nil, errors.New("invalid dsn")
return nil, errors.New("invalid keyword/value")
}
settings[key] = val
@ -637,6 +657,36 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
tlsConfig := &tls.Config{}
if sslrootcert != "" {
var caCertPool *x509.CertPool
if sslrootcert == "system" {
var err error
caCertPool, err = x509.SystemCertPool()
if err != nil {
return nil, fmt.Errorf("unable to load system certificate pool: %w", err)
}
sslmode = "verify-full"
} else {
caCertPool = x509.NewCertPool()
caPath := sslrootcert
caCert, err := os.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("unable to read CA file: %w", err)
}
if !caCertPool.AppendCertsFromPEM(caCert) {
return nil, errors.New("unable to add CA to cert pool")
}
}
tlsConfig.RootCAs = caCertPool
tlsConfig.ClientCAs = caCertPool
}
switch sslmode {
case "disable":
return []*tls.Config{nil}, nil
@ -694,23 +744,6 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
return nil, errors.New("sslmode is invalid")
}
if sslrootcert != "" {
caCertPool := x509.NewCertPool()
caPath := sslrootcert
caCert, err := os.ReadFile(caPath)
if err != nil {
return nil, fmt.Errorf("unable to read CA file: %w", err)
}
if !caCertPool.AppendCertsFromPEM(caCert) {
return nil, errors.New("unable to add CA to cert pool")
}
tlsConfig.RootCAs = caCertPool
tlsConfig.ClientCAs = caCertPool
}
if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
return nil, errors.New(`both "sslcert" and "sslkey" are required`)
}
@ -800,7 +833,8 @@ func parsePort(s string) (uint16, error) {
}
func makeDefaultDialer() *net.Dialer {
return &net.Dialer{KeepAlive: 5 * time.Minute}
// rely on GOLANG KeepAlive settings
return &net.Dialer{}
}
func makeDefaultResolver() *net.Resolver {
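A sketch of the two connection-string forms accepted by ParseConfig, including the new sslrootcert=system handling; host names and credentials are placeholders:
// Keyword/value form; sslrootcert=system switches sslmode to verify-full and
// trusts the operating system certificate pool.
cfg, err := pgconn.ParseConfig(
	"host=db.internal.example port=5432 user=app dbname=appdb sslrootcert=system")
if err != nil {
	log.Fatal(err)
}

// URL form goes through the same parser.
cfg2, err := pgconn.ParseConfig("postgres://app:secret@db.internal.example:5432/appdb")
if err != nil {
	log.Fatal(err)
}
_, _ = cfg, cfg2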

View File

@ -8,9 +8,8 @@ import (
// ContextWatcher watches a context and performs an action when the context is canceled. It can watch one context at a
// time.
type ContextWatcher struct {
onCancel func()
onUnwatchAfterCancel func()
unwatchChan chan struct{}
handler Handler
unwatchChan chan struct{}
lock sync.Mutex
watchInProgress bool
@ -20,11 +19,10 @@ type ContextWatcher struct {
// NewContextWatcher returns a ContextWatcher. onCancel will be called when a watched context is canceled.
// OnUnwatchAfterCancel will be called when Unwatch is called and the watched context had already been canceled and
// onCancel called.
func NewContextWatcher(onCancel func(), onUnwatchAfterCancel func()) *ContextWatcher {
func NewContextWatcher(handler Handler) *ContextWatcher {
cw := &ContextWatcher{
onCancel: onCancel,
onUnwatchAfterCancel: onUnwatchAfterCancel,
unwatchChan: make(chan struct{}),
handler: handler,
unwatchChan: make(chan struct{}),
}
return cw
@ -46,7 +44,7 @@ func (cw *ContextWatcher) Watch(ctx context.Context) {
go func() {
select {
case <-ctx.Done():
cw.onCancel()
cw.handler.HandleCancel(ctx)
cw.onCancelWasCalled = true
<-cw.unwatchChan
case <-cw.unwatchChan:
@ -66,8 +64,17 @@ func (cw *ContextWatcher) Unwatch() {
if cw.watchInProgress {
cw.unwatchChan <- struct{}{}
if cw.onCancelWasCalled {
cw.onUnwatchAfterCancel()
cw.handler.HandleUnwatchAfterCancel()
}
cw.watchInProgress = false
}
}
type Handler interface {
// HandleCancel is called when the context that a ContextWatcher is currently watching is canceled. canceledCtx is the
// context that was canceled.
HandleCancel(canceledCtx context.Context)
// HandleUnwatchAfterCancel is called when a ContextWatcher that called HandleCancel on this Handler is unwatched.
HandleUnwatchAfterCancel()
}

View File

@ -5,8 +5,8 @@ nearly the same level is the C library libpq.
Establishing a Connection
Use Connect to establish a connection. It accepts a connection string in URL or DSN and will read the environment for
libpq style environment variables.
Use Connect to establish a connection. It accepts a connection string in URL or keyword/value format and will read the
environment for libpq style environment variables.
Executing a Query
@ -20,13 +20,17 @@ result. The ReadAll method reads all query results into memory.
Pipeline Mode
Pipeline mode allows sending queries without having read the results of previously sent queries. It allows
control of exactly how many and when network round trips occur.
Pipeline mode allows sending queries without having read the results of previously sent queries. It allows control of
exactly how many and when network round trips occur.
Context Support
All potentially blocking operations take a context.Context. If a context is canceled while the method is in progress the
method immediately returns. In most circumstances, this will close the underlying connection.
All potentially blocking operations take a context.Context. The default behavior when a context is canceled is for the
method to immediately return. In most circumstances, this will also close the underlying connection. This behavior can
be customized by using BuildContextWatcherHandler on the Config to create a ctxwatch.Handler with different behavior.
This can be especially useful when queries are frequently canceled and the overhead of creating new connections is
a problem. DeadlineContextWatcherHandler and CancelRequestContextWatcherHandler can be used to introduce a delay before
interrupting the query in such a way as to close the connection.
The CancelRequest method may be used to request the PostgreSQL server cancel an in-progress query without forcing the
client to abort.
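A sketch of the customization hook described above, assuming the pgconn and pgconn/ctxwatch imports and the DeadlineContextWatcherHandler fields exported in this pgconn release:
cfg, err := pgconn.ParseConfig(os.Getenv("DATABASE_URL"))
if err != nil {
	log.Fatal(err)
}

// Give a canceled query up to 500ms to finish on its own before the connection
// deadline interrupts it (which also closes the connection).
cfg.BuildContextWatcherHandler = func(conn *pgconn.PgConn) ctxwatch.Handler {
	return &pgconn.DeadlineContextWatcherHandler{
		Conn:          conn.Conn(),
		DeadlineDelay: 500 * time.Millisecond,
	}
}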

View File

@ -12,13 +12,14 @@ import (
// SafeToRetry checks if the err is guaranteed to have occurred before sending any data to the server.
func SafeToRetry(err error) bool {
if e, ok := err.(interface{ SafeToRetry() bool }); ok {
return e.SafeToRetry()
var retryableErr interface{ SafeToRetry() bool }
if errors.As(err, &retryableErr) {
return retryableErr.SafeToRetry()
}
return false
}
// Timeout checks if err was was caused by a timeout. To be specific, it is true if err was caused within pgconn by a
// Timeout checks if err was caused by a timeout. To be specific, it is true if err was caused within pgconn by a
// context.DeadlineExceeded or an implementer of net.Error where Timeout() is true.
func Timeout(err error) bool {
var timeoutErr *errTimeout
@ -29,23 +30,24 @@ func Timeout(err error) bool {
// http://www.postgresql.org/docs/11/static/protocol-error-fields.html for
// detailed field description.
type PgError struct {
Severity string
Code string
Message string
Detail string
Hint string
Position int32
InternalPosition int32
InternalQuery string
Where string
SchemaName string
TableName string
ColumnName string
DataTypeName string
ConstraintName string
File string
Line int32
Routine string
Severity string
SeverityUnlocalized string
Code string
Message string
Detail string
Hint string
Position int32
InternalPosition int32
InternalQuery string
Where string
SchemaName string
TableName string
ColumnName string
DataTypeName string
ConstraintName string
File string
Line int32
Routine string
}
func (pe *PgError) Error() string {
@ -60,23 +62,37 @@ func (pe *PgError) SQLState() string {
// ConnectError is the error returned when a connection attempt fails.
type ConnectError struct {
Config *Config // The configuration that was used in the connection attempt.
msg string
err error
}
func (e *ConnectError) Error() string {
sb := &strings.Builder{}
fmt.Fprintf(sb, "failed to connect to `host=%s user=%s database=%s`: %s", e.Config.Host, e.Config.User, e.Config.Database, e.msg)
if e.err != nil {
fmt.Fprintf(sb, " (%s)", e.err.Error())
prefix := fmt.Sprintf("failed to connect to `user=%s database=%s`:", e.Config.User, e.Config.Database)
details := e.err.Error()
if strings.Contains(details, "\n") {
return prefix + "\n\t" + strings.ReplaceAll(details, "\n", "\n\t")
} else {
return prefix + " " + details
}
return sb.String()
}
func (e *ConnectError) Unwrap() error {
return e.err
}
type perDialConnectError struct {
address string
originalHostname string
err error
}
func (e *perDialConnectError) Error() string {
return fmt.Sprintf("%s (%s): %s", e.address, e.originalHostname, e.err.Error())
}
func (e *perDialConnectError) Unwrap() error {
return e.err
}
type connLockError struct {
status string
}
@ -195,10 +211,10 @@ func redactPW(connString string) string {
return redactURL(u)
}
}
quotedDSN := regexp.MustCompile(`password='[^']*'`)
connString = quotedDSN.ReplaceAllLiteralString(connString, "password=xxxxx")
plainDSN := regexp.MustCompile(`password=[^ ]*`)
connString = plainDSN.ReplaceAllLiteralString(connString, "password=xxxxx")
quotedKV := regexp.MustCompile(`password='[^']*'`)
connString = quotedKV.ReplaceAllLiteralString(connString, "password=xxxxx")
plainKV := regexp.MustCompile(`password=[^ ]*`)
connString = plainKV.ReplaceAllLiteralString(connString, "password=xxxxx")
brokenURL := regexp.MustCompile(`:[^:@]+?@`)
connString = brokenURL.ReplaceAllLiteralString(connString, ":xxxxxx@")
return connString

View File

@ -18,8 +18,8 @@ import (
"github.com/jackc/pgx/v5/internal/iobufpool"
"github.com/jackc/pgx/v5/internal/pgio"
"github.com/jackc/pgx/v5/pgconn/ctxwatch"
"github.com/jackc/pgx/v5/pgconn/internal/bgreader"
"github.com/jackc/pgx/v5/pgconn/internal/ctxwatch"
"github.com/jackc/pgx/v5/pgproto3"
)
@ -82,6 +82,8 @@ type PgConn struct {
slowWriteTimer *time.Timer
bgReaderStarted chan struct{}
customData map[string]any
config *Config
status byte // One of connStatus* constants
@ -103,8 +105,9 @@ type PgConn struct {
cleanupDone chan struct{}
}
// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or DSN format)
// to provide configuration. See documentation for [ParseConfig] for details. ctx can be used to cancel a connect attempt.
// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value
// format) to provide configuration. See documentation for [ParseConfig] for details. ctx can be used to cancel a
// connect attempt.
func Connect(ctx context.Context, connString string) (*PgConn, error) {
config, err := ParseConfig(connString)
if err != nil {
@ -114,9 +117,9 @@ func Connect(ctx context.Context, connString string) (*PgConn, error) {
return ConnectConfig(ctx, config)
}
// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or DSN format)
// and ParseConfigOptions to provide additional configuration. See documentation for [ParseConfig] for details. ctx can be
// used to cancel a connect attempt.
// Connect establishes a connection to a PostgreSQL server using the environment and connString (in URL or keyword/value
// format) and ParseConfigOptions to provide additional configuration. See documentation for [ParseConfig] for details.
// ctx can be used to cancel a connect attempt.
func ConnectWithOptions(ctx context.Context, connString string, parseConfigOptions ParseConfigOptions) (*PgConn, error) {
config, err := ParseConfigWithOptions(connString, parseConfigOptions)
if err != nil {
@ -131,15 +134,46 @@ func ConnectWithOptions(ctx context.Context, connString string, parseConfigOptio
//
// If config.Fallbacks are present they will sequentially be tried in case of error establishing network connection. An
// authentication error will terminate the chain of attempts (like libpq:
// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error. Otherwise,
// if all attempts fail the last error is returned.
func ConnectConfig(octx context.Context, config *Config) (pgConn *PgConn, err error) {
// https://www.postgresql.org/docs/11/libpq-connect.html#LIBPQ-MULTIPLE-HOSTS) and be returned as the error.
func ConnectConfig(ctx context.Context, config *Config) (*PgConn, error) {
// Default values are set in ParseConfig. Enforce initial creation by ParseConfig rather than setting defaults from
// zero values.
if !config.createdByParseConfig {
panic("config must be created by ParseConfig")
}
var allErrors []error
connectConfigs, errs := buildConnectOneConfigs(ctx, config)
if len(errs) > 0 {
allErrors = append(allErrors, errs...)
}
if len(connectConfigs) == 0 {
return nil, &ConnectError{Config: config, err: fmt.Errorf("hostname resolving error: %w", errors.Join(allErrors...))}
}
pgConn, errs := connectPreferred(ctx, config, connectConfigs)
if len(errs) > 0 {
allErrors = append(allErrors, errs...)
return nil, &ConnectError{Config: config, err: errors.Join(allErrors...)}
}
if config.AfterConnect != nil {
err := config.AfterConnect(ctx, pgConn)
if err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, err: fmt.Errorf("AfterConnect error: %w", err)}
}
}
return pgConn, nil
}
// buildConnectOneConfigs resolves hostnames and builds a list of connectOneConfigs to try connecting to. It returns a
// slice of successfully resolved connectOneConfigs and a slice of errors. It is possible for both slices to contain
// values if some hosts were successfully resolved and others were not.
func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneConfig, []error) {
// Simplify usage by treating primary config and fallbacks the same.
fallbackConfigs := []*FallbackConfig{
{
@ -149,95 +183,28 @@ func ConnectConfig(octx context.Context, config *Config) (pgConn *PgConn, err er
},
}
fallbackConfigs = append(fallbackConfigs, config.Fallbacks...)
ctx := octx
fallbackConfigs, err = expandWithIPs(ctx, config.LookupFunc, fallbackConfigs)
if err != nil {
return nil, &ConnectError{Config: config, msg: "hostname resolving error", err: err}
}
if len(fallbackConfigs) == 0 {
return nil, &ConnectError{Config: config, msg: "hostname resolving error", err: errors.New("ip addr wasn't found")}
}
var configs []*connectOneConfig
foundBestServer := false
var fallbackConfig *FallbackConfig
for i, fc := range fallbackConfigs {
// ConnectTimeout restricts the whole connection process.
if config.ConnectTimeout != 0 {
// create new context first time or when previous host was different
if i == 0 || (fallbackConfigs[i].Host != fallbackConfigs[i-1].Host) {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout)
defer cancel()
}
} else {
ctx = octx
}
pgConn, err = connect(ctx, config, fc, false)
if err == nil {
foundBestServer = true
break
} else if pgerr, ok := err.(*PgError); ok {
err = &ConnectError{Config: config, msg: "server error", err: pgerr}
const ERRCODE_INVALID_PASSWORD = "28P01" // wrong password
const ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION = "28000" // wrong password or bad pg_hba.conf settings
const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist
const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege
if pgerr.Code == ERRCODE_INVALID_PASSWORD ||
pgerr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && fc.TLSConfig != nil ||
pgerr.Code == ERRCODE_INVALID_CATALOG_NAME ||
pgerr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE {
break
}
} else if cerr, ok := err.(*ConnectError); ok {
if _, ok := cerr.err.(*NotPreferredError); ok {
fallbackConfig = fc
}
}
}
var allErrors []error
if !foundBestServer && fallbackConfig != nil {
pgConn, err = connect(ctx, config, fallbackConfig, true)
if pgerr, ok := err.(*PgError); ok {
err = &ConnectError{Config: config, msg: "server error", err: pgerr}
}
}
if err != nil {
return nil, err // no need to wrap in connectError because it will already be wrapped in all cases except PgError
}
if config.AfterConnect != nil {
err := config.AfterConnect(ctx, pgConn)
if err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "AfterConnect error", err: err}
}
}
return pgConn, nil
}
func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*FallbackConfig) ([]*FallbackConfig, error) {
var configs []*FallbackConfig
var lookupErrors []error
for _, fb := range fallbacks {
for _, fb := range fallbackConfigs {
// skip resolve for unix sockets
if isAbsolutePath(fb.Host) {
configs = append(configs, &FallbackConfig{
Host: fb.Host,
Port: fb.Port,
TLSConfig: fb.TLSConfig,
network, address := NetworkAddress(fb.Host, fb.Port)
configs = append(configs, &connectOneConfig{
network: network,
address: address,
originalHostname: fb.Host,
tlsConfig: fb.TLSConfig,
})
continue
}
ips, err := lookupFn(ctx, fb.Host)
ips, err := config.LookupFunc(ctx, fb.Host)
if err != nil {
lookupErrors = append(lookupErrors, err)
allErrors = append(allErrors, err)
continue
}
@ -246,63 +213,126 @@ func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*Fallba
if err == nil {
port, err := strconv.ParseUint(splitPort, 10, 16)
if err != nil {
return nil, fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)
return nil, []error{fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)}
}
configs = append(configs, &FallbackConfig{
Host: splitIP,
Port: uint16(port),
TLSConfig: fb.TLSConfig,
network, address := NetworkAddress(splitIP, uint16(port))
configs = append(configs, &connectOneConfig{
network: network,
address: address,
originalHostname: fb.Host,
tlsConfig: fb.TLSConfig,
})
} else {
configs = append(configs, &FallbackConfig{
Host: ip,
Port: fb.Port,
TLSConfig: fb.TLSConfig,
network, address := NetworkAddress(ip, fb.Port)
configs = append(configs, &connectOneConfig{
network: network,
address: address,
originalHostname: fb.Host,
tlsConfig: fb.TLSConfig,
})
}
}
}
// See https://github.com/jackc/pgx/issues/1464. When Go 1.20 can be used in pgx consider using errors.Join so all
// errors are reported.
if len(configs) == 0 && len(lookupErrors) > 0 {
return nil, lookupErrors[0]
}
return configs, nil
return configs, allErrors
}
func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig,
// connectPreferred attempts to connect to the preferred host from connectOneConfigs. The connections are attempted in
// order. If a connection is successful it is returned. If no connection is successful then all errors are returned. If
// a connection attempt returns a [NotPreferredError], then that host will be used if no other hosts are successful.
func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*connectOneConfig) (*PgConn, []error) {
octx := ctx
var allErrors []error
var fallbackConnectOneConfig *connectOneConfig
for i, c := range connectOneConfigs {
// ConnectTimeout restricts the whole connection process.
if config.ConnectTimeout != 0 {
// create new context first time or when previous host was different
if i == 0 || (connectOneConfigs[i].address != connectOneConfigs[i-1].address) {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout)
defer cancel()
}
} else {
ctx = octx
}
pgConn, err := connectOne(ctx, config, c, false)
if pgConn != nil {
return pgConn, nil
}
allErrors = append(allErrors, err)
var pgErr *PgError
if errors.As(err, &pgErr) {
const ERRCODE_INVALID_PASSWORD = "28P01" // wrong password
const ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION = "28000" // wrong password or bad pg_hba.conf settings
const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist
const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege
if pgErr.Code == ERRCODE_INVALID_PASSWORD ||
pgErr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && c.tlsConfig != nil ||
pgErr.Code == ERRCODE_INVALID_CATALOG_NAME ||
pgErr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE {
return nil, allErrors
}
}
var npErr *NotPreferredError
if errors.As(err, &npErr) {
fallbackConnectOneConfig = c
}
}
if fallbackConnectOneConfig != nil {
pgConn, err := connectOne(ctx, config, fallbackConnectOneConfig, true)
if err == nil {
return pgConn, nil
}
allErrors = append(allErrors, err)
}
return nil, allErrors
}
// connectOne makes one connection attempt to a single host.
func connectOne(ctx context.Context, config *Config, connectConfig *connectOneConfig,
ignoreNotPreferredErr bool,
) (*PgConn, error) {
pgConn := new(PgConn)
pgConn.config = config
pgConn.cleanupDone = make(chan struct{})
pgConn.customData = make(map[string]any)
var err error
network, address := NetworkAddress(fallbackConfig.Host, fallbackConfig.Port)
netConn, err := config.DialFunc(ctx, network, address)
if err != nil {
return nil, &ConnectError{Config: config, msg: "dial error", err: normalizeTimeoutError(ctx, err)}
newPerDialConnectError := func(msg string, err error) *perDialConnectError {
err = normalizeTimeoutError(ctx, err)
e := &perDialConnectError{address: connectConfig.address, originalHostname: connectConfig.originalHostname, err: fmt.Errorf("%s: %w", msg, err)}
return e
}
pgConn.conn = netConn
pgConn.contextWatcher = newContextWatcher(netConn)
pgConn.contextWatcher.Watch(ctx)
pgConn.conn, err = config.DialFunc(ctx, connectConfig.network, connectConfig.address)
if err != nil {
return nil, newPerDialConnectError("dial error", err)
}
if fallbackConfig.TLSConfig != nil {
nbTLSConn, err := startTLS(netConn, fallbackConfig.TLSConfig)
if connectConfig.tlsConfig != nil {
pgConn.contextWatcher = ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: pgConn.conn})
pgConn.contextWatcher.Watch(ctx)
tlsConn, err := startTLS(pgConn.conn, connectConfig.tlsConfig)
pgConn.contextWatcher.Unwatch() // Always unwatch `netConn` after TLS.
if err != nil {
netConn.Close()
return nil, &ConnectError{Config: config, msg: "tls error", err: normalizeTimeoutError(ctx, err)}
pgConn.conn.Close()
return nil, newPerDialConnectError("tls error", err)
}
pgConn.conn = nbTLSConn
pgConn.contextWatcher = newContextWatcher(nbTLSConn)
pgConn.contextWatcher.Watch(ctx)
pgConn.conn = tlsConn
}
pgConn.contextWatcher = ctxwatch.NewContextWatcher(config.BuildContextWatcherHandler(pgConn))
pgConn.contextWatcher.Watch(ctx)
defer pgConn.contextWatcher.Unwatch()
pgConn.parameterStatuses = make(map[string]string)
@ -336,7 +366,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
pgConn.frontend.Send(&startupMsg)
if err := pgConn.flushWithPotentialWriteReadDeadlock(); err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "failed to write startup message", err: normalizeTimeoutError(ctx, err)}
return nil, newPerDialConnectError("failed to write startup message", err)
}
for {
@ -344,9 +374,9 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
if err != nil {
pgConn.conn.Close()
if err, ok := err.(*PgError); ok {
return nil, err
return nil, newPerDialConnectError("server error", err)
}
return nil, &ConnectError{Config: config, msg: "failed to receive message", err: normalizeTimeoutError(ctx, err)}
return nil, newPerDialConnectError("failed to receive message", err)
}
switch msg := msg.(type) {
@ -359,26 +389,26 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
err = pgConn.txPasswordMessage(pgConn.config.Password)
if err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "failed to write password message", err: err}
return nil, newPerDialConnectError("failed to write password message", err)
}
case *pgproto3.AuthenticationMD5Password:
digestedPassword := "md5" + hexMD5(hexMD5(pgConn.config.Password+pgConn.config.User)+string(msg.Salt[:]))
err = pgConn.txPasswordMessage(digestedPassword)
if err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "failed to write password message", err: err}
return nil, newPerDialConnectError("failed to write password message", err)
}
case *pgproto3.AuthenticationSASL:
err = pgConn.scramAuth(msg.AuthMechanisms)
if err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "failed SASL auth", err: err}
return nil, newPerDialConnectError("failed SASL auth", err)
}
case *pgproto3.AuthenticationGSS:
err = pgConn.gssAuth()
if err != nil {
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "failed GSS auth", err: err}
return nil, newPerDialConnectError("failed GSS auth", err)
}
case *pgproto3.ReadyForQuery:
pgConn.status = connStatusIdle
@ -396,7 +426,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
return pgConn, nil
}
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "ValidateConnect failed", err: err}
return nil, newPerDialConnectError("ValidateConnect failed", err)
}
}
return pgConn, nil
@ -404,21 +434,14 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
// handled by ReceiveMessage
case *pgproto3.ErrorResponse:
pgConn.conn.Close()
return nil, ErrorResponseToPgError(msg)
return nil, newPerDialConnectError("server error", ErrorResponseToPgError(msg))
default:
pgConn.conn.Close()
return nil, &ConnectError{Config: config, msg: "received unexpected message", err: err}
return nil, newPerDialConnectError("received unexpected message", err)
}
}
}
func newContextWatcher(conn net.Conn) *ctxwatch.ContextWatcher {
return ctxwatch.NewContextWatcher(
func() { conn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) },
func() { conn.SetDeadline(time.Time{}) },
)
}
func startTLS(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
err := binary.Write(conn, binary.BigEndian, []int32{8, 80877103})
if err != nil {
@ -928,23 +951,24 @@ func (pgConn *PgConn) Deallocate(ctx context.Context, name string) error {
// ErrorResponseToPgError converts a wire protocol error message to a *PgError.
func ErrorResponseToPgError(msg *pgproto3.ErrorResponse) *PgError {
return &PgError{
Severity: msg.Severity,
Code: string(msg.Code),
Message: string(msg.Message),
Detail: string(msg.Detail),
Hint: msg.Hint,
Position: msg.Position,
InternalPosition: msg.InternalPosition,
InternalQuery: string(msg.InternalQuery),
Where: string(msg.Where),
SchemaName: string(msg.SchemaName),
TableName: string(msg.TableName),
ColumnName: string(msg.ColumnName),
DataTypeName: string(msg.DataTypeName),
ConstraintName: msg.ConstraintName,
File: string(msg.File),
Line: msg.Line,
Routine: string(msg.Routine),
Severity: msg.Severity,
SeverityUnlocalized: msg.SeverityUnlocalized,
Code: string(msg.Code),
Message: string(msg.Message),
Detail: string(msg.Detail),
Hint: msg.Hint,
Position: msg.Position,
InternalPosition: msg.InternalPosition,
InternalQuery: string(msg.InternalQuery),
Where: string(msg.Where),
SchemaName: string(msg.SchemaName),
TableName: string(msg.TableName),
ColumnName: string(msg.ColumnName),
DataTypeName: string(msg.DataTypeName),
ConstraintName: msg.ConstraintName,
File: string(msg.File),
Line: msg.Line,
Routine: string(msg.Routine),
}
}
@ -987,10 +1011,7 @@ func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
defer cancelConn.Close()
if ctx != context.Background() {
contextWatcher := ctxwatch.NewContextWatcher(
func() { cancelConn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) },
func() { cancelConn.SetDeadline(time.Time{}) },
)
contextWatcher := ctxwatch.NewContextWatcher(&DeadlineContextWatcherHandler{Conn: cancelConn})
contextWatcher.Watch(ctx)
defer contextWatcher.Unwatch()
}
@ -1523,8 +1544,10 @@ func (rr *ResultReader) Read() *Result {
values := rr.Values()
row := make([][]byte, len(values))
for i := range row {
row[i] = make([]byte, len(values[i]))
copy(row[i], values[i])
if values[i] != nil {
row[i] = make([]byte, len(values[i]))
copy(row[i], values[i])
}
}
br.Rows = append(br.Rows, row)
}
@ -1879,6 +1902,11 @@ func (pgConn *PgConn) SyncConn(ctx context.Context) error {
return errors.New("SyncConn: conn never synchronized")
}
// CustomData returns a map that can be used to associate custom data with the connection.
func (pgConn *PgConn) CustomData() map[string]any {
return pgConn.customData
}
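A sketch of populating it from an AfterConnect hook (the key name is illustrative):

	config.AfterConnect = func(ctx context.Context, conn *pgconn.PgConn) error {
		conn.CustomData()["connected_at"] = time.Now()
		return nil
	}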
// HijackedConn is the result of hijacking a connection.
//
// Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
@ -1891,6 +1919,7 @@ type HijackedConn struct {
TxStatus byte
Frontend *pgproto3.Frontend
Config *Config
CustomData map[string]any
}
// Hijack extracts the internal connection data. pgConn must be in an idle state. SyncConn should be called immediately
@ -1913,6 +1942,7 @@ func (pgConn *PgConn) Hijack() (*HijackedConn, error) {
TxStatus: pgConn.txStatus,
Frontend: pgConn.frontend,
Config: pgConn.config,
CustomData: pgConn.customData,
}, nil
}
@ -1932,13 +1962,14 @@ func Construct(hc *HijackedConn) (*PgConn, error) {
txStatus: hc.TxStatus,
frontend: hc.Frontend,
config: hc.Config,
customData: hc.CustomData,
status: connStatusIdle,
cleanupDone: make(chan struct{}),
}
pgConn.contextWatcher = newContextWatcher(pgConn.conn)
pgConn.contextWatcher = ctxwatch.NewContextWatcher(hc.Config.BuildContextWatcherHandler(pgConn))
pgConn.bgReader = bgreader.New(pgConn.conn)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64),
func() {
@ -2245,3 +2276,71 @@ func (p *Pipeline) Close() error {
return p.err
}
// DeadlineContextWatcherHandler handles canceled contexts by setting a deadline on a net.Conn.
type DeadlineContextWatcherHandler struct {
Conn net.Conn
// DeadlineDelay is the delay to set on the deadline set on net.Conn when the context is canceled.
DeadlineDelay time.Duration
}
func (h *DeadlineContextWatcherHandler) HandleCancel(ctx context.Context) {
h.Conn.SetDeadline(time.Now().Add(h.DeadlineDelay))
}
func (h *DeadlineContextWatcherHandler) HandleUnwatchAfterCancel() {
h.Conn.SetDeadline(time.Time{})
}
// CancelRequestContextWatcherHandler handles canceled contexts by sending a cancel request to the server. It also sets
// a deadline on a net.Conn as a fallback.
type CancelRequestContextWatcherHandler struct {
Conn *PgConn
// CancelRequestDelay is the delay before sending the cancel request to the server.
CancelRequestDelay time.Duration
// DeadlineDelay is the delay to set on the deadline set on net.Conn when the context is canceled.
DeadlineDelay time.Duration
cancelFinishedChan chan struct{}
handleUnwatchAfterCancelCalled func()
}
func (h *CancelRequestContextWatcherHandler) HandleCancel(context.Context) {
h.cancelFinishedChan = make(chan struct{})
var handleUnwatchedAfterCancelCalledCtx context.Context
handleUnwatchedAfterCancelCalledCtx, h.handleUnwatchAfterCancelCalled = context.WithCancel(context.Background())
deadline := time.Now().Add(h.DeadlineDelay)
h.Conn.conn.SetDeadline(deadline)
go func() {
defer close(h.cancelFinishedChan)
select {
case <-handleUnwatchedAfterCancelCalledCtx.Done():
return
case <-time.After(h.CancelRequestDelay):
}
cancelRequestCtx, cancel := context.WithDeadline(handleUnwatchedAfterCancelCalledCtx, deadline)
defer cancel()
h.Conn.CancelRequest(cancelRequestCtx)
// CancelRequest is inherently racy. Even though the cancel request has been received by the server at this point,
// it hasn't necessarily been delivered to the other connection. If we immediately return and the connection is
// immediately used then it is possible the CancelRequest will actually cancel our next query. The
// TestCancelRequestContextWatcherHandler Stress test can produce this error without the sleep below. The sleep time
// is arbitrary, but should be sufficient to prevent this error case.
time.Sleep(100 * time.Millisecond)
}()
}
func (h *CancelRequestContextWatcherHandler) HandleUnwatchAfterCancel() {
h.handleUnwatchAfterCancelCalled()
<-h.cancelFinishedChan
h.Conn.conn.SetDeadline(time.Time{})
}

View File

@ -99,7 +99,7 @@ func getValueFromJSON(v map[string]string) ([]byte, error) {
return nil, errors.New("unknown protocol representation")
}
// beginMessage begines a new message of type t. It appends the message type and a placeholder for the message length to
// beginMessage begins a new message of type t. It appends the message type and a placeholder for the message length to
// dst. It returns the new buffer and the position of the message length placeholder.
func beginMessage(dst []byte, t byte) ([]byte, int) {
dst = append(dst, t)

View File

@ -6,7 +6,6 @@ import (
"fmt"
"reflect"
"github.com/jackc/pgx/v5/internal/anynil"
"github.com/jackc/pgx/v5/internal/pgio"
)
@ -230,7 +229,7 @@ func (c *ArrayCodec) PlanScan(m *Map, oid uint32, format int16, target any) Scan
// target / arrayScanner might be a pointer to a nil. If it is create one so we can call ScanIndexType to plan the
// scan of the elements.
if anynil.Is(target) {
if isNil, _ := isNilDriverValuer(target); isNil {
arrayScanner = reflect.New(reflect.TypeOf(target).Elem()).Interface().(ArraySetter)
}

View File

@ -53,6 +53,9 @@ similar fashion to database/sql. The second is to use a pointer to a pointer.
return err
}
When using nullable pgtype types as parameters for queries, one has to remember
to explicitly set their Valid field to true, otherwise the parameter's value will be NULL.
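For example, a sketch assuming conn is a *pgx.Conn and a widgets table exists:

	maxID := pgtype.Int8{Int64: 1000, Valid: true} // without Valid: true the parameter is sent as NULL
	rows, err := conn.Query(ctx, "select id from widgets where id < $1", maxID)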
JSON Support
pgtype automatically marshals and unmarshals data from json and jsonb PostgreSQL types.
@ -139,6 +142,16 @@ Compatibility with database/sql
pgtype also includes support for custom types implementing the database/sql.Scanner and database/sql/driver.Valuer
interfaces.
Encoding Typed Nils
pgtype encodes untyped and typed nils (e.g. nil and []byte(nil)) to the SQL NULL value without going through the Codec
system. This means that Codecs and other encoding logic do not have to handle nil or *T(nil).
However, database/sql compatibility requires Value to be called on T(nil) when T implements driver.Valuer. Therefore,
a driver.Valuer value is only treated as NULL when it is a *T(nil) and driver.Valuer is implemented on T rather than
on *T. See
https://github.com/golang/go/issues/8415 and
https://github.com/golang/go/commit/0ce1d79a6a771f7449ec493b993ed2a720917870.
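For example, both of the following send SQL NULL for $1 without invoking any Codec (conn and a table t with a bytea column b are assumed):

	_, err = conn.Exec(ctx, "insert into t (b) values ($1)", nil)
	_, err = conn.Exec(ctx, "insert into t (b) values ($1)", []byte(nil))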
Child Records
pgtype's support for arrays and composite records can be used to load records and their children in a single query. See

View File

@ -144,10 +144,15 @@ func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte,
hours := absMicroseconds / microsecondsPerHour
minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
microseconds := absMicroseconds % microsecondsPerSecond
timeStr := fmt.Sprintf("%02d:%02d:%02d.%06d", hours, minutes, seconds, microseconds)
timeStr := fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
buf = append(buf, timeStr...)
microseconds := absMicroseconds % microsecondsPerSecond
if microseconds != 0 {
buf = append(buf, fmt.Sprintf(".%06d", microseconds)...)
}
return buf, nil
}

View File

@ -8,17 +8,20 @@ import (
"reflect"
)
type JSONCodec struct{}
type JSONCodec struct {
Marshal func(v any) ([]byte, error)
Unmarshal func(data []byte, v any) error
}
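A sketch of registering this codec with caller-supplied functions on a connection's type map (json.Marshal and json.Unmarshal stand in for any functions with matching signatures; conn is assumed to be a *pgx.Conn):

	conn.TypeMap().RegisterType(&pgtype.Type{
		Name:  "json",
		OID:   pgtype.JSONOID,
		Codec: &pgtype.JSONCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal},
	})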
func (JSONCodec) FormatSupported(format int16) bool {
func (*JSONCodec) FormatSupported(format int16) bool {
return format == TextFormatCode || format == BinaryFormatCode
}
func (JSONCodec) PreferredFormat() int16 {
func (*JSONCodec) PreferredFormat() int16 {
return TextFormatCode
}
func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
func (c *JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
switch value.(type) {
case string:
return encodePlanJSONCodecEitherFormatString{}
@ -34,7 +37,7 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
//
// https://github.com/jackc/pgx/issues/1430
//
// Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to beused
// Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to be used
// when both are implemented https://github.com/jackc/pgx/issues/1805
case driver.Valuer:
return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
@ -44,7 +47,9 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
//
// https://github.com/jackc/pgx/issues/1681
case json.Marshaler:
return encodePlanJSONCodecEitherFormatMarshal{}
return &encodePlanJSONCodecEitherFormatMarshal{
marshal: c.Marshal,
}
}
// Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
@ -61,7 +66,9 @@ func (c JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Encod
}
}
return encodePlanJSONCodecEitherFormatMarshal{}
return &encodePlanJSONCodecEitherFormatMarshal{
marshal: c.Marshal,
}
}
type encodePlanJSONCodecEitherFormatString struct{}
@ -96,10 +103,12 @@ func (encodePlanJSONCodecEitherFormatJSONRawMessage) Encode(value any, buf []byt
return buf, nil
}
type encodePlanJSONCodecEitherFormatMarshal struct{}
type encodePlanJSONCodecEitherFormatMarshal struct {
marshal func(v any) ([]byte, error)
}
func (encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
jsonBytes, err := json.Marshal(value)
func (e *encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
jsonBytes, err := e.marshal(value)
if err != nil {
return nil, err
}
@ -108,7 +117,7 @@ func (encodePlanJSONCodecEitherFormatMarshal) Encode(value any, buf []byte) (new
return buf, nil
}
func (JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
func (c *JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
switch target.(type) {
case *string:
return scanPlanAnyToString{}
@ -141,7 +150,9 @@ func (JSONCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan
return &scanPlanSQLScanner{formatCode: format}
}
return scanPlanJSONToJSONUnmarshal{}
return &scanPlanJSONToJSONUnmarshal{
unmarshal: c.Unmarshal,
}
}
type scanPlanAnyToString struct{}
@ -166,16 +177,11 @@ func (scanPlanJSONToByteSlice) Scan(src []byte, dst any) error {
return nil
}
type scanPlanJSONToBytesScanner struct{}
func (scanPlanJSONToBytesScanner) Scan(src []byte, dst any) error {
scanner := (dst).(BytesScanner)
return scanner.ScanBytes(src)
type scanPlanJSONToJSONUnmarshal struct {
unmarshal func(data []byte, v any) error
}
type scanPlanJSONToJSONUnmarshal struct{}
func (scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error {
func (s *scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error {
if src == nil {
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() == reflect.Ptr {
@ -193,10 +199,10 @@ func (scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error {
elem := reflect.ValueOf(dst).Elem()
elem.Set(reflect.Zero(elem.Type()))
return json.Unmarshal(src, dst)
return s.unmarshal(src, dst)
}
func (c JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
func (c *JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
if src == nil {
return nil, nil
}
@ -206,12 +212,12 @@ func (c JSONCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src
return dstBuf, nil
}
func (c JSONCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
func (c *JSONCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
if src == nil {
return nil, nil
}
var dst any
err := json.Unmarshal(src, &dst)
err := c.Unmarshal(src, &dst)
return dst, err
}

View File

@ -2,29 +2,31 @@ package pgtype
import (
"database/sql/driver"
"encoding/json"
"fmt"
)
type JSONBCodec struct{}
type JSONBCodec struct {
Marshal func(v any) ([]byte, error)
Unmarshal func(data []byte, v any) error
}
func (JSONBCodec) FormatSupported(format int16) bool {
func (*JSONBCodec) FormatSupported(format int16) bool {
return format == TextFormatCode || format == BinaryFormatCode
}
func (JSONBCodec) PreferredFormat() int16 {
func (*JSONBCodec) PreferredFormat() int16 {
return TextFormatCode
}
func (JSONBCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
func (c *JSONBCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
switch format {
case BinaryFormatCode:
plan := JSONCodec{}.PlanEncode(m, oid, TextFormatCode, value)
plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, TextFormatCode, value)
if plan != nil {
return &encodePlanJSONBCodecBinaryWrapper{textPlan: plan}
}
case TextFormatCode:
return JSONCodec{}.PlanEncode(m, oid, format, value)
return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanEncode(m, oid, format, value)
}
return nil
@ -39,15 +41,15 @@ func (plan *encodePlanJSONBCodecBinaryWrapper) Encode(value any, buf []byte) (ne
return plan.textPlan.Encode(value, buf)
}
func (JSONBCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
func (c *JSONBCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
switch format {
case BinaryFormatCode:
plan := JSONCodec{}.PlanScan(m, oid, TextFormatCode, target)
plan := (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, TextFormatCode, target)
if plan != nil {
return &scanPlanJSONBCodecBinaryUnwrapper{textPlan: plan}
}
case TextFormatCode:
return JSONCodec{}.PlanScan(m, oid, format, target)
return (&JSONCodec{Marshal: c.Marshal, Unmarshal: c.Unmarshal}).PlanScan(m, oid, format, target)
}
return nil
@ -73,7 +75,7 @@ func (plan *scanPlanJSONBCodecBinaryUnwrapper) Scan(src []byte, dst any) error {
return plan.textPlan.Scan(src[1:], dst)
}
func (c JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
func (c *JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
if src == nil {
return nil, nil
}
@ -100,7 +102,7 @@ func (c JSONBCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src
}
}
func (c JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
func (c *JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
if src == nil {
return nil, nil
}
@ -122,6 +124,6 @@ func (c JSONBCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (a
}
var dst any
err := json.Unmarshal(src, &dst)
err := c.Unmarshal(src, &dst)
return dst, err
}

View File

@ -26,6 +26,8 @@ const (
XIDOID = 28
CIDOID = 29
JSONOID = 114
XMLOID = 142
XMLArrayOID = 143
JSONArrayOID = 199
PointOID = 600
LsegOID = 601
@ -41,6 +43,7 @@ const (
CircleOID = 718
CircleArrayOID = 719
UnknownOID = 705
Macaddr8OID = 774
MacaddrOID = 829
InetOID = 869
BoolArrayOID = 1000
@ -213,6 +216,15 @@ type Map struct {
TryWrapScanPlanFuncs []TryWrapScanPlanFunc
}
// Copy returns a new Map containing the same registered types.
func (m *Map) Copy() *Map {
newMap := NewMap()
for _, type_ := range m.oidToType {
newMap.RegisterType(type_)
}
return newMap
}
func NewMap() *Map {
defaultMapInitOnce.Do(initDefaultMap)
@ -247,6 +259,13 @@ func NewMap() *Map {
}
}
// RegisterTypes registers multiple data types in the sequence they are provided.
func (m *Map) RegisterTypes(types []*Type) {
for _, t := range types {
m.RegisterType(t)
}
}
// RegisterType registers a data type with the Map. t must not be mutated after it is registered.
func (m *Map) RegisterType(t *Type) {
m.oidToType[t.OID] = t
@ -554,17 +573,24 @@ func TryFindUnderlyingTypeScanPlan(dst any) (plan WrappedScanPlanNextSetter, nex
elemValue = dstValue.Elem()
}
nextDstType := elemKindToPointerTypes[elemValue.Kind()]
if nextDstType == nil && elemValue.Kind() == reflect.Slice {
if elemValue.Type().Elem().Kind() == reflect.Uint8 {
var v *[]byte
nextDstType = reflect.TypeOf(v)
if nextDstType == nil {
if elemValue.Kind() == reflect.Slice {
if elemValue.Type().Elem().Kind() == reflect.Uint8 {
var v *[]byte
nextDstType = reflect.TypeOf(v)
}
}
// Get underlying type of any array.
// https://github.com/jackc/pgx/issues/2107
if elemValue.Kind() == reflect.Array {
nextDstType = reflect.PointerTo(reflect.ArrayOf(elemValue.Len(), elemValue.Type().Elem()))
}
}
if nextDstType != nil && dstValue.Type() != nextDstType && dstValue.CanConvert(nextDstType) {
return &underlyingTypeScanPlan{dstType: dstValue.Type(), nextDstType: nextDstType}, dstValue.Convert(nextDstType).Interface(), true
}
}
return nil, nil, false
@ -1330,7 +1356,7 @@ func (plan *derefPointerEncodePlan) Encode(value any, buf []byte) (newBuf []byte
}
// TryWrapDerefPointerEncodePlan tries to dereference a pointer. e.g. If value was of type *string then a wrapper plan
// would be returned that derefences the value.
// would be returned that dereferences the value.
func TryWrapDerefPointerEncodePlan(value any) (plan WrappedEncodePlanNextSetter, nextValue any, ok bool) {
if _, ok := value.(driver.Valuer); ok {
return nil, nil, false
@ -1404,6 +1430,15 @@ func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextS
return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
}
// Get underlying type of any array.
// https://github.com/jackc/pgx/issues/2107
if refValue.Kind() == reflect.Array {
underlyingArrayType := reflect.ArrayOf(refValue.Len(), refValue.Type().Elem())
if refValue.Type() != underlyingArrayType {
return &underlyingTypeEncodePlan{nextValueType: underlyingArrayType}, refValue.Convert(underlyingArrayType).Interface(), true
}
}
return nil, nil, false
}
@ -1911,8 +1946,17 @@ func newEncodeError(value any, m *Map, oid uint32, formatCode int16, err error)
// (nil, nil). The caller of Encode is responsible for writing the correct NULL value or the length of the data
// written.
func (m *Map) Encode(oid uint32, formatCode int16, value any, buf []byte) (newBuf []byte, err error) {
if value == nil {
return nil, nil
if isNil, callNilDriverValuer := isNilDriverValuer(value); isNil {
if callNilDriverValuer {
newBuf, err = (&encodePlanDriverValuer{m: m, oid: oid, formatCode: formatCode}).Encode(value, buf)
if err != nil {
return nil, newEncodeError(value, m, oid, formatCode, err)
}
return newBuf, nil
} else {
return nil, nil
}
}
plan := m.PlanEncode(oid, formatCode, value)
@ -1967,3 +2011,55 @@ func (w *sqlScannerWrapper) Scan(src any) error {
return w.m.Scan(t.OID, TextFormatCode, bufSrc, w.v)
}
// canBeNil returns true if value can be nil.
func canBeNil(value any) bool {
refVal := reflect.ValueOf(value)
kind := refVal.Kind()
switch kind {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
return true
default:
return false
}
}
// valuerReflectType is a reflect.Type for driver.Valuer. It has confusing syntax because reflect.TypeOf returns nil
// when its argument is a nil interface value. So we use a pointer to the interface and call Elem to get the actual
// type. Yuck.
//
// This can be simplified in Go 1.22 with reflect.TypeFor.
//
// var valuerReflectType = reflect.TypeFor[driver.Valuer]()
var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
// isNilDriverValuer returns true if value is any type of nil unless it implements driver.Valuer. *T is not considered to implement
// driver.Valuer if it is only implemented by T.
func isNilDriverValuer(value any) (isNil bool, callNilDriverValuer bool) {
if value == nil {
return true, false
}
refVal := reflect.ValueOf(value)
kind := refVal.Kind()
switch kind {
case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
if !refVal.IsNil() {
return false, false
}
if _, ok := value.(driver.Valuer); ok {
if kind == reflect.Ptr {
// The type assertion will succeed if driver.Valuer is implemented on T or *T. Check if it is implemented on *T
// by checking if it is not implemented on T.
return true, !refVal.Type().Elem().Implements(valuerReflectType)
} else {
return true, true
}
}
return true, false
default:
return false, false
}
}

View File

@ -2,6 +2,7 @@ package pgtype
import (
"encoding/json"
"encoding/xml"
"net"
"net/netip"
"reflect"
@ -65,11 +66,12 @@ func initDefaultMap() {
defaultMap.RegisterType(&Type{Name: "int4", OID: Int4OID, Codec: Int4Codec{}})
defaultMap.RegisterType(&Type{Name: "int8", OID: Int8OID, Codec: Int8Codec{}})
defaultMap.RegisterType(&Type{Name: "interval", OID: IntervalOID, Codec: IntervalCodec{}})
defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: JSONCodec{}})
defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: JSONBCodec{}})
defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: &JSONCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: &JSONBCodec{Marshal: json.Marshal, Unmarshal: json.Unmarshal}})
defaultMap.RegisterType(&Type{Name: "jsonpath", OID: JSONPathOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
defaultMap.RegisterType(&Type{Name: "line", OID: LineOID, Codec: LineCodec{}})
defaultMap.RegisterType(&Type{Name: "lseg", OID: LsegOID, Codec: LsegCodec{}})
defaultMap.RegisterType(&Type{Name: "macaddr8", OID: Macaddr8OID, Codec: MacaddrCodec{}})
defaultMap.RegisterType(&Type{Name: "macaddr", OID: MacaddrOID, Codec: MacaddrCodec{}})
defaultMap.RegisterType(&Type{Name: "name", OID: NameOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "numeric", OID: NumericOID, Codec: NumericCodec{}})
@ -81,13 +83,14 @@ func initDefaultMap() {
defaultMap.RegisterType(&Type{Name: "text", OID: TextOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "tid", OID: TIDOID, Codec: TIDCodec{}})
defaultMap.RegisterType(&Type{Name: "time", OID: TimeOID, Codec: TimeCodec{}})
defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: TimestampCodec{}})
defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: TimestamptzCodec{}})
defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: &TimestampCodec{}})
defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: &TimestamptzCodec{}})
defaultMap.RegisterType(&Type{Name: "unknown", OID: UnknownOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "uuid", OID: UUIDOID, Codec: UUIDCodec{}})
defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
defaultMap.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
defaultMap.RegisterType(&Type{Name: "xml", OID: XMLOID, Codec: &XMLCodec{Marshal: xml.Marshal, Unmarshal: xml.Unmarshal}})
// Range types
defaultMap.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[DateOID]}})
@ -152,6 +155,7 @@ func initDefaultMap() {
defaultMap.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarbitOID]}})
defaultMap.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarcharOID]}})
defaultMap.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XIDOID]}})
defaultMap.RegisterType(&Type{Name: "_xml", OID: XMLArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XMLOID]}})
// Integer types that directly map to a PostgreSQL type
registerDefaultPgTypeVariants[int16](defaultMap, "int2")

View File

@ -19,9 +19,11 @@ type TimeValuer interface {
// Time represents the PostgreSQL time type. The PostgreSQL time is a time of day without time zone.
//
// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time
// and date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time type cannot due
// to needing to handle 24:00:00. time.Time converts that to 00:00:00 on the following day.
// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time and
// date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time type cannot due to
// needing to handle 24:00:00. time.Time converts that to 00:00:00 on the following day.
//
// The time with time zone type is not supported. Use of time with time zone is discouraged by the PostgreSQL documentation.
type Time struct {
Microseconds int64 // Number of microseconds since midnight
Valid bool
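For illustration, a value of 13:30:00 would be represented as:

	t := pgtype.Time{Microseconds: (13*3600 + 30*60) * 1_000_000, Valid: true} // 13:30:00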
@ -45,7 +47,12 @@ func (t *Time) Scan(src any) error {
switch src := src.(type) {
case string:
return scanPlanTextAnyToTimeScanner{}.Scan([]byte(src), t)
err := scanPlanTextAnyToTimeScanner{}.Scan([]byte(src), t)
if err != nil {
t.Microseconds = 0
t.Valid = false
}
return err
}
return fmt.Errorf("cannot scan %T", src)
@ -136,6 +143,8 @@ func (TimeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan
switch target.(type) {
case TimeScanner:
return scanPlanBinaryTimeToTimeScanner{}
case TextScanner:
return scanPlanBinaryTimeToTextScanner{}
}
case TextFormatCode:
switch target.(type) {
@ -165,6 +174,34 @@ func (scanPlanBinaryTimeToTimeScanner) Scan(src []byte, dst any) error {
return scanner.ScanTime(Time{Microseconds: usec, Valid: true})
}
type scanPlanBinaryTimeToTextScanner struct{}
func (scanPlanBinaryTimeToTextScanner) Scan(src []byte, dst any) error {
ts, ok := (dst).(TextScanner)
if !ok {
return ErrScanTargetTypeChanged
}
if src == nil {
return ts.ScanText(Text{})
}
if len(src) != 8 {
return fmt.Errorf("invalid length for time: %v", len(src))
}
usec := int64(binary.BigEndian.Uint64(src))
tim := Time{Microseconds: usec, Valid: true}
buf, err := TimeCodec{}.PlanEncode(nil, 0, TextFormatCode, tim).Encode(tim, nil)
if err != nil {
return err
}
return ts.ScanText(Text{String: string(buf), Valid: true})
}
type scanPlanTextAnyToTimeScanner struct{}
func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error {
@ -176,7 +213,7 @@ func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error {
s := string(src)
if len(s) < 8 {
if len(s) < 8 || s[2] != ':' || s[5] != ':' {
return fmt.Errorf("cannot decode %v into Time", s)
}
@ -199,6 +236,10 @@ func (scanPlanTextAnyToTimeScanner) Scan(src []byte, dst any) error {
usec += seconds * microsecondsPerSecond
if len(s) > 9 {
if s[8] != '.' || len(s) > 15 {
return fmt.Errorf("cannot decode %v into Time", s)
}
fraction := s[9:]
n, err := strconv.ParseInt(fraction, 10, 64)
if err != nil {

View File

@ -46,7 +46,7 @@ func (ts *Timestamp) Scan(src any) error {
switch src := src.(type) {
case string:
return scanPlanTextTimestampToTimestampScanner{}.Scan([]byte(src), ts)
return (&scanPlanTextTimestampToTimestampScanner{}).Scan([]byte(src), ts)
case time.Time:
*ts = Timestamp{Time: src, Valid: true}
return nil
@ -116,17 +116,21 @@ func (ts *Timestamp) UnmarshalJSON(b []byte) error {
return nil
}
type TimestampCodec struct{}
type TimestampCodec struct {
// ScanLocation is the location that the time is assumed to be in for scanning. This is different from
// TimestamptzCodec.ScanLocation in that this setting does change the instant in time that the timestamp represents.
ScanLocation *time.Location
}
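A sketch of overriding the scan location on a connection's type map (conn is assumed to be a *pgx.Conn and the zone name is illustrative):

	loc, err := time.LoadLocation("Europe/Moscow")
	if err != nil {
		return err
	}
	conn.TypeMap().RegisterType(&pgtype.Type{
		Name:  "timestamp",
		OID:   pgtype.TimestampOID,
		Codec: &pgtype.TimestampCodec{ScanLocation: loc},
	})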
func (TimestampCodec) FormatSupported(format int16) bool {
func (*TimestampCodec) FormatSupported(format int16) bool {
return format == TextFormatCode || format == BinaryFormatCode
}
func (TimestampCodec) PreferredFormat() int16 {
func (*TimestampCodec) PreferredFormat() int16 {
return BinaryFormatCode
}
func (TimestampCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
func (*TimestampCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
if _, ok := value.(TimestampValuer); !ok {
return nil
}
@ -220,27 +224,27 @@ func discardTimeZone(t time.Time) time.Time {
return t
}
func (TimestampCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
func (c *TimestampCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
switch format {
case BinaryFormatCode:
switch target.(type) {
case TimestampScanner:
return scanPlanBinaryTimestampToTimestampScanner{}
return &scanPlanBinaryTimestampToTimestampScanner{location: c.ScanLocation}
}
case TextFormatCode:
switch target.(type) {
case TimestampScanner:
return scanPlanTextTimestampToTimestampScanner{}
return &scanPlanTextTimestampToTimestampScanner{location: c.ScanLocation}
}
}
return nil
}
type scanPlanBinaryTimestampToTimestampScanner struct{}
type scanPlanBinaryTimestampToTimestampScanner struct{ location *time.Location }
func (scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error {
func (plan *scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error {
scanner := (dst).(TimestampScanner)
if src == nil {
@ -264,15 +268,18 @@ func (scanPlanBinaryTimestampToTimestampScanner) Scan(src []byte, dst any) error
microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000,
(microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000),
).UTC()
if plan.location != nil {
tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location)
}
ts = Timestamp{Time: tim, Valid: true}
}
return scanner.ScanTimestamp(ts)
}
type scanPlanTextTimestampToTimestampScanner struct{}
type scanPlanTextTimestampToTimestampScanner struct{ location *time.Location }
func (scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error {
func (plan *scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error {
scanner := (dst).(TimestampScanner)
if src == nil {
@ -302,13 +309,17 @@ func (scanPlanTextTimestampToTimestampScanner) Scan(src []byte, dst any) error {
tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location())
}
if plan.location != nil {
tim = time.Date(tim.Year(), tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), plan.location)
}
ts = Timestamp{Time: tim, Valid: true}
}
return scanner.ScanTimestamp(ts)
}
func (c TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
func (c *TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
if src == nil {
return nil, nil
}
@ -326,7 +337,7 @@ func (c TimestampCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16,
return ts.Time, nil
}
func (c TimestampCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
func (c *TimestampCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
if src == nil {
return nil, nil
}

View File

@ -54,7 +54,7 @@ func (tstz *Timestamptz) Scan(src any) error {
switch src := src.(type) {
case string:
return scanPlanTextTimestamptzToTimestamptzScanner{}.Scan([]byte(src), tstz)
return (&scanPlanTextTimestamptzToTimestamptzScanner{}).Scan([]byte(src), tstz)
case time.Time:
*tstz = Timestamptz{Time: src, Valid: true}
return nil
@ -124,17 +124,21 @@ func (tstz *Timestamptz) UnmarshalJSON(b []byte) error {
return nil
}
type TimestamptzCodec struct{}
type TimestamptzCodec struct {
// ScanLocation is the location to return scanned timestamptz values in. This does not change the instant in time that
// the timestamptz represents.
ScanLocation *time.Location
}
func (TimestamptzCodec) FormatSupported(format int16) bool {
func (*TimestamptzCodec) FormatSupported(format int16) bool {
return format == TextFormatCode || format == BinaryFormatCode
}
func (TimestamptzCodec) PreferredFormat() int16 {
func (*TimestamptzCodec) PreferredFormat() int16 {
return BinaryFormatCode
}
func (TimestamptzCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
func (*TimestamptzCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
if _, ok := value.(TimestamptzValuer); !ok {
return nil
}
@ -220,27 +224,27 @@ func (encodePlanTimestamptzCodecText) Encode(value any, buf []byte) (newBuf []by
return buf, nil
}
func (TimestamptzCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
func (c *TimestamptzCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
switch format {
case BinaryFormatCode:
switch target.(type) {
case TimestamptzScanner:
return scanPlanBinaryTimestamptzToTimestamptzScanner{}
return &scanPlanBinaryTimestamptzToTimestamptzScanner{location: c.ScanLocation}
}
case TextFormatCode:
switch target.(type) {
case TimestamptzScanner:
return scanPlanTextTimestamptzToTimestamptzScanner{}
return &scanPlanTextTimestamptzToTimestamptzScanner{location: c.ScanLocation}
}
}
return nil
}
type scanPlanBinaryTimestamptzToTimestamptzScanner struct{}
type scanPlanBinaryTimestamptzToTimestamptzScanner struct{ location *time.Location }
func (scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
func (plan *scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
scanner := (dst).(TimestamptzScanner)
if src == nil {
@ -264,15 +268,18 @@ func (scanPlanBinaryTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) e
microsecFromUnixEpochToY2K/1000000+microsecSinceY2K/1000000,
(microsecFromUnixEpochToY2K%1000000*1000)+(microsecSinceY2K%1000000*1000),
)
if plan.location != nil {
tim = tim.In(plan.location)
}
tstz = Timestamptz{Time: tim, Valid: true}
}
return scanner.ScanTimestamptz(tstz)
}
type scanPlanTextTimestamptzToTimestamptzScanner struct{}
type scanPlanTextTimestamptzToTimestamptzScanner struct{ location *time.Location }
func (scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
func (plan *scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) error {
scanner := (dst).(TimestamptzScanner)
if src == nil {
@ -312,13 +319,17 @@ func (scanPlanTextTimestamptzToTimestamptzScanner) Scan(src []byte, dst any) err
tim = time.Date(year, tim.Month(), tim.Day(), tim.Hour(), tim.Minute(), tim.Second(), tim.Nanosecond(), tim.Location())
}
if plan.location != nil {
tim = tim.In(plan.location)
}
tstz = Timestamptz{Time: tim, Valid: true}
}
return scanner.ScanTimestamptz(tstz)
}
func (c TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
func (c *TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
if src == nil {
return nil, nil
}
@ -336,7 +347,7 @@ func (c TimestamptzCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int1
return tstz.Time, nil
}
func (c TimestamptzCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
func (c *TimestamptzCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
if src == nil {
return nil, nil
}

View File

@ -205,6 +205,8 @@ func (Uint32Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPl
return scanPlanBinaryUint32ToUint32{}
case Uint32Scanner:
return scanPlanBinaryUint32ToUint32Scanner{}
case TextScanner:
return scanPlanBinaryUint32ToTextScanner{}
}
case TextFormatCode:
switch target.(type) {
@ -282,6 +284,26 @@ func (scanPlanBinaryUint32ToUint32Scanner) Scan(src []byte, dst any) error {
return s.ScanUint32(Uint32{Uint32: n, Valid: true})
}
type scanPlanBinaryUint32ToTextScanner struct{}
func (scanPlanBinaryUint32ToTextScanner) Scan(src []byte, dst any) error {
s, ok := (dst).(TextScanner)
if !ok {
return ErrScanTargetTypeChanged
}
if src == nil {
return s.ScanText(Text{})
}
if len(src) != 4 {
return fmt.Errorf("invalid length for uint32: %v", len(src))
}
n := uint64(binary.BigEndian.Uint32(src))
return s.ScanText(Text{String: strconv.FormatUint(n, 10), Valid: true})
}
type scanPlanTextAnyToUint32Scanner struct{}
func (scanPlanTextAnyToUint32Scanner) Scan(src []byte, dst any) error {

vendor/github.com/jackc/pgx/v5/pgtype/xml.go (new file, 198 lines, generated, vendored)
View File

@ -0,0 +1,198 @@
package pgtype
import (
"database/sql"
"database/sql/driver"
"encoding/xml"
"fmt"
"reflect"
)
type XMLCodec struct {
Marshal func(v any) ([]byte, error)
Unmarshal func(data []byte, v any) error
}
func (*XMLCodec) FormatSupported(format int16) bool {
return format == TextFormatCode || format == BinaryFormatCode
}
func (*XMLCodec) PreferredFormat() int16 {
return TextFormatCode
}
func (c *XMLCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
switch value.(type) {
case string:
return encodePlanXMLCodecEitherFormatString{}
case []byte:
return encodePlanXMLCodecEitherFormatByteSlice{}
// Cannot rely on driver.Valuer being handled later because anything can be marshalled.
//
// https://github.com/jackc/pgx/issues/1430
//
// Check for driver.Valuer must come before xml.Marshaler so that it is guaranteed to be used
// when both are implemented https://github.com/jackc/pgx/issues/1805
case driver.Valuer:
return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
// Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
// marshalled.
//
// https://github.com/jackc/pgx/issues/1681
case xml.Marshaler:
return &encodePlanXMLCodecEitherFormatMarshal{
marshal: c.Marshal,
}
}
// Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
// appropriate wrappers here.
for _, f := range []TryWrapEncodePlanFunc{
TryWrapDerefPointerEncodePlan,
TryWrapFindUnderlyingTypeEncodePlan,
} {
if wrapperPlan, nextValue, ok := f(value); ok {
if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
wrapperPlan.SetNext(nextPlan)
return wrapperPlan
}
}
}
return &encodePlanXMLCodecEitherFormatMarshal{
marshal: c.Marshal,
}
}
type encodePlanXMLCodecEitherFormatString struct{}
func (encodePlanXMLCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
xmlString := value.(string)
buf = append(buf, xmlString...)
return buf, nil
}
type encodePlanXMLCodecEitherFormatByteSlice struct{}
func (encodePlanXMLCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
xmlBytes := value.([]byte)
if xmlBytes == nil {
return nil, nil
}
buf = append(buf, xmlBytes...)
return buf, nil
}
type encodePlanXMLCodecEitherFormatMarshal struct {
marshal func(v any) ([]byte, error)
}
func (e *encodePlanXMLCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
xmlBytes, err := e.marshal(value)
if err != nil {
return nil, err
}
buf = append(buf, xmlBytes...)
return buf, nil
}
func (c *XMLCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
switch target.(type) {
case *string:
return scanPlanAnyToString{}
case **string:
// This is to fix **string scanning. It seems wrong to special case **string, but it's not clear what a better
// solution would be.
//
// https://github.com/jackc/pgx/issues/1470 -- **string
// https://github.com/jackc/pgx/issues/1691 -- ** anything else
if wrapperPlan, nextDst, ok := TryPointerPointerScanPlan(target); ok {
if nextPlan := m.planScan(oid, format, nextDst); nextPlan != nil {
if _, failed := nextPlan.(*scanPlanFail); !failed {
wrapperPlan.SetNext(nextPlan)
return wrapperPlan
}
}
}
case *[]byte:
return scanPlanXMLToByteSlice{}
case BytesScanner:
return scanPlanBinaryBytesToBytesScanner{}
// Cannot rely on sql.Scanner being handled later because scanPlanXMLToXMLUnmarshal will take precedence.
//
// https://github.com/jackc/pgx/issues/1418
case sql.Scanner:
return &scanPlanSQLScanner{formatCode: format}
}
return &scanPlanXMLToXMLUnmarshal{
unmarshal: c.Unmarshal,
}
}
type scanPlanXMLToByteSlice struct{}
func (scanPlanXMLToByteSlice) Scan(src []byte, dst any) error {
dstBuf := dst.(*[]byte)
if src == nil {
*dstBuf = nil
return nil
}
*dstBuf = make([]byte, len(src))
copy(*dstBuf, src)
return nil
}
type scanPlanXMLToXMLUnmarshal struct {
unmarshal func(data []byte, v any) error
}
func (s *scanPlanXMLToXMLUnmarshal) Scan(src []byte, dst any) error {
if src == nil {
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() == reflect.Ptr {
el := dstValue.Elem()
switch el.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Struct:
el.Set(reflect.Zero(el.Type()))
return nil
}
}
return fmt.Errorf("cannot scan NULL into %T", dst)
}
elem := reflect.ValueOf(dst).Elem()
elem.Set(reflect.Zero(elem.Type()))
return s.unmarshal(src, dst)
}
func (c *XMLCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
if src == nil {
return nil, nil
}
dstBuf := make([]byte, len(src))
copy(dstBuf, src)
return dstBuf, nil
}
func (c *XMLCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
if src == nil {
return nil, nil
}
var dst any
err := c.Unmarshal(src, &dst)
return dst, err
}
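
For orientation, a minimal sketch of wiring this codec into a pgtype.Map using the standard encoding/xml marshal functions; the registration below is illustrative only and not part of the vendored change:

package example

import (
	"encoding/xml"

	"github.com/jackc/pgx/v5/pgtype"
)

func registerXML(m *pgtype.Map) {
	// Back the codec with encoding/xml; any compatible marshal/unmarshal pair works.
	m.RegisterType(&pgtype.Type{
		Name: "xml",
		OID:  pgtype.XMLOID,
		Codec: &pgtype.XMLCodec{
			Marshal:   xml.Marshal,
			Unmarshal: xml.Unmarshal,
		},
	})
}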

View File

@ -26,6 +26,10 @@ func (c *Conn) Release() {
res := c.res
c.res = nil
if c.p.releaseTracer != nil {
c.p.releaseTracer.TraceRelease(c.p, TraceReleaseData{Conn: conn})
}
if conn.IsClosed() || conn.PgConn().IsBusy() || conn.PgConn().TxStatus() != 'I' {
res.Destroy()
// Signal to the health check to run since we just destroyed a connection

View File

@ -8,7 +8,7 @@ The primary way of creating a pool is with [pgxpool.New]:
pool, err := pgxpool.New(context.Background(), os.Getenv("DATABASE_URL"))
The database connection string can be in URL or DSN format. PostgreSQL settings, pgx settings, and pool settings can be
The database connection string can be in URL or keyword/value format. PostgreSQL settings, pgx settings, and pool settings can be
specified here. In addition, a config struct can be created by [ParseConfig].
config, err := pgxpool.ParseConfig(os.Getenv("DATABASE_URL"))
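
Both forms accepted by the pool, side by side in a hedged sketch (credentials and host are placeholders):

package example

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"
)

func connect(ctx context.Context) (*pgxpool.Pool, error) {
	// URL form.
	pool, err := pgxpool.New(ctx, "postgres://jack:secret@pg.example.com:5432/mydb?pool_max_conns=10")
	if err == nil {
		return pool, nil
	}
	// Equivalent keyword/value form.
	return pgxpool.New(ctx, "user=jack password=secret host=pg.example.com port=5432 dbname=mydb pool_max_conns=10")
}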

View File

@ -95,6 +95,9 @@ type Pool struct {
healthCheckChan chan struct{}
acquireTracer AcquireTracer
releaseTracer ReleaseTracer
closeOnce sync.Once
closeChan chan struct{}
}
@ -195,6 +198,14 @@ func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
closeChan: make(chan struct{}),
}
if t, ok := config.ConnConfig.Tracer.(AcquireTracer); ok {
p.acquireTracer = t
}
if t, ok := config.ConnConfig.Tracer.(ReleaseTracer); ok {
p.releaseTracer = t
}
var err error
p.p, err = puddle.NewPool(
&puddle.Config[*connResource]{
@ -279,7 +290,7 @@ func NewWithConfig(ctx context.Context, config *Config) (*Pool, error) {
//
// See Config for definitions of these arguments.
//
// # Example DSN
// # Example Keyword/Value
// user=jack password=secret host=pg.example.com port=5432 dbname=mydb sslmode=verify-ca pool_max_conns=10
//
// # Example URL
@ -498,7 +509,18 @@ func (p *Pool) createIdleResources(parentCtx context.Context, targetResources in
}
// Acquire returns a connection (*Conn) from the Pool
func (p *Pool) Acquire(ctx context.Context) (*Conn, error) {
func (p *Pool) Acquire(ctx context.Context) (c *Conn, err error) {
if p.acquireTracer != nil {
ctx = p.acquireTracer.TraceAcquireStart(ctx, p, TraceAcquireStartData{})
defer func() {
var conn *pgx.Conn
if c != nil {
conn = c.Conn()
}
p.acquireTracer.TraceAcquireEnd(ctx, p, TraceAcquireEndData{Conn: conn, Err: err})
}()
}
for {
res, err := p.p.Acquire(ctx)
if err != nil {

33
vendor/github.com/jackc/pgx/v5/pgxpool/tracer.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package pgxpool
import (
"context"
"github.com/jackc/pgx/v5"
)
// AcquireTracer traces Acquire.
type AcquireTracer interface {
// TraceAcquireStart is called at the beginning of Acquire.
// The returned context is used for the rest of the call and will be passed to the TraceAcquireEnd.
TraceAcquireStart(ctx context.Context, pool *Pool, data TraceAcquireStartData) context.Context
// TraceAcquireEnd is called when a connection has been acquired.
TraceAcquireEnd(ctx context.Context, pool *Pool, data TraceAcquireEndData)
}
type TraceAcquireStartData struct{}
type TraceAcquireEndData struct {
Conn *pgx.Conn
Err error
}
// ReleaseTracer traces Release.
type ReleaseTracer interface {
// TraceRelease is called at the beginning of Release.
TraceRelease(pool *Pool, data TraceReleaseData)
}
type TraceReleaseData struct {
Conn *pgx.Conn
}
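
A hedged sketch of a tracer satisfying both new interfaces (the logging and the context key are placeholders). As the pool.go hunk above shows, the value is picked up from config.ConnConfig.Tracer, so it must also satisfy pgx.QueryTracer:

package example

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

type poolTracer struct{}

type acquireStartKey struct{}

func (poolTracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
	return context.WithValue(ctx, acquireStartKey{}, time.Now())
}

func (poolTracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
	if start, ok := ctx.Value(acquireStartKey{}).(time.Time); ok {
		log.Printf("acquire took %s err=%v", time.Since(start), data.Err)
	}
}

func (poolTracer) TraceRelease(pool *pgxpool.Pool, data pgxpool.TraceReleaseData) {
	log.Println("connection released")
}

// Minimal pgx.QueryTracer methods so the value can be assigned to ConnConfig.Tracer.
func (poolTracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
	return ctx
}

func (poolTracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {}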

View File

@ -18,9 +18,10 @@ func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
return tx.t.Begin(ctx)
}
// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return ErrTxClosed
// if the Tx is already closed, but is otherwise safe to call multiple times. If the commit fails with a rollback status
// (e.g. the transaction was already in a broken state) then ErrTxCommitRollback will be returned.
// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return an error
// where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call multiple times. If
// the commit fails with a rollback status (e.g. the transaction was already in a broken state) then ErrTxCommitRollback
// will be returned.
func (tx *Tx) Commit(ctx context.Context) error {
err := tx.t.Commit(ctx)
if tx.c != nil {
@ -30,9 +31,9 @@ func (tx *Tx) Commit(ctx context.Context) error {
return err
}
// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return ErrTxClosed
// if the Tx is already closed, but is otherwise safe to call multiple times. Hence, defer tx.Rollback() is safe even if
// tx.Commit() will be called first in a non-error condition.
// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return
// an error where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call
// multiple times. Hence, defer tx.Rollback() is safe even if tx.Commit() will be called first in a non-error condition.
func (tx *Tx) Rollback(ctx context.Context) error {
err := tx.t.Rollback(ctx)
if tx.c != nil {
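
The semantics documented above in a brief hedged sketch (the table and statement are hypothetical):

package example

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"
)

func debit(ctx context.Context, pool *pgxpool.Pool, id int64) error {
	tx, err := pool.Begin(ctx)
	if err != nil {
		return err
	}
	// Safe even after a successful Commit: the second close simply yields an
	// error matching errors.Is(err, pgx.ErrTxClosed), which is ignored here.
	defer tx.Rollback(ctx)

	if _, err := tx.Exec(ctx, "update accounts set balance = balance - 1 where id = $1", id); err != nil {
		return err
	}
	return tx.Commit(ctx)
}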

View File

@ -6,6 +6,7 @@ import (
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/jackc/pgx/v5/pgconn"
@ -418,6 +419,8 @@ type CollectableRow interface {
type RowToFunc[T any] func(row CollectableRow) (T, error)
// AppendRows iterates through rows, calling fn for each row, and appending the results into a slice of T.
//
// This function closes the rows automatically on return.
func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) {
defer rows.Close()
@ -437,12 +440,16 @@ func AppendRows[T any, S ~[]T](slice S, rows Rows, fn RowToFunc[T]) (S, error) {
}
// CollectRows iterates through rows, calling fn for each row, and collecting the results into a slice of T.
//
// This function closes the rows automatically on return.
func CollectRows[T any](rows Rows, fn RowToFunc[T]) ([]T, error) {
return AppendRows([]T{}, rows, fn)
}
// CollectOneRow calls fn for the first row in rows and returns the result. If no rows are found returns an error where errors.Is(ErrNoRows) is true.
// CollectOneRow is to CollectRows as QueryRow is to Query.
//
// This function closes the rows automatically on return.
func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
@ -468,6 +475,8 @@ func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
// CollectExactlyOneRow calls fn for the first row in rows and returns the result.
// - If no rows are found returns an error where errors.Is(ErrNoRows) is true.
// - If more than 1 row is found returns an error where errors.Is(ErrTooManyRows) is true.
//
// This function closes the rows automatically on return.
func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
defer rows.Close()
@ -541,7 +550,7 @@ func (rs *mapRowScanner) ScanRow(rows Rows) error {
// ignored.
func RowToStructByPos[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row)
return value, err
}
@ -550,7 +559,7 @@ func RowToStructByPos[T any](row CollectableRow) (T, error) {
// the field will be ignored.
func RowToAddrOfStructByPos[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&positionalStructRowScanner{ptrToStruct: &value})
err := (&positionalStructRowScanner{ptrToStruct: &value}).ScanRow(row)
return &value, err
}
@ -558,46 +567,60 @@ type positionalStructRowScanner struct {
ptrToStruct any
}
func (rs *positionalStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
func (rs *positionalStructRowScanner) ScanRow(rows CollectableRow) error {
typ := reflect.TypeOf(rs.ptrToStruct).Elem()
fields := lookupStructFields(typ)
if len(rows.RawValues()) > len(fields) {
return fmt.Errorf(
"got %d values, but dst struct has only %d fields",
len(rows.RawValues()),
len(fields),
)
}
dstElemValue := dstValue.Elem()
scanTargets := rs.appendScanTargets(dstElemValue, nil)
if len(rows.RawValues()) > len(scanTargets) {
return fmt.Errorf("got %d values, but dst struct has only %d fields", len(rows.RawValues()), len(scanTargets))
}
scanTargets := setupStructScanTargets(rs.ptrToStruct, fields)
return rows.Scan(scanTargets...)
}
func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any) []any {
dstElemType := dstElemValue.Type()
// Map from reflect.Type -> []structRowField
var positionalStructFieldMap sync.Map
if scanTargets == nil {
scanTargets = make([]any, 0, dstElemType.NumField())
func lookupStructFields(t reflect.Type) []structRowField {
if cached, ok := positionalStructFieldMap.Load(t); ok {
return cached.([]structRowField)
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
fieldStack := make([]int, 0, 1)
fields := computeStructFields(t, make([]structRowField, 0, t.NumField()), &fieldStack)
fieldsIface, _ := positionalStructFieldMap.LoadOrStore(t, fields)
return fieldsIface.([]structRowField)
}
func computeStructFields(
t reflect.Type,
fields []structRowField,
fieldStack *[]int,
) []structRowField {
tail := len(*fieldStack)
*fieldStack = append(*fieldStack, 0)
for i := 0; i < t.NumField(); i++ {
sf := t.Field(i)
(*fieldStack)[tail] = i
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
fields = computeStructFields(sf.Type, fields, fieldStack)
} else if sf.PkgPath == "" {
dbTag, _ := sf.Tag.Lookup(structTagKey)
if dbTag == "-" {
// Field is ignored, skip it.
continue
}
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
fields = append(fields, structRowField{
path: append([]int(nil), *fieldStack...),
})
}
}
return scanTargets
*fieldStack = (*fieldStack)[:tail]
return fields
}
// RowToStructByName returns a T scanned from row. T must be a struct. T must have the same number of named public
@ -605,7 +628,7 @@ func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Val
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByName[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row)
return value, err
}
@ -615,7 +638,7 @@ func RowToStructByName[T any](row CollectableRow) (T, error) {
// then the field will be ignored.
func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value})
err := (&namedStructRowScanner{ptrToStruct: &value}).ScanRow(row)
return &value, err
}
@ -624,7 +647,7 @@ func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row)
return value, err
}
@ -634,7 +657,7 @@ func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
err := (&namedStructRowScanner{ptrToStruct: &value, lax: true}).ScanRow(row)
return &value, err
}
@ -643,64 +666,123 @@ type namedStructRowScanner struct {
lax bool
}
func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
dst := rs.ptrToStruct
dstValue := reflect.ValueOf(dst)
if dstValue.Kind() != reflect.Ptr {
return fmt.Errorf("dst not a pointer")
}
dstElemValue := dstValue.Elem()
scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions())
func (rs *namedStructRowScanner) ScanRow(rows CollectableRow) error {
typ := reflect.TypeOf(rs.ptrToStruct).Elem()
fldDescs := rows.FieldDescriptions()
namedStructFields, err := lookupNamedStructFields(typ, fldDescs)
if err != nil {
return err
}
for i, t := range scanTargets {
if t == nil {
return fmt.Errorf("struct doesn't have corresponding row field %s", rows.FieldDescriptions()[i].Name)
}
if !rs.lax && namedStructFields.missingField != "" {
return fmt.Errorf("cannot find field %s in returned row", namedStructFields.missingField)
}
fields := namedStructFields.fields
scanTargets := setupStructScanTargets(rs.ptrToStruct, fields)
return rows.Scan(scanTargets...)
}
const structTagKey = "db"
// Map from namedStructFieldsKey -> *namedStructFields
var namedStructFieldMap sync.Map
func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
i = -1
for i, desc := range fldDescs {
// Snake case support.
field = strings.ReplaceAll(field, "_", "")
descName := strings.ReplaceAll(desc.Name, "_", "")
if strings.EqualFold(descName, field) {
return i
}
}
return
type namedStructFieldsKey struct {
t reflect.Type
colNames string
}
func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, scanTargets []any, fldDescs []pgconn.FieldDescription) ([]any, error) {
var err error
dstElemType := dstElemValue.Type()
type namedStructFields struct {
fields []structRowField
// missingField is the first field from the struct without a corresponding row field.
// This is used to construct the correct error message for non-lax queries.
missingField string
}
if scanTargets == nil {
scanTargets = make([]any, len(fldDescs))
func lookupNamedStructFields(
t reflect.Type,
fldDescs []pgconn.FieldDescription,
) (*namedStructFields, error) {
key := namedStructFieldsKey{
t: t,
colNames: joinFieldNames(fldDescs),
}
if cached, ok := namedStructFieldMap.Load(key); ok {
return cached.(*namedStructFields), nil
}
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
// We could probably do two-levels of caching, where we compute the key -> fields mapping
// for a type only once, cache it by type, then use that to compute the column -> fields
// mapping for a given set of columns.
fieldStack := make([]int, 0, 1)
fields, missingField := computeNamedStructFields(
fldDescs,
t,
make([]structRowField, len(fldDescs)),
&fieldStack,
)
for i, f := range fields {
if f.path == nil {
return nil, fmt.Errorf(
"struct doesn't have corresponding row field %s",
fldDescs[i].Name,
)
}
}
fieldsIface, _ := namedStructFieldMap.LoadOrStore(
key,
&namedStructFields{fields: fields, missingField: missingField},
)
return fieldsIface.(*namedStructFields), nil
}
func joinFieldNames(fldDescs []pgconn.FieldDescription) string {
switch len(fldDescs) {
case 0:
return ""
case 1:
return fldDescs[0].Name
}
totalSize := len(fldDescs) - 1 // Space for separator bytes.
for _, d := range fldDescs {
totalSize += len(d.Name)
}
var b strings.Builder
b.Grow(totalSize)
b.WriteString(fldDescs[0].Name)
for _, d := range fldDescs[1:] {
b.WriteByte(0) // Join with NUL byte as it's (presumably) not a valid column character.
b.WriteString(d.Name)
}
return b.String()
}
func computeNamedStructFields(
fldDescs []pgconn.FieldDescription,
t reflect.Type,
fields []structRowField,
fieldStack *[]int,
) ([]structRowField, string) {
var missingField string
tail := len(*fieldStack)
*fieldStack = append(*fieldStack, 0)
for i := 0; i < t.NumField(); i++ {
sf := t.Field(i)
(*fieldStack)[tail] = i
if sf.PkgPath != "" && !sf.Anonymous {
// Field is unexported, skip it.
continue
}
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets, err = rs.appendScanTargets(dstElemValue.Field(i), scanTargets, fldDescs)
if err != nil {
return nil, err
var missingSubField string
fields, missingSubField = computeNamedStructFields(
fldDescs,
sf.Type,
fields,
fieldStack,
)
if missingField == "" {
missingField = missingSubField
}
} else {
dbTag, dbTagPresent := sf.Tag.Lookup(structTagKey)
@ -715,19 +797,60 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s
if !dbTagPresent {
colName = sf.Name
}
fpos := fieldPosByName(fldDescs, colName)
fpos := fieldPosByName(fldDescs, colName, !dbTagPresent)
if fpos == -1 {
if rs.lax {
continue
if missingField == "" {
missingField = colName
}
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
continue
}
if fpos >= len(scanTargets) && !rs.lax {
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
fields[fpos] = structRowField{
path: append([]int(nil), *fieldStack...),
}
scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface()
}
}
*fieldStack = (*fieldStack)[:tail]
return scanTargets, err
return fields, missingField
}
const structTagKey = "db"
func fieldPosByName(fldDescs []pgconn.FieldDescription, field string, normalize bool) (i int) {
i = -1
if normalize {
field = strings.ReplaceAll(field, "_", "")
}
for i, desc := range fldDescs {
if normalize {
if strings.EqualFold(strings.ReplaceAll(desc.Name, "_", ""), field) {
return i
}
} else {
if desc.Name == field {
return i
}
}
}
return
}
// structRowField describes a field of a struct.
//
// TODO: It would be a bit more efficient to track the path using the pointer
// offset within the (outermost) struct and use unsafe.Pointer arithmetic to
// construct references when scanning rows. However, it's not clear it's worth
// using unsafe for this.
type structRowField struct {
path []int
}
func setupStructScanTargets(receiver any, fields []structRowField) []any {
scanTargets := make([]any, len(fields))
v := reflect.ValueOf(receiver).Elem()
for i, f := range fields {
scanTargets[i] = v.FieldByIndex(f.path).Addr().Interface()
}
return scanTargets
}
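
The helpers above back the row-to-struct mapping; a hedged usage sketch (table and columns are hypothetical):

package example

import (
	"context"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

type user struct {
	ID    int64  `db:"id"`
	Name  string `db:"name"`
	Email string `db:"email"`
}

func loadUsers(ctx context.Context, pool *pgxpool.Pool) ([]user, error) {
	rows, err := pool.Query(ctx, "select id, name, email from users")
	if err != nil {
		return nil, err
	}
	// CollectRows closes rows on return; RowToStructByName matches columns to
	// struct fields by (normalized) name or "db" tag, using the cached field lookup above.
	return pgx.CollectRows(rows, pgx.RowToStructByName[user])
}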

View File

@ -7,7 +7,7 @@
// return err
// }
//
// Or from a DSN string.
// Or from a keyword/value string.
//
// db, err := sql.Open("pgx", "user=postgres password=secret host=localhost port=5432 database=pgx_test sslmode=disable")
// if err != nil {
@ -75,6 +75,7 @@ import (
"math"
"math/rand"
"reflect"
"slices"
"strconv"
"strings"
"sync"
@ -98,7 +99,7 @@ func init() {
// if pgx driver was already registered by different pgx major version then we
// skip registration under the default name.
if !contains(sql.Drivers(), "pgx") {
if !slices.Contains(sql.Drivers(), "pgx") {
sql.Register("pgx", pgxDriver)
}
sql.Register("pgx/v5", pgxDriver)
@ -120,17 +121,6 @@ func init() {
}
}
// TODO replace by slices.Contains when experimental package will be merged to stdlib
// https://pkg.go.dev/golang.org/x/exp/slices#Contains
func contains(list []string, y string) bool {
for _, x := range list {
if x == y {
return true
}
}
return false
}
// OptionOpenDB options for configuring the driver when opening a new db pool.
type OptionOpenDB func(*connector)
@ -805,6 +795,16 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d.Value()
}
case pgtype.XMLOID:
var d []byte
scanPlan := m.PlanScan(dataTypeOID, format, &d)
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
return nil, err
}
return d, nil
}
default:
var d string
scanPlan := m.PlanScan(dataTypeOID, format, &d)

View File

@ -3,7 +3,6 @@ package pgx
import (
"errors"
"github.com/jackc/pgx/v5/internal/anynil"
"github.com/jackc/pgx/v5/internal/pgio"
"github.com/jackc/pgx/v5/pgtype"
)
@ -15,10 +14,6 @@ const (
)
func convertSimpleArgument(m *pgtype.Map, arg any) (any, error) {
if anynil.Is(arg) {
return nil, nil
}
buf, err := m.Encode(0, TextFormatCode, arg, []byte{})
if err != nil {
return nil, err
@ -30,10 +25,6 @@ func convertSimpleArgument(m *pgtype.Map, arg any) (any, error) {
}
func encodeCopyValue(m *pgtype.Map, buf []byte, oid uint32, arg any) ([]byte, error) {
if anynil.Is(arg) {
return pgio.AppendInt32(buf, -1), nil
}
sp := len(buf)
buf = pgio.AppendInt32(buf, -1)
argBuf, err := m.Encode(oid, BinaryFormatCode, arg, buf)

View File

@ -1,3 +1,8 @@
# 2.2.2 (September 10, 2024)
* Add empty acquire time to stats (Maxim Ivanov)
* Stop importing nanotime from runtime via linkname (maypok86)
# 2.2.1 (July 15, 2023)
* Fix: CreateResource cannot overflow pool. This changes documented behavior of CreateResource. Previously,

View File

@ -1,4 +1,4 @@
[![](https://godoc.org/github.com/jackc/puddle?status.svg)](https://godoc.org/github.com/jackc/puddle)
[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/puddle/v2.svg)](https://pkg.go.dev/github.com/jackc/puddle/v2)
![Build Status](https://github.com/jackc/puddle/actions/workflows/ci.yml/badge.svg)
# Puddle

16
vendor/github.com/jackc/puddle/v2/nanotime.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
package puddle
import "time"
// nanotime returns the time in nanoseconds since process start.
//
// This approach, described at
// https://github.com/golang/go/issues/61765#issuecomment-1672090302,
// is fast, monotonic, and portable, and avoids the previous
// dependence on runtime.nanotime using the (unsafe) linkname hack.
// In particular, time.Since does less work than time.Now.
func nanotime() int64 {
return time.Since(globalStart).Nanoseconds()
}
var globalStart = time.Now()

View File

@ -1,13 +0,0 @@
//go:build purego || appengine || js
// This file contains the safe implementation of nanotime using time.Now().
package puddle
import (
"time"
)
func nanotime() int64 {
return time.Now().UnixNano()
}

View File

@ -1,12 +0,0 @@
//go:build !purego && !appengine && !js
// This file contains the implementation of nanotime using runtime.nanotime.
package puddle
import "unsafe"
var _ = unsafe.Sizeof(0)
//go:linkname nanotime runtime.nanotime
func nanotime() int64

View File

@ -139,6 +139,7 @@ type Pool[T any] struct {
acquireCount int64
acquireDuration time.Duration
emptyAcquireCount int64
emptyAcquireWaitTime time.Duration
canceledAcquireCount atomic.Int64
resetCount int
@ -154,7 +155,7 @@ type Config[T any] struct {
MaxSize int32
}
// NewPool creates a new pool. Panics if maxSize is less than 1.
// NewPool creates a new pool. Returns an error iff MaxSize is less than 1.
func NewPool[T any](config *Config[T]) (*Pool[T], error) {
if config.MaxSize < 1 {
return nil, errors.New("MaxSize must be >= 1")
@ -202,6 +203,7 @@ type Stat struct {
acquireCount int64
acquireDuration time.Duration
emptyAcquireCount int64
emptyAcquireWaitTime time.Duration
canceledAcquireCount int64
}
@ -251,6 +253,13 @@ func (s *Stat) EmptyAcquireCount() int64 {
return s.emptyAcquireCount
}
// EmptyAcquireWaitTime returns the cumulative time waited for successful acquires
// from the pool for a resource to be released or constructed because the pool was
// empty.
func (s *Stat) EmptyAcquireWaitTime() time.Duration {
return s.emptyAcquireWaitTime
}
// CanceledAcquireCount returns the cumulative count of acquires from the pool
// that were canceled by a context.
func (s *Stat) CanceledAcquireCount() int64 {
@ -266,6 +275,7 @@ func (p *Pool[T]) Stat() *Stat {
maxResources: p.maxSize,
acquireCount: p.acquireCount,
emptyAcquireCount: p.emptyAcquireCount,
emptyAcquireWaitTime: p.emptyAcquireWaitTime,
canceledAcquireCount: p.canceledAcquireCount.Load(),
acquireDuration: p.acquireDuration,
}
@ -363,11 +373,13 @@ func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
// If a resource is available in the pool.
if res := p.tryAcquireIdleResource(); res != nil {
waitTime := time.Duration(nanotime() - startNano)
if waitedForLock {
p.emptyAcquireCount += 1
p.emptyAcquireWaitTime += waitTime
}
p.acquireCount += 1
p.acquireDuration += time.Duration(nanotime() - startNano)
p.acquireDuration += waitTime
p.mux.Unlock()
return res, nil
}
@ -391,7 +403,9 @@ func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
p.emptyAcquireCount += 1
p.acquireCount += 1
p.acquireDuration += time.Duration(nanotime() - startNano)
waitTime := time.Duration(nanotime() - startNano)
p.acquireDuration += waitTime
p.emptyAcquireWaitTime += waitTime
return res, nil
}
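
A minimal hedged sketch of observing the new wait-time statistic on a puddle pool (the resource type and sizes are arbitrary):

package example

import (
	"context"
	"fmt"

	"github.com/jackc/puddle/v2"
)

func reportStats(ctx context.Context) error {
	pool, err := puddle.NewPool(&puddle.Config[int]{
		Constructor: func(ctx context.Context) (int, error) { return 0, nil },
		Destructor:  func(int) {},
		MaxSize:     4,
	})
	if err != nil {
		return err
	}
	res, err := pool.Acquire(ctx)
	if err != nil {
		return err
	}
	res.Release()

	s := pool.Stat()
	fmt.Printf("empty acquires: %d, waited: %s\n", s.EmptyAcquireCount(), s.EmptyAcquireWaitTime())
	return nil
}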

View File

@ -1 +0,0 @@
# Hello

4
vendor/golang.org/x/crypto/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

View File

@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
package pbkdf2
import (
"crypto/hmac"

4
vendor/golang.org/x/mod/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

4
vendor/golang.org/x/sync/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

4
vendor/golang.org/x/sys/LICENSE generated vendored
View File

@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

View File

@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
1. Construct the set of common code that is idential in all architecture-specific files.
1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.

View File

@ -9,9 +9,11 @@
#define PSALAA 1208(R0)
#define GTAB64(x) 80(x)
#define LCA64(x) 88(x)
#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
#define CAA(x) 8(x)
#define EDCHPXV(x) 1016(x) // in the CAA
#define SAVSTACK_ASYNC(x) 336(x) // in the LCA
#define CEECAATHDID(x) 976(x) // in the CAA
#define EDCHPXV(x) 1016(x) // in the CAA
#define GOCB(x) 1104(x) // in the CAA
// SS_*, where x=SAVSTACK_ASYNC
#define SS_LE(x) 0(x)
@ -19,394 +21,125 @@
#define SS_ERRNO(x) 16(x)
#define SS_ERRNOJR(x) 20(x)
#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6
// Function Descriptor Offsets
#define __errno 0x156*16
#define __err2ad 0x16C*16
TEXT ·clearErrno(SB),NOSPLIT,$0-0
BL addrerrno<>(SB)
MOVD $0, 0(R3)
// Call Instructions
#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6
#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD
#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE
DATA zosLibVec<>(SB)/8, $0
GLOBL zosLibVec<>(SB), NOPTR, $8
TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD CAA(R8), R8
MOVD EDCHPXV(R8), R8
MOVD R8, zosLibVec<>(SB)
RET
TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0
MOVD zosLibVec<>(SB), R8
MOVD R8, ret+0(FP)
RET
TEXT ·clearErrno(SB), NOSPLIT, $0-0
BL addrerrno<>(SB)
MOVD $0, 0(R3)
RET
// Returns the address of errno in R3.
TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0
TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get __errno FuncDesc.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
ADD $(0x156*16), R9
LMG 0(R9), R5, R6
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
ADD $(__errno), R9
LMG 0(R9), R5, R6
// Switch to saved LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Call __errno function.
LE_CALL
NOPH
// Switch back to Go stack.
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
RET
TEXT ·syscall_syscall(SB),NOSPLIT,$0-56
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R1
MOVD a2+16(FP), R2
MOVD a3+24(FP), R3
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get function.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
MOVD trap+0(FP), R5
SLD $4, R5
ADD R5, R9
LMG 0(R9), R5, R6
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Call function.
LE_CALL
NOPH
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
MOVD R3, r1+32(FP)
MOVD R0, r2+40(FP)
MOVD R0, err+48(FP)
MOVW R3, R4
CMP R4, $-1
BNE done
BL addrerrno<>(SB)
MOVWZ 0(R3), R3
MOVD R3, err+48(FP)
done:
BL runtime·exitsyscall(SB)
RET
TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56
MOVD a1+8(FP), R1
MOVD a2+16(FP), R2
MOVD a3+24(FP), R3
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get function.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
MOVD trap+0(FP), R5
SLD $4, R5
ADD R5, R9
LMG 0(R9), R5, R6
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Call function.
LE_CALL
NOPH
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
MOVD R3, r1+32(FP)
MOVD R0, r2+40(FP)
MOVD R0, err+48(FP)
MOVW R3, R4
CMP R4, $-1
BNE done
BL addrerrno<>(SB)
MOVWZ 0(R3), R3
MOVD R3, err+48(FP)
done:
RET
TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R1
MOVD a2+16(FP), R2
MOVD a3+24(FP), R3
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get function.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
MOVD trap+0(FP), R5
SLD $4, R5
ADD R5, R9
LMG 0(R9), R5, R6
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Fill in parameter list.
MOVD a4+32(FP), R12
MOVD R12, (2176+24)(R4)
MOVD a5+40(FP), R12
MOVD R12, (2176+32)(R4)
MOVD a6+48(FP), R12
MOVD R12, (2176+40)(R4)
// Call function.
LE_CALL
NOPH
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
MOVD R3, r1+56(FP)
MOVD R0, r2+64(FP)
MOVD R0, err+72(FP)
MOVW R3, R4
CMP R4, $-1
BNE done
BL addrerrno<>(SB)
MOVWZ 0(R3), R3
MOVD R3, err+72(FP)
done:
BL runtime·exitsyscall(SB)
RET
TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80
MOVD a1+8(FP), R1
MOVD a2+16(FP), R2
MOVD a3+24(FP), R3
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get function.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
MOVD trap+0(FP), R5
SLD $4, R5
ADD R5, R9
LMG 0(R9), R5, R6
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Fill in parameter list.
MOVD a4+32(FP), R12
MOVD R12, (2176+24)(R4)
MOVD a5+40(FP), R12
MOVD R12, (2176+32)(R4)
MOVD a6+48(FP), R12
MOVD R12, (2176+40)(R4)
// Call function.
LE_CALL
NOPH
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
MOVD R3, r1+56(FP)
MOVD R0, r2+64(FP)
MOVD R0, err+72(FP)
MOVW R3, R4
CMP R4, $-1
BNE done
BL addrerrno<>(SB)
MOVWZ 0(R3), R3
MOVD R3, err+72(FP)
done:
RET
TEXT ·syscall_syscall9(SB),NOSPLIT,$0
BL runtime·entersyscall(SB)
MOVD a1+8(FP), R1
MOVD a2+16(FP), R2
MOVD a3+24(FP), R3
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get function.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
MOVD trap+0(FP), R5
SLD $4, R5
ADD R5, R9
LMG 0(R9), R5, R6
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Fill in parameter list.
MOVD a4+32(FP), R12
MOVD R12, (2176+24)(R4)
MOVD a5+40(FP), R12
MOVD R12, (2176+32)(R4)
MOVD a6+48(FP), R12
MOVD R12, (2176+40)(R4)
MOVD a7+56(FP), R12
MOVD R12, (2176+48)(R4)
MOVD a8+64(FP), R12
MOVD R12, (2176+56)(R4)
MOVD a9+72(FP), R12
MOVD R12, (2176+64)(R4)
// Call function.
LE_CALL
NOPH
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
MOVD R3, r1+80(FP)
MOVD R0, r2+88(FP)
MOVD R0, err+96(FP)
MOVW R3, R4
CMP R4, $-1
BNE done
BL addrerrno<>(SB)
MOVWZ 0(R3), R3
MOVD R3, err+96(FP)
done:
BL runtime·exitsyscall(SB)
RET
TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0
MOVD a1+8(FP), R1
MOVD a2+16(FP), R2
MOVD a3+24(FP), R3
// Get library control area (LCA).
MOVW PSALAA, R8
MOVD LCA64(R8), R8
// Get function.
MOVD CAA(R8), R9
MOVD EDCHPXV(R9), R9
MOVD trap+0(FP), R5
SLD $4, R5
ADD R5, R9
LMG 0(R9), R5, R6
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R4
MOVD $0, 0(R9)
// Fill in parameter list.
MOVD a4+32(FP), R12
MOVD R12, (2176+24)(R4)
MOVD a5+40(FP), R12
MOVD R12, (2176+32)(R4)
MOVD a6+48(FP), R12
MOVD R12, (2176+40)(R4)
MOVD a7+56(FP), R12
MOVD R12, (2176+48)(R4)
MOVD a8+64(FP), R12
MOVD R12, (2176+56)(R4)
MOVD a9+72(FP), R12
MOVD R12, (2176+64)(R4)
// Call function.
LE_CALL
NOPH
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
MOVD R3, r1+80(FP)
MOVD R0, r2+88(FP)
MOVD R0, err+96(FP)
MOVW R3, R4
CMP R4, $-1
BNE done
BL addrerrno<>(SB)
MOVWZ 0(R3), R3
MOVD R3, err+96(FP)
done:
XOR R0, R0 // Restore R0 to $0.
MOVD R4, 0(R9) // Save stack pointer.
RET
// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64)
TEXT ·svcCall(SB),NOSPLIT,$0
BL runtime·save_g(SB) // Save g and stack pointer
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD SAVSTACK_ASYNC(R8), R9
MOVD R15, 0(R9)
TEXT ·svcCall(SB), NOSPLIT, $0
BL runtime·save_g(SB) // Save g and stack pointer
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD SAVSTACK_ASYNC(R8), R9
MOVD R15, 0(R9)
MOVD argv+8(FP), R1 // Move function arguments into registers
MOVD dsa+16(FP), g
MOVD fnptr+0(FP), R15
MOVD argv+8(FP), R1 // Move function arguments into registers
MOVD dsa+16(FP), g
MOVD fnptr+0(FP), R15
BYTE $0x0D // Branch to function
BYTE $0xEF
BYTE $0x0D // Branch to function
BYTE $0xEF
BL runtime·load_g(SB) // Restore g and stack pointer
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R15
BL runtime·load_g(SB) // Restore g and stack pointer
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD SAVSTACK_ASYNC(R8), R9
MOVD 0(R9), R15
RET
// func svcLoad(name *byte) unsafe.Pointer
TEXT ·svcLoad(SB),NOSPLIT,$0
MOVD R15, R2 // Save go stack pointer
MOVD name+0(FP), R0 // Move SVC args into registers
MOVD $0x80000000, R1
MOVD $0, R15
BYTE $0x0A // SVC 08 LOAD
BYTE $0x08
MOVW R15, R3 // Save return code from SVC
MOVD R2, R15 // Restore go stack pointer
CMP R3, $0 // Check SVC return code
BNE error
TEXT ·svcLoad(SB), NOSPLIT, $0
MOVD R15, R2 // Save go stack pointer
MOVD name+0(FP), R0 // Move SVC args into registers
MOVD $0x80000000, R1
MOVD $0, R15
SVC_LOAD
MOVW R15, R3 // Save return code from SVC
MOVD R2, R15 // Restore go stack pointer
CMP R3, $0 // Check SVC return code
BNE error
MOVD $-2, R3 // Reset last bit of entry point to zero
AND R0, R3
MOVD R3, addr+8(FP) // Return entry point returned by SVC
CMP R0, R3 // Check if last bit of entry point was set
BNE done
MOVD $-2, R3 // Reset last bit of entry point to zero
AND R0, R3
MOVD R3, ret+8(FP) // Return entry point returned by SVC
CMP R0, R3 // Check if last bit of entry point was set
BNE done
MOVD R15, R2 // Save go stack pointer
MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
BYTE $0x0A // SVC 09 DELETE
BYTE $0x09
MOVD R2, R15 // Restore go stack pointer
MOVD R15, R2 // Save go stack pointer
MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08)
SVC_DELETE
MOVD R2, R15 // Restore go stack pointer
error:
MOVD $0, addr+8(FP) // Return 0 on failure
MOVD $0, ret+8(FP) // Return 0 on failure
done:
XOR R0, R0 // Reset r0 to 0
XOR R0, R0 // Reset r0 to 0
RET
// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
TEXT ·svcUnload(SB),NOSPLIT,$0
MOVD R15, R2 // Save go stack pointer
MOVD name+0(FP), R0 // Move SVC args into registers
MOVD addr+8(FP), R15
BYTE $0x0A // SVC 09
BYTE $0x09
XOR R0, R0 // Reset r0 to 0
MOVD R15, R1 // Save SVC return code
MOVD R2, R15 // Restore go stack pointer
MOVD R1, rc+0(FP) // Return SVC return code
TEXT ·svcUnload(SB), NOSPLIT, $0
MOVD R15, R2 // Save go stack pointer
MOVD name+0(FP), R0 // Move SVC args into registers
MOVD fnptr+8(FP), R15
SVC_DELETE
XOR R0, R0 // Reset r0 to 0
MOVD R15, R1 // Save SVC return code
MOVD R2, R15 // Restore go stack pointer
MOVD R1, ret+16(FP) // Return SVC return code
RET
// func gettid() uint64
@ -417,7 +150,233 @@ TEXT ·gettid(SB), NOSPLIT, $0
// Get CEECAATHDID
MOVD CAA(R8), R9
MOVD 0x3D0(R9), R9
MOVD CEECAATHDID(R9), R9
MOVD R9, ret+0(FP)
RET
//
// Call LE function, if the return is -1
// errno and errno2 are retrieved
//
TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD CAA(R8), R9
MOVD g, GOCB(R9)
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
MOVD parms_base+8(FP), R7 // R7 -> argument array
MOVD parms_len+16(FP), R8 // R8 number of arguments
// arg 1 ---> R1
CMP R8, $0
BEQ docall
SUB $1, R8
MOVD 0(R7), R1
// arg 2 ---> R2
CMP R8, $0
BEQ docall
SUB $1, R8
ADD $8, R7
MOVD 0(R7), R2
// arg 3 --> R3
CMP R8, $0
BEQ docall
SUB $1, R8
ADD $8, R7
MOVD 0(R7), R3
CMP R8, $0
BEQ docall
MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
repeat:
ADD $8, R7
MOVD 0(R7), R0 // advance arg pointer by 8 byte
ADD $8, R6 // advance LE argument address by 8 byte
MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
SUB $1, R8
CMP R8, $0
BNE repeat
docall:
MOVD funcdesc+0(FP), R8 // R8-> function descriptor
LMG 0(R8), R5, R6
MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
LE_CALL // balr R7, R6 (return #1)
NOPH
MOVD R3, ret+32(FP)
CMP R3, $-1 // compare result to -1
BNE done
// retrieve errno and errno2
MOVD zosLibVec<>(SB), R8
ADD $(__errno), R8
LMG 0(R8), R5, R6
LE_CALL // balr R7, R6 __errno (return #3)
NOPH
MOVWZ 0(R3), R3
MOVD R3, err+48(FP)
MOVD zosLibVec<>(SB), R8
ADD $(__err2ad), R8
LMG 0(R8), R5, R6
LE_CALL // balr R7, R6 __err2ad (return #2)
NOPH
MOVW (R3), R2 // retrieve errno2
MOVD R2, errno2+40(FP) // store in return area
done:
MOVD R4, 0(R9) // Save stack pointer.
RET
//
// Call LE function, if the return is 0
// errno and errno2 are retrieved
//
TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0
MOVW PSALAA, R8
MOVD LCA64(R8), R8
MOVD CAA(R8), R9
MOVD g, GOCB(R9)
// Restore LE stack.
MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
MOVD parms_base+8(FP), R7 // R7 -> argument array
MOVD parms_len+16(FP), R8 // R8 number of arguments
// arg 1 ---> R1
CMP R8, $0
BEQ docall
SUB $1, R8
MOVD 0(R7), R1
// arg 2 ---> R2
CMP R8, $0
BEQ docall
SUB $1, R8
ADD $8, R7
MOVD 0(R7), R2
// arg 3 --> R3
CMP R8, $0
BEQ docall
SUB $1, R8
ADD $8, R7
MOVD 0(R7), R3
CMP R8, $0
BEQ docall
MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
repeat:
ADD $8, R7
MOVD 0(R7), R0 // advance arg pointer by 8 byte
ADD $8, R6 // advance LE argument address by 8 byte
MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
SUB $1, R8
CMP R8, $0
BNE repeat
docall:
MOVD funcdesc+0(FP), R8 // R8-> function descriptor
LMG 0(R8), R5, R6
MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
LE_CALL // balr R7, R6 (return #1)
NOPH
MOVD R3, ret+32(FP)
CMP R3, $0 // compare result to 0
BNE done
// retrieve errno and errno2
MOVD zosLibVec<>(SB), R8
ADD $(__errno), R8
LMG 0(R8), R5, R6
LE_CALL // balr R7, R6 __errno (return #3)
NOPH
MOVWZ 0(R3), R3
MOVD R3, err+48(FP)
MOVD zosLibVec<>(SB), R8
ADD $(__err2ad), R8
LMG 0(R8), R5, R6
LE_CALL // balr R7, R6 __err2ad (return #2)
NOPH
MOVW (R3), R2 // retrieve errno2
MOVD R2, errno2+40(FP) // store in return area
XOR R2, R2
MOVWZ R2, (R3) // clear errno2
done:
MOVD R4, 0(R9) // Save stack pointer.
RET
//
// function to test if a pointer can be safely dereferenced (content read)
// return 0 for success
//
TEXT ·ptrtest(SB), NOSPLIT, $0-16
MOVD arg+0(FP), R10 // test pointer in R10
// set up R2 to point to CEECAADMC
BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
// set up R5 to point to the "shunt" path which sets R3 to 1 (failure)
BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
// if r3 is not zero (failed) then branch to finish
BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
// atomic store shunt address in R5 into CEECAADMC
BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
// now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above
BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
// finish here, restore 0 into CEECAADMC
BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
MOVD R3, ret+8(FP) // result in R3
RET
//
// function to test if a uintptr can be loaded from a pointer
// return 1: the 8-byte content
// 2: 0 for success, 1 for failure
//
// func safeload(ptr uintptr) ( value uintptr, error uintptr)
TEXT ·safeload(SB), NOSPLIT, $0-24
MOVD ptr+0(FP), R10 // test pointer in R10
MOVD $0x0, R6
BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10)
BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
MOVD R6, value+8(FP) // result in R6
MOVD R3, error+16(FP) // error in R3
RET

657
vendor/golang.org/x/sys/unix/bpxsvc_zos.go generated vendored Normal file
View File

@ -0,0 +1,657 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build zos
package unix
import (
"bytes"
"fmt"
"unsafe"
)
//go:noescape
func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
//go:noescape
func A2e([]byte)
//go:noescape
func E2a([]byte)
const (
BPX4STA = 192 // stat
BPX4FST = 104 // fstat
BPX4LST = 132 // lstat
BPX4OPN = 156 // open
BPX4CLO = 72 // close
BPX4CHR = 500 // chattr
BPX4FCR = 504 // fchattr
BPX4LCR = 1180 // lchattr
BPX4CTW = 492 // cond_timed_wait
BPX4GTH = 1056 // __getthent
BPX4PTQ = 412 // pthread_quiesc
BPX4PTR = 320 // ptrace
)
const (
//options
//byte1
BPX_OPNFHIGH = 0x80
//byte2
BPX_OPNFEXEC = 0x80
//byte3
BPX_O_NOLARGEFILE = 0x08
BPX_O_LARGEFILE = 0x04
BPX_O_ASYNCSIG = 0x02
BPX_O_SYNC = 0x01
//byte4
BPX_O_CREXCL = 0xc0
BPX_O_CREAT = 0x80
BPX_O_EXCL = 0x40
BPX_O_NOCTTY = 0x20
BPX_O_TRUNC = 0x10
BPX_O_APPEND = 0x08
BPX_O_NONBLOCK = 0x04
BPX_FNDELAY = 0x04
BPX_O_RDWR = 0x03
BPX_O_RDONLY = 0x02
BPX_O_WRONLY = 0x01
BPX_O_ACCMODE = 0x03
BPX_O_GETFL = 0x0f
//mode
// byte1 (file type)
BPX_FT_DIR = 1
BPX_FT_CHARSPEC = 2
BPX_FT_REGFILE = 3
BPX_FT_FIFO = 4
BPX_FT_SYMLINK = 5
BPX_FT_SOCKET = 6
//byte3
BPX_S_ISUID = 0x08
BPX_S_ISGID = 0x04
BPX_S_ISVTX = 0x02
BPX_S_IRWXU1 = 0x01
BPX_S_IRUSR = 0x01
//byte4
BPX_S_IRWXU2 = 0xc0
BPX_S_IWUSR = 0x80
BPX_S_IXUSR = 0x40
BPX_S_IRWXG = 0x38
BPX_S_IRGRP = 0x20
BPX_S_IWGRP = 0x10
BPX_S_IXGRP = 0x08
BPX_S_IRWXOX = 0x07
BPX_S_IROTH = 0x04
BPX_S_IWOTH = 0x02
BPX_S_IXOTH = 0x01
CW_INTRPT = 1
CW_CONDVAR = 32
CW_TIMEOUT = 64
PGTHA_NEXT = 2
PGTHA_CURRENT = 1
PGTHA_FIRST = 0
PGTHA_LAST = 3
PGTHA_PROCESS = 0x80
PGTHA_CONTTY = 0x40
PGTHA_PATH = 0x20
PGTHA_COMMAND = 0x10
PGTHA_FILEDATA = 0x08
PGTHA_THREAD = 0x04
PGTHA_PTAG = 0x02
PGTHA_COMMANDLONG = 0x01
PGTHA_THREADFAST = 0x80
PGTHA_FILEPATH = 0x40
PGTHA_THDSIGMASK = 0x20
// thread quiesce mode
QUIESCE_TERM int32 = 1
QUIESCE_FORCE int32 = 2
QUIESCE_QUERY int32 = 3
QUIESCE_FREEZE int32 = 4
QUIESCE_UNFREEZE int32 = 5
FREEZE_THIS_THREAD int32 = 6
FREEZE_EXIT int32 = 8
QUIESCE_SRB int32 = 9
)
type Pgtha struct {
Pid uint32 // 0
Tid0 uint32 // 4
Tid1 uint32
Accesspid byte // C
Accesstid byte // D
Accessasid uint16 // E
Loginname [8]byte // 10
Flag1 byte // 18
Flag1b2 byte // 19
}
type Bpxystat_t struct { // DSECT BPXYSTAT
St_id [4]uint8 // 0
St_length uint16 // 0x4
St_version uint16 // 0x6
St_mode uint32 // 0x8
St_ino uint32 // 0xc
St_dev uint32 // 0x10
St_nlink uint32 // 0x14
St_uid uint32 // 0x18
St_gid uint32 // 0x1c
St_size uint64 // 0x20
St_atime uint32 // 0x28
St_mtime uint32 // 0x2c
St_ctime uint32 // 0x30
St_rdev uint32 // 0x34
St_auditoraudit uint32 // 0x38
St_useraudit uint32 // 0x3c
St_blksize uint32 // 0x40
St_createtime uint32 // 0x44
St_auditid [4]uint32 // 0x48
St_res01 uint32 // 0x58
Ft_ccsid uint16 // 0x5c
Ft_flags uint16 // 0x5e
St_res01a [2]uint32 // 0x60
St_res02 uint32 // 0x68
St_blocks uint32 // 0x6c
St_opaque [3]uint8 // 0x70
St_visible uint8 // 0x73
St_reftime uint32 // 0x74
St_fid uint64 // 0x78
St_filefmt uint8 // 0x80
St_fspflag2 uint8 // 0x81
St_res03 [2]uint8 // 0x82
St_ctimemsec uint32 // 0x84
St_seclabel [8]uint8 // 0x88
St_res04 [4]uint8 // 0x90
// end of version 1
_ uint32 // 0x94
St_atime64 uint64 // 0x98
St_mtime64 uint64 // 0xa0
St_ctime64 uint64 // 0xa8
St_createtime64 uint64 // 0xb0
St_reftime64 uint64 // 0xb8
_ uint64 // 0xc0
St_res05 [16]uint8 // 0xc8
// end of version 2
}
type BpxFilestatus struct {
Oflag1 byte
Oflag2 byte
Oflag3 byte
Oflag4 byte
}
type BpxMode struct {
Ftype byte
Mode1 byte
Mode2 byte
Mode3 byte
}
// The attribute structure for extended attributes
type Bpxyatt_t struct { // DSECT BPXYATT
Att_id [4]uint8
Att_version uint16
Att_res01 [2]uint8
Att_setflags1 uint8
Att_setflags2 uint8
Att_setflags3 uint8
Att_setflags4 uint8
Att_mode uint32
Att_uid uint32
Att_gid uint32
Att_opaquemask [3]uint8
Att_visblmaskres uint8
Att_opaque [3]uint8
Att_visibleres uint8
Att_size_h uint32
Att_size_l uint32
Att_atime uint32
Att_mtime uint32
Att_auditoraudit uint32
Att_useraudit uint32
Att_ctime uint32
Att_reftime uint32
// end of version 1
Att_filefmt uint8
Att_res02 [3]uint8
Att_filetag uint32
Att_res03 [8]uint8
// end of version 2
Att_atime64 uint64
Att_mtime64 uint64
Att_ctime64 uint64
Att_reftime64 uint64
Att_seclabel [8]uint8
Att_ver3res02 [8]uint8
// end of version 3
}
func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) {
if len(name) < 1024 {
var namebuf [1024]byte
sz := int32(copy(namebuf[:], name))
A2e(namebuf[:sz])
var parms [7]unsafe.Pointer
parms[0] = unsafe.Pointer(&sz)
parms[1] = unsafe.Pointer(&namebuf[0])
parms[2] = unsafe.Pointer(options)
parms[3] = unsafe.Pointer(mode)
parms[4] = unsafe.Pointer(&rv)
parms[5] = unsafe.Pointer(&rc)
parms[6] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4OPN)
return rv, rc, rn
}
return -1, -1, -1
}
func BpxClose(fd int32) (rv int32, rc int32, rn int32) {
var parms [4]unsafe.Pointer
parms[0] = unsafe.Pointer(&fd)
parms[1] = unsafe.Pointer(&rv)
parms[2] = unsafe.Pointer(&rc)
parms[3] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4CLO)
return rv, rc, rn
}
func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
st.St_version = 2
stat_sz := uint32(unsafe.Sizeof(*st))
var parms [6]unsafe.Pointer
parms[0] = unsafe.Pointer(&fd)
parms[1] = unsafe.Pointer(&stat_sz)
parms[2] = unsafe.Pointer(st)
parms[3] = unsafe.Pointer(&rv)
parms[4] = unsafe.Pointer(&rc)
parms[5] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4FST)
return rv, rc, rn
}
func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
if len(name) < 1024 {
var namebuf [1024]byte
sz := int32(copy(namebuf[:], name))
A2e(namebuf[:sz])
st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
st.St_version = 2
stat_sz := uint32(unsafe.Sizeof(*st))
var parms [7]unsafe.Pointer
parms[0] = unsafe.Pointer(&sz)
parms[1] = unsafe.Pointer(&namebuf[0])
parms[2] = unsafe.Pointer(&stat_sz)
parms[3] = unsafe.Pointer(st)
parms[4] = unsafe.Pointer(&rv)
parms[5] = unsafe.Pointer(&rc)
parms[6] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4STA)
return rv, rc, rn
}
return -1, -1, -1
}
func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
if len(name) < 1024 {
var namebuf [1024]byte
sz := int32(copy(namebuf[:], name))
A2e(namebuf[:sz])
st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
st.St_version = 2
stat_sz := uint32(unsafe.Sizeof(*st))
var parms [7]unsafe.Pointer
parms[0] = unsafe.Pointer(&sz)
parms[1] = unsafe.Pointer(&namebuf[0])
parms[2] = unsafe.Pointer(&stat_sz)
parms[3] = unsafe.Pointer(st)
parms[4] = unsafe.Pointer(&rv)
parms[5] = unsafe.Pointer(&rc)
parms[6] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4LST)
return rv, rc, rn
}
return -1, -1, -1
}
func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
if len(path) >= 1024 {
return -1, -1, -1
}
var namebuf [1024]byte
sz := int32(copy(namebuf[:], path))
A2e(namebuf[:sz])
attr_sz := uint32(unsafe.Sizeof(*attr))
var parms [7]unsafe.Pointer
parms[0] = unsafe.Pointer(&sz)
parms[1] = unsafe.Pointer(&namebuf[0])
parms[2] = unsafe.Pointer(&attr_sz)
parms[3] = unsafe.Pointer(attr)
parms[4] = unsafe.Pointer(&rv)
parms[5] = unsafe.Pointer(&rc)
parms[6] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4CHR)
return rv, rc, rn
}
func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
if len(path) >= 1024 {
return -1, -1, -1
}
var namebuf [1024]byte
sz := int32(copy(namebuf[:], path))
A2e(namebuf[:sz])
attr_sz := uint32(unsafe.Sizeof(*attr))
var parms [7]unsafe.Pointer
parms[0] = unsafe.Pointer(&sz)
parms[1] = unsafe.Pointer(&namebuf[0])
parms[2] = unsafe.Pointer(&attr_sz)
parms[3] = unsafe.Pointer(attr)
parms[4] = unsafe.Pointer(&rv)
parms[5] = unsafe.Pointer(&rc)
parms[6] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4LCR)
return rv, rc, rn
}
func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
attr_sz := uint32(unsafe.Sizeof(*attr))
var parms [6]unsafe.Pointer
parms[0] = unsafe.Pointer(&fd)
parms[1] = unsafe.Pointer(&attr_sz)
parms[2] = unsafe.Pointer(attr)
parms[3] = unsafe.Pointer(&rv)
parms[4] = unsafe.Pointer(&rc)
parms[5] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4FCR)
return rv, rc, rn
}
func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) {
var parms [8]unsafe.Pointer
parms[0] = unsafe.Pointer(&sec)
parms[1] = unsafe.Pointer(&nsec)
parms[2] = unsafe.Pointer(&events)
parms[3] = unsafe.Pointer(secrem)
parms[4] = unsafe.Pointer(nsecrem)
parms[5] = unsafe.Pointer(&rv)
parms[6] = unsafe.Pointer(&rc)
parms[7] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4CTW)
return rv, rc, rn
}
func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) {
var parms [7]unsafe.Pointer
inlen := uint32(26) // nothing else will work. Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte
parms[0] = unsafe.Pointer(&inlen)
parms[1] = unsafe.Pointer(&in)
parms[2] = unsafe.Pointer(outlen)
parms[3] = unsafe.Pointer(&out)
parms[4] = unsafe.Pointer(&rv)
parms[5] = unsafe.Pointer(&rc)
parms[6] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4GTH)
return rv, rc, rn
}
func ZosJobname() (jobname string, err error) {
var pgtha Pgtha
pgtha.Pid = uint32(Getpid())
pgtha.Accesspid = PGTHA_CURRENT
pgtha.Flag1 = PGTHA_PROCESS
var out [256]byte
var outlen uint32
outlen = 256
rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0]))
if rv == 0 {
gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic
ix := bytes.Index(out[:], gthc)
if ix == -1 {
err = fmt.Errorf("BPX4GTH: gthc return data not found")
return
}
jn := out[ix+80 : ix+88] // Pgthc is not declared here; the job name is the 8-byte field at offset 80 of the gthc area
E2a(jn)
jobname = string(bytes.TrimRight(jn, " "))
} else {
err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn)
}
return
}
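// Illustrative usage sketch (not part of the original source): obtaining the
// job name of the current address space via ZosJobname. The surrounding
// function name and the printing are assumptions for demonstration only.
func exampleZosJobname() {
	jobname, err := ZosJobname()
	if err != nil {
		fmt.Println("BPX4GTH failed:", err)
		return
	}
	fmt.Println("running under job", jobname)
}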
func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) {
var userdata [8]byte
var parms [5]unsafe.Pointer
copy(userdata[:], data+"        ") // blank-pad data so the 8-byte userdata field is fully filled
A2e(userdata[:])
parms[0] = unsafe.Pointer(&code)
parms[1] = unsafe.Pointer(&userdata[0])
parms[2] = unsafe.Pointer(&rv)
parms[3] = unsafe.Pointer(&rc)
parms[4] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4PTQ)
return rv, rc, rn
}
const (
PT_TRACE_ME = 0 // Debug this process
PT_READ_I = 1 // Read a full word
PT_READ_D = 2 // Read a full word
PT_READ_U = 3 // Read control info
PT_WRITE_I = 4 // Write a full word
PT_WRITE_D = 5 // Write a full word
PT_CONTINUE = 7 // Continue the process
PT_KILL = 8 // Terminate the process
PT_READ_GPR = 11 // Read GPR, CR, PSW
PT_READ_FPR = 12 // Read FPR
PT_READ_VR = 13 // Read VR
PT_WRITE_GPR = 14 // Write GPR, CR, PSW
PT_WRITE_FPR = 15 // Write FPR
PT_WRITE_VR = 16 // Write VR
PT_READ_BLOCK = 17 // Read storage
PT_WRITE_BLOCK = 19 // Write storage
PT_READ_GPRH = 20 // Read GPRH
PT_WRITE_GPRH = 21 // Write GPRH
PT_REGHSET = 22 // Read all GPRHs
PT_ATTACH = 30 // Attach to a process
PT_DETACH = 31 // Detach from a process
PT_REGSET = 32 // Read all GPRs
PT_REATTACH = 33 // Reattach to a process
PT_LDINFO = 34 // Read loader info
PT_MULTI = 35 // Multi process mode
PT_LD64INFO = 36 // RMODE64 Info Area
PT_BLOCKREQ = 40 // Block request
PT_THREAD_INFO = 60 // Read thread info
PT_THREAD_MODIFY = 61
PT_THREAD_READ_FOCUS = 62
PT_THREAD_WRITE_FOCUS = 63
PT_THREAD_HOLD = 64
PT_THREAD_SIGNAL = 65
PT_EXPLAIN = 66
PT_EVENTS = 67
PT_THREAD_INFO_EXTENDED = 68
PT_REATTACH2 = 71
PT_CAPTURE = 72
PT_UNCAPTURE = 73
PT_GET_THREAD_TCB = 74
PT_GET_ALET = 75
PT_SWAPIN = 76
PT_EXTENDED_EVENT = 98
PT_RECOVER = 99 // Debug a program check
PT_GPR0 = 0 // General purpose register 0
PT_GPR1 = 1 // General purpose register 1
PT_GPR2 = 2 // General purpose register 2
PT_GPR3 = 3 // General purpose register 3
PT_GPR4 = 4 // General purpose register 4
PT_GPR5 = 5 // General purpose register 5
PT_GPR6 = 6 // General purpose register 6
PT_GPR7 = 7 // General purpose register 7
PT_GPR8 = 8 // General purpose register 8
PT_GPR9 = 9 // General purpose register 9
PT_GPR10 = 10 // General purpose register 10
PT_GPR11 = 11 // General purpose register 11
PT_GPR12 = 12 // General purpose register 12
PT_GPR13 = 13 // General purpose register 13
PT_GPR14 = 14 // General purpose register 14
PT_GPR15 = 15 // General purpose register 15
PT_FPR0 = 16 // Floating point register 0
PT_FPR1 = 17 // Floating point register 1
PT_FPR2 = 18 // Floating point register 2
PT_FPR3 = 19 // Floating point register 3
PT_FPR4 = 20 // Floating point register 4
PT_FPR5 = 21 // Floating point register 5
PT_FPR6 = 22 // Floating point register 6
PT_FPR7 = 23 // Floating point register 7
PT_FPR8 = 24 // Floating point register 8
PT_FPR9 = 25 // Floating point register 9
PT_FPR10 = 26 // Floating point register 10
PT_FPR11 = 27 // Floating point register 11
PT_FPR12 = 28 // Floating point register 12
PT_FPR13 = 29 // Floating point register 13
PT_FPR14 = 30 // Floating point register 14
PT_FPR15 = 31 // Floating point register 15
PT_FPC = 32 // Floating point control register
PT_PSW = 40 // PSW
PT_PSW0 = 40 // Left half of the PSW
PT_PSW1 = 41 // Right half of the PSW
PT_CR0 = 42 // Control register 0
PT_CR1 = 43 // Control register 1
PT_CR2 = 44 // Control register 2
PT_CR3 = 45 // Control register 3
PT_CR4 = 46 // Control register 4
PT_CR5 = 47 // Control register 5
PT_CR6 = 48 // Control register 6
PT_CR7 = 49 // Control register 7
PT_CR8 = 50 // Control register 8
PT_CR9 = 51 // Control register 9
PT_CR10 = 52 // Control register 10
PT_CR11 = 53 // Control register 11
PT_CR12 = 54 // Control register 12
PT_CR13 = 55 // Control register 13
PT_CR14 = 56 // Control register 14
PT_CR15 = 57 // Control register 15
PT_GPRH0 = 58 // GP High register 0
PT_GPRH1 = 59 // GP High register 1
PT_GPRH2 = 60 // GP High register 2
PT_GPRH3 = 61 // GP High register 3
PT_GPRH4 = 62 // GP High register 4
PT_GPRH5 = 63 // GP High register 5
PT_GPRH6 = 64 // GP High register 6
PT_GPRH7 = 65 // GP High register 7
PT_GPRH8 = 66 // GP High register 8
PT_GPRH9 = 67 // GP High register 9
PT_GPRH10 = 68 // GP High register 10
PT_GPRH11 = 69 // GP High register 11
PT_GPRH12 = 70 // GP High register 12
PT_GPRH13 = 71 // GP High register 13
PT_GPRH14 = 72 // GP High register 14
PT_GPRH15 = 73 // GP High register 15
PT_VR0 = 74 // Vector register 0
PT_VR1 = 75 // Vector register 1
PT_VR2 = 76 // Vector register 2
PT_VR3 = 77 // Vector register 3
PT_VR4 = 78 // Vector register 4
PT_VR5 = 79 // Vector register 5
PT_VR6 = 80 // Vector register 6
PT_VR7 = 81 // Vector register 7
PT_VR8 = 82 // Vector register 8
PT_VR9 = 83 // Vector register 9
PT_VR10 = 84 // Vector register 10
PT_VR11 = 85 // Vector register 11
PT_VR12 = 86 // Vector register 12
PT_VR13 = 87 // Vector register 13
PT_VR14 = 88 // Vector register 14
PT_VR15 = 89 // Vector register 15
PT_VR16 = 90 // Vector register 16
PT_VR17 = 91 // Vector register 17
PT_VR18 = 92 // Vector register 18
PT_VR19 = 93 // Vector register 19
PT_VR20 = 94 // Vector register 20
PT_VR21 = 95 // Vector register 21
PT_VR22 = 96 // Vector register 22
PT_VR23 = 97 // Vector register 23
PT_VR24 = 98 // Vector register 24
PT_VR25 = 99 // Vector register 25
PT_VR26 = 100 // Vector register 26
PT_VR27 = 101 // Vector register 27
PT_VR28 = 102 // Vector register 28
PT_VR29 = 103 // Vector register 29
PT_VR30 = 104 // Vector register 30
PT_VR31 = 105 // Vector register 31
PT_PSWG = 106 // PSWG
PT_PSWG0 = 106 // Bytes 0-3
PT_PSWG1 = 107 // Bytes 4-7
PT_PSWG2 = 108 // Bytes 8-11 (IA high word)
PT_PSWG3 = 109 // Bytes 12-15 (IA low word)
)
func Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) {
var parms [8]unsafe.Pointer
parms[0] = unsafe.Pointer(&request)
parms[1] = unsafe.Pointer(&pid)
parms[2] = unsafe.Pointer(&addr)
parms[3] = unsafe.Pointer(&data)
parms[4] = unsafe.Pointer(&buffer)
parms[5] = unsafe.Pointer(&rv)
parms[6] = unsafe.Pointer(&rc)
parms[7] = unsafe.Pointer(&rn)
bpxcall(parms[:], BPX4PTR)
return rv, rc, rn
}
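// Illustrative usage sketch (not part of the original source): attaching to
// and detaching from a process with Bpx4ptr. PT_ATTACH and PT_DETACH operate
// on the pid alone, so addr/data/buffer are passed as nil here; whether nil
// is acceptable for the unused arguments, and the helper name itself, are
// assumptions for demonstration only.
func exampleAttachDetach(pid int32) error {
	if rv, rc, rn := Bpx4ptr(PT_ATTACH, pid, nil, nil, nil); rv != 0 {
		return fmt.Errorf("PT_ATTACH: rv=%d errno=%d reason code=0x%x", rv, rc, rn)
	}
	if rv, rc, rn := Bpx4ptr(PT_DETACH, pid, nil, nil, nil); rv != 0 {
		return fmt.Errorf("PT_DETACH: rv=%d errno=%d reason code=0x%x", rv, rc, rn)
	}
	return nil
}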
func copyU8(val uint8, dest []uint8) int {
if len(dest) < 1 {
return 0
}
dest[0] = val
return 1
}
func copyU8Arr(src, dest []uint8) int {
if len(dest) < len(src) {
return 0
}
for i, v := range src {
dest[i] = v
}
return len(src)
}
func copyU16(val uint16, dest []uint16) int {
if len(dest) < 1 {
return 0
}
dest[0] = val
return 1
}
func copyU32(val uint32, dest []uint32) int {
if len(dest) < 1 {
return 0
}
dest[0] = val
return 1
}
func copyU32Arr(src, dest []uint32) int {
if len(dest) < len(src) {
return 0
}
for i, v := range src {
dest[i] = v
}
return len(src)
}
func copyU64(val uint64, dest []uint64) int {
if len(dest) < 1 {
return 0
}
dest[0] = val
return 1
}
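// Illustrative usage sketch (not part of the original source): the copyU*
// helpers above write into the destination only when it is large enough and
// return the number of elements copied (0 otherwise). Here they fill a
// 4-byte eye-catcher field; the variable names are assumptions only.
func exampleCopyHelpers() bool {
	var id [4]uint8
	n := copyU8Arr([]uint8{0xe2, 0xe3, 0xc1, 0xe3}, id[:]) // "STAT" in EBCDIC
	return n == 4
}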
