Mirror of https://github.com/ManyakRus/starter.git

created folders.go

Nikitin Aleksandr
2023-07-20 15:54:11 +03:00
parent 71fc53b2f8
commit d86504a070
254 changed files with 11087 additions and 7197 deletions

View File

@@ -15,7 +15,7 @@ run:
mod:
clear
go get -u ./...
go mod tidy -compat=1.18
go mod tidy -compat=1.20
go mod vendor
go fmt ./...
build:
@@ -26,15 +26,11 @@ build:
lint:
clear
go fmt ./...
golangci-lint run ./internal/v0/...
golangci-lint run ./pkg/v0/...
gocyclo -over 10 ./internal/v0
gocyclo -over 10 ./pkg/v0
gocritic check ./internal/v0/...
gocritic check ./pkg/v0/...
staticcheck ./internal/v0/...
staticcheck ./pkg/v0/...
run.test:
golangci-lint run ./...
gocyclo -over 10 ./
gocritic check ./...
staticcheck ./...
test.run:
clear
go fmt ./...
go test -coverprofile cover.out ./...

View File

@@ -1,6 +1,8 @@
package config
import (
"github.com/ManyakRus/starter/micro"
"os"
"testing"
)
@@ -13,3 +15,14 @@ func TestLoadEnv(t *testing.T) {
//t.Error("Error TestLoadEnv")
}
func TestLoadEnv_from_file(t *testing.T) {
dir := micro.ProgramDir()
FileName := dir + "test.sh"
LoadEnv_from_file(FileName)
value := os.Getenv("SERVICE_NAME")
if value == "" {
t.Error("TestLoadEnv_from_file() error: value =''")
}
}

View File

@@ -9,7 +9,8 @@ import (
// Ctx stores the program's global context
// do not use it directly
var Ctx context.Context
// * - a pointer, so that a reference to an external context can be assigned
var Ctx *context.Context
// CancelContext - the function that cancels the global context
var CancelContext func()
@@ -32,16 +33,20 @@ func GetContext() context.Context {
onceCtx.Do(func() {
CtxBg := context.Background()
Ctx, CancelContext = context.WithCancel(CtxBg)
var Ctx0 context.Context
Ctx0, CancelContext = context.WithCancel(CtxBg)
Ctx = &Ctx0
})
return Ctx
return *Ctx
}
// GetNewContext - creates and returns a new application context
func GetNewContext() context.Context {
CtxBg := context.Background()
Ctx, CancelContext = context.WithCancel(CtxBg)
var Ctx0 context.Context
Ctx0, CancelContext = context.WithCancel(CtxBg)
Ctx = &Ctx0
return Ctx
return *Ctx
}
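
The hunk above switches the package-level Ctx to a pointer while keeping GetContext()'s return type unchanged. A minimal, hedged usage sketch (the import path github.com/ManyakRus/starter/contextmain is assumed from the module name):

package main

import (
	"github.com/ManyakRus/starter/contextmain"
)

func main() {
	ctx := contextmain.GetContext() // lazily creates the shared context on first call
	defer contextmain.CancelContext()

	select {
	case <-ctx.Done():
		// the global context has been cancelled
	default:
		// still running
	}
}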

View File

@@ -13,9 +13,10 @@ type File struct {
}
type Folder struct {
Name string
Files []*File
Folders map[string]*Folder
FileName string
Name string
Files []*File
Folders map[string]*Folder
}
func (f *Folder) String() string {
@@ -30,7 +31,7 @@ func FindFoldersTree(dir string, NeedFolders, NeedFiles, NeedDot bool, exclude s
var nodes = map[string]interface{}{}
var walkFun filepath.WalkFunc = func(p string, info os.FileInfo, err error) error {
if info.IsDir() {
nodes[p] = &Folder{path.Base(p), []*File{}, map[string]*Folder{}}
nodes[p] = &Folder{p, path.Base(p), []*File{}, map[string]*Folder{}}
} else {
nodes[p] = &File{path.Base(p)}
}
@@ -50,28 +51,36 @@ func FindFoldersTree(dir string, NeedFolders, NeedFiles, NeedDot bool, exclude s
parentFolder = nodes[path.Dir(key)].(*Folder)
}
// determine the Folder/File name
var Name string
switch value.(type) {
case *File:
Name = value.(*File).Name
case *Folder:
Name = value.(*Folder).Name
}
// skip hidden files starting with a dot
if NeedDot == false && len(Name) > 0 && Name[0:1] == "." {
continue
}
// skip names matching the exclude prefix
if exclude != "" && len(Name) >= len(exclude) && Name[0:len(exclude)] == exclude {
continue
}
//
switch v := value.(type) {
case *File:
if NeedFiles == false {
break
}
if NeedDot == false && len(v.Name) > 0 && v.Name[0:1] == "." {
break
}
if exclude != "" && len(v.Name) >= len(exclude) && v.Name[0:len(exclude)] == exclude {
break
}
parentFolder.Files = append(parentFolder.Files, v)
case *Folder:
if NeedFolders == false {
break
}
if NeedDot == false && len(v.Name) > 0 && v.Name[0:1] == "." {
break
}
if exclude != "" && len(v.Name) >= len(exclude) && v.Name[0:len(exclude)] == exclude {
break
}
parentFolder.Folders[v.Name] = v
}
}
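
A hedged usage sketch of the reworked tree walker; the import path (github.com/ManyakRus/starter/folders) and the *Folder return type are assumptions inferred from the test below:

package main

import (
	"fmt"

	"github.com/ManyakRus/starter/folders"
	"github.com/ManyakRus/starter/micro"
)

func main() {
	dir := micro.ProgramDir()
	// folders only, no files, no dot-entries, skip anything starting with "vendor"
	root := folders.FindFoldersTree(dir, true, false, false, "vendor")
	if root != nil {
		// FileName is the new field holding the full path; Name stays the base name
		fmt.Println(root.FileName, root.Name, len(root.Folders))
	}
}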

View File

@@ -8,5 +8,8 @@ import (
func TestFindFolders(t *testing.T) {
//dir := `/home/user/GolandProjects/!sanek/image_packages/`
dir := micro.ProgramDir()
FindFoldersTree(dir, true, false, false, "vendor")
Otvet := FindFoldersTree(dir, true, false, false, "vendor")
if Otvet == nil {
t.Log("TestFindFolders() error: Otvet = nil")
}
}

go.mod — 44 changed lines
View File

@@ -1,15 +1,15 @@
module github.com/ManyakRus/starter
go 1.18
go 1.20
require (
github.com/ManyakRus/logrus v0.0.0-20230426064230-515895169d22
github.com/camunda/zeebe/clients/go/v8 v8.2.5
github.com/camunda/zeebe/clients/go/v8 v8.2.7
github.com/denisenkom/go-mssqldb v0.12.3
github.com/emersion/go-imap v1.2.1
github.com/emersion/go-message v0.16.0
github.com/go-faster/errors v0.6.1
github.com/gofiber/fiber/v2 v2.46.0
github.com/gofiber/fiber/v2 v2.47.0
github.com/golang-module/carbon/v2 v2.2.3
github.com/gotd/contrib v0.19.0
github.com/gotd/td v0.83.0
@@ -19,17 +19,17 @@ require (
github.com/lib/pq v1.10.9
github.com/mattn/go-sqlite3 v1.14.17
github.com/mdp/qrterminal/v3 v3.1.1
github.com/minio/minio-go/v7 v7.0.56
github.com/nats-io/nats.go v1.26.0
github.com/sashabaranov/go-openai v1.9.5
github.com/minio/minio-go/v7 v7.0.57
github.com/nats-io/nats.go v1.27.0
github.com/sashabaranov/go-openai v1.11.2
github.com/segmentio/kafka-go v0.4.40
github.com/xhit/go-simple-mail/v2 v2.13.0
gitlab.aescorp.ru/dsp_dev/claim/common/object_model v1.0.171
go.mau.fi/whatsmeow v0.0.0-20230601124015-46f2ff088640
golang.org/x/crypto v0.9.0
gitlab.aescorp.ru/dsp_dev/claim/common/object_model v1.0.191
go.mau.fi/whatsmeow v0.0.0-20230616194828-be0edabb0bf3
golang.org/x/crypto v0.10.0
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
gorm.io/driver/postgres v1.5.2
gorm.io/driver/sqlserver v1.5.0
gorm.io/driver/sqlserver v1.5.1
gorm.io/gorm v1.25.1
)
@@ -61,12 +61,12 @@ require (
github.com/jackc/pgproto3/v2 v2.3.2 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackc/pgx/v5 v5.4.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -81,14 +81,14 @@ require (
github.com/nats-io/nkeys v0.4.4 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 // indirect
github.com/savsgio/gotils v0.0.0-20230208104028-c358bd845dee // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/sirupsen/logrus v1.9.2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/stretchr/testify v1.8.4 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect
@@ -103,15 +103,15 @@ require (
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/net v0.11.0 // indirect
golang.org/x/oauth2 v0.9.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.9.0 // indirect
golang.org/x/term v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/grpc v1.55.0 // indirect
google.golang.org/grpc v1.56.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum — 100 changed lines
View File

@@ -1,16 +1,11 @@
filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.2/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/ManyakRus/logrus v0.0.0-20230426064230-515895169d22 h1:DrHNlqXfwvCpCqn4MfRn4NBtezqWL5GLor3jC7QFPj0=
@@ -22,8 +17,8 @@ github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:o
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/camunda/zeebe/clients/go/v8 v8.2.5 h1:140au6RwNLOORhPLF27GUgWTqppNh1MZ1wXlMl6XTRA=
github.com/camunda/zeebe/clients/go/v8 v8.2.5/go.mod h1:Y6we21faDAKtsEULpzKHV8YP1PkC7RqRanuafrnBoVI=
github.com/camunda/zeebe/clients/go/v8 v8.2.7 h1:fkL1ExWIqsVMg56nGDl/XfWf7W4PE3lzI9p1MdE3IkU=
github.com/camunda/zeebe/clients/go/v8 v8.2.7/go.mod h1:epiKnbOo1QeAD7BhdQoDNsdE0dJ2qoAx4uGxob1gHAQ=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
@@ -83,11 +78,10 @@ github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/gofiber/fiber/v2 v2.46.0 h1:wkkWotblsGVlLjXj2dpgKQAYHtXumsK/HyFugQM68Ns=
github.com/gofiber/fiber/v2 v2.46.0/go.mod h1:DNl0/c37WLe0g92U6lx1VMQuxGUQY5V7EIaVoEsUffc=
github.com/gofiber/fiber/v2 v2.47.0 h1:EN5lHVCc+Pyqh5OEsk8fzRiifgwpbrP0rulQ4iNf3fs=
github.com/gofiber/fiber/v2 v2.47.0/go.mod h1:mbFMVN1lQuzziTkkakgtKKdjfsXSw9BKR5lmcNksUoU=
github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-module/carbon/v2 v2.2.3 h1:WvGIc5+qzq9drNzH+Gnjh1TZ0JgDY/IA+m2Dvk7Qm4Q=
@@ -110,7 +104,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
@@ -172,18 +165,16 @@ github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQ
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.4.1 h1:oKfB/FhuVtit1bBM3zNRRsZ925ZkMN3HXL+LgLUM9lE=
github.com/jackc/pgx/v5 v5.4.1/go.mod h1:q6iHT8uDNXWiFNOlRqJzBTaSH3+2xCXkokxHZC5qWFY=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
@@ -200,11 +191,11 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -239,14 +230,13 @@ github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mdp/qrterminal/v3 v3.1.1 h1:cIPwg3QU0OIm9+ce/lRfWXhPwEjOSKwk3HBwL3HBTyc=
github.com/mdp/qrterminal/v3 v3.1.1/go.mod h1:5lJlXe7Jdr8wlPDdcsJttv1/knsRgzXASyr4dcGZqNU=
github.com/microsoft/go-mssqldb v0.21.0/go.mod h1:+4wZTUnz/SV6nffv+RRRB/ss8jPng5Sho2SmM1l2ts4=
github.com/microsoft/go-mssqldb v1.1.0 h1:jsV+tpvcPTbNNKW0o3kiCD69kOHICsfjZ2VcVu2lKYc=
github.com/microsoft/go-mssqldb v1.1.0/go.mod h1:LzkFdl4z2Ck+Hi+ycGOTbL56VEfgoyA2DvYejrNGbRk=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.56 h1:pkZplIEHu8vinjkmhsexcXpWth2tjVLphrTZx6fBVZY=
github.com/minio/minio-go/v7 v7.0.56/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/minio-go/v7 v7.0.57 h1:xsFiOiWjpC1XAGbFEUOzj1/gMXGz7ljfxifwcb/5YXU=
github.com/minio/minio-go/v7 v7.0.57/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -258,14 +248,13 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI=
github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
github.com/nats-io/nats-server/v2 v2.8.4 h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4=
github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4=
github.com/nats-io/nats.go v1.26.0 h1:fWJTYPnZ8DzxIaqIHOAMfColuznchnd5Ab5dbJpgPIE=
github.com/nats-io/nats.go v1.26.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
github.com/nats-io/nats.go v1.27.0 h1:3o9fsPhmoKm+yK7rekH2GtWoE+D9jFbw8N3/ayI1C00=
github.com/nats-io/nats.go v1.27.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
@@ -275,10 +264,9 @@ github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -294,8 +282,8 @@ github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/sashabaranov/go-openai v1.9.5 h1:z1VCMXsfnug+U0ceTTIXr/L26AYl9jafqA9lptlSX0c=
github.com/sashabaranov/go-openai v1.9.5/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/sashabaranov/go-openai v1.11.2 h1:HuMf+18eldSKbqVblyeCQbtcqSpGVfqTshvi8Bn6zes=
github.com/sashabaranov/go-openai v1.11.2/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
@@ -311,8 +299,8 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -359,12 +347,12 @@ github.com/xhit/go-simple-mail/v2 v2.13.0/go.mod h1:b7P5ygho6SYE+VIqpxA6QkYfv4te
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
gitlab.aescorp.ru/dsp_dev/claim/common/object_model v1.0.171 h1:lAk/J6zGsxudQs1CVRmNikmCfL6Tsgb0NkfueVm2NNw=
gitlab.aescorp.ru/dsp_dev/claim/common/object_model v1.0.171/go.mod h1:GipQ/BQkeb4EgKMMXyZtfnydEqSooOxQCxCqhbnp5/k=
gitlab.aescorp.ru/dsp_dev/claim/common/object_model v1.0.191 h1:mXQD9yf90a0DTsGoPmF9K/YOCDyGizkko4/vQkhInT4=
gitlab.aescorp.ru/dsp_dev/claim/common/object_model v1.0.191/go.mod h1:ebgXvp8SHE+OD/Aw24PgGcJ04ztJdALAckffNI+3370=
go.mau.fi/libsignal v0.1.0 h1:vAKI/nJ5tMhdzke4cTK1fb0idJzz1JuEIpmjprueC+c=
go.mau.fi/libsignal v0.1.0/go.mod h1:R8ovrTezxtUNzCQE5PH30StOQWWeBskBsWE55vMfY9I=
go.mau.fi/whatsmeow v0.0.0-20230601124015-46f2ff088640 h1:y6EK/+hX6hOY69oubStlxBSVZw6UTS7xxgVaFEPK6Xk=
go.mau.fi/whatsmeow v0.0.0-20230601124015-46f2ff088640/go.mod h1:+ObGpFE6cbbY4hKc1FmQH9MVfqaemmlXGXSnwDvCOyE=
go.mau.fi/whatsmeow v0.0.0-20230616194828-be0edabb0bf3 h1:CByCFmtYMsj/4tO+j9YvLLcUa5xnnrDyJXFbl52quhU=
go.mau.fi/whatsmeow v0.0.0-20230616194828-be0edabb0bf3/go.mod h1:+ObGpFE6cbbY4hKc1FmQH9MVfqaemmlXGXSnwDvCOyE=
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
@@ -394,19 +382,17 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20221005025214-4161e89ecf1b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -422,28 +408,27 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs=
golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -456,30 +441,28 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -490,8 +473,9 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -517,8 +501,8 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/grpc v1.56.0 h1:+y7Bs8rtMd07LeXmL3NxcTLn7mUkbKZqEpPhMNkwJEE=
google.golang.org/grpc v1.56.0/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
@@ -543,8 +527,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.2 h1:ytTDxxEv+MplXOfFe3Lzm7SjG09fcdb3Z/c056DTBx0=
gorm.io/driver/postgres v1.5.2/go.mod h1:fmpX0m2I1PKuR7mKZiEluwrP3hbs+ps7JIGMUBpCgl8=
gorm.io/driver/sqlserver v1.5.0 h1:zol7ePfY1XiPfjEvjjBh4VatIF3kycNcPE0EMCXznHY=
gorm.io/driver/sqlserver v1.5.0/go.mod h1:tBAqioK34BHl0Iiez+BFfG5/K9nDAlhLxRkgc2qy3+4=
gorm.io/driver/sqlserver v1.5.1 h1:wpyW/pR26U94uaujltiFGXY7fd2Jw5hC9PB1ZF/Y5s4=
gorm.io/driver/sqlserver v1.5.1/go.mod h1:AYHzzte2msKTmYBYsSIq8ZUsznLJwBdkB2wpI+kt0nM=
gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64=
gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=

View File

@@ -6,7 +6,10 @@ import (
"context"
"errors"
"fmt"
"hash/fnv"
"reflect"
"runtime"
"strconv"
"strings"
"unicode"
@@ -384,4 +387,243 @@ func gogo_chan(fn func() error, chanErr chan error) {
chanErr <- err
}
// CheckInnKpp - validates an INN and KPP (Russian tax identifiers)
func CheckInnKpp(Inn, Kpp string, is_individual bool) error {
var err error
if Inn == "" {
Text1 := "ИНН не должен быть пустой"
err = errors.New(Text1)
return err
}
if is_individual == true {
if len(Inn) != 12 {
Text1 := "Длина ИНН должна быть 12 символов"
err = errors.New(Text1)
return err
}
if len(Kpp) != 0 {
Text1 := "КПП должен быть пустой"
err = errors.New(Text1)
return err
}
} else {
if len(Inn) != 10 {
Text1 := "Длина ИНН должна быть 10 символов"
err = errors.New(Text1)
return err
}
if len(Kpp) != 9 {
Text1 := "КПП должен быть 9 символов"
err = errors.New(Text1)
return err
}
err = CheckINNControlSum(Inn)
}
return err
}
// CheckINNControlSum - validates an INN (10 or 12 digits) by its control digits
func CheckINNControlSum(Inn string) error {
var err error
if len(Inn) == 10 {
err = CheckINNControlSum10(Inn)
} else if len(Inn) == 12 {
err = CheckINNControlSum12(Inn)
} else {
err = errors.New("ИНН должен быть 10 или 12 символов")
}
return err
}
// CheckINNControlSum10 - validates a 10-digit INN by its control digit
func CheckINNControlSum10(Inn string) error {
var err error
MassKoef := [10]int{2, 4, 10, 3, 5, 9, 4, 6, 8, 0}
var sum int
var x int
for i := range Inn {
s := Inn[i : i+1]
var err1 error
x, err1 = strconv.Atoi(s)
if err1 != nil {
err = errors.New("Неправильная цифра в ИНН: " + s)
return err
}
sum = sum + x*MassKoef[i]
}
ControlSum := sum % 11
ControlSum = ControlSum % 10
if ControlSum != x {
err = errors.New("Неправильная контрольная сумма ИНН")
return err
}
return err
}
// CheckINNControlSum12 - validates a 12-digit INN by its two control digits
func CheckINNControlSum12(Inn string) error {
var err error
// control digit computed over the first 11 digits
MassKoef := [11]int{7, 2, 4, 10, 3, 5, 9, 4, 6, 8, 0}
var sum int
var x11 int
for i := 0; i < 11; i++ {
s := Inn[i : i+1]
var err1 error
x, err1 := strconv.Atoi(s)
if err1 != nil {
err = errors.New("Неправильная цифра в ИНН: " + s)
return err
}
if i == 10 {
x11 = x
}
sum = sum + x*MassKoef[i]
}
ControlSum := sum % 11
ControlSum = ControlSum % 10
if ControlSum != x11 {
err = errors.New("Неправильная контрольная сумма ИНН")
return err
}
// control digit computed over all 12 digits
MassKoef2 := [12]int{3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8, 0}
var sum2 int
var x12 int
for i := 0; i < 12; i++ {
s := Inn[i : i+1]
var err1 error
x, err1 := strconv.Atoi(s)
if err1 != nil {
err = errors.New("Неправильная цифра в ИНН: " + s)
return err
}
if i == 11 {
x12 = x
}
sum2 = sum2 + x*MassKoef2[i]
}
ControlSum2 := sum2 % 11
ControlSum2 = ControlSum2 % 10
if ControlSum2 != x12 {
err = errors.New("Неправильная контрольная сумма ИНН")
return err
}
return err
}
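
A short, hedged usage sketch of the new requisite checks (the import path github.com/ManyakRus/starter/micro is assumed; the INN comes from the tests below, the KPP is an arbitrary 9-character placeholder):

package main

import (
	"fmt"

	"github.com/ManyakRus/starter/micro"
)

func main() {
	// legal entity: a 10-digit INN with a valid control digit plus a 9-digit KPP
	if err := micro.CheckInnKpp("5111002549", "770101001", false); err != nil {
		fmt.Println("invalid requisites:", err)
		return
	}
	fmt.Println("requisites OK")
}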
// StringFromInt64 - returns the number as a string
func StringFromInt64(i int64) string {
Otvet := ""
Otvet = strconv.FormatInt(i, 10)
return Otvet
}
// StringDate - returns the date as a string, without the time part
func StringDate(t time.Time) string {
Otvet := ""
Otvet = t.Format("02.01.2006")
return Otvet
}
// ProgramDir_bin - returns the "bin" directory, or the program directory if "bin" does not exist
func ProgramDir_bin() string {
Otvet := ""
dir := ProgramDir()
FileName := dir + "bin" + SeparatorFile()
ok, _ := FileExists(FileName)
if ok == true {
return FileName
}
Otvet = dir
return Otvet
}
// SaveTempFile - writes a byte slice to a temporary file; panics on error
func SaveTempFile(bytes []byte) string {
Otvet, err := SaveTempFile_err(bytes)
if err != nil {
TextError := fmt.Sprint("SaveTempFile() error: ", err)
print(TextError)
panic(TextError)
}
return Otvet
}
// SaveTempFile_err - writes a byte slice to a temporary file and returns the file name and an error
func SaveTempFile_err(bytes []byte) (string, error) {
Otvet := ""
// create and open a temporary file
f, err := os.CreateTemp("", "") // in Go versions older than 1.16 you can use ioutil.TempFile
if err != nil {
return Otvet, err
}
// close and remove the temporary file when this function returns
defer f.Close()
defer os.Remove(f.Name())
// write data to the temporary file
if _, err := f.Write(bytes); err != nil {
return Otvet, err
}
Otvet = f.Name()
return Otvet, err
}
// Hash - returns the FNV-1a 32-bit hash of a string
func Hash(s string) uint32 {
h := fnv.New32a()
h.Write([]byte(s))
return h.Sum32()
}
// TextError - returns the message text of an error
func TextError(err error) string {
Otvet := ""
if err != nil {
Otvet = err.Error()
}
return Otvet
}
// GetType - returns the object's type name as a string
func GetType(myvar interface{}) string {
return reflect.TypeOf(myvar).String()
}
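
And a quick sketch of the smaller helpers added above (same assumed micro import path; printed values are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/ManyakRus/starter/micro"
)

func main() {
	fmt.Println(micro.StringFromInt64(42))    // "42"
	fmt.Println(micro.StringDate(time.Now())) // e.g. "20.07.2023"
	fmt.Println(micro.Hash("starter"))        // deterministic FNV-1a 32-bit value
	fmt.Println(micro.GetType(time.Second))   // "time.Duration"
	fmt.Println(micro.TextError(nil))         // ""
}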

View File

@@ -245,3 +245,85 @@ func TestMinDate(t *testing.T) {
t.Error("microfunctions_test.TestMinDate() error: Otvet != ", now)
}
}
func TestCheckINNControlSum(t *testing.T) {
Inn := ""
err := CheckINNControlSum(Inn)
if err == nil {
t.Error("TestCheckINNControlSum() error")
}
}
func TestCheckINNControlSum10(t *testing.T) {
Inn := "5111002549"
err := CheckINNControlSum10(Inn)
if err != nil {
t.Error("TestCheckINNControlSum10() error: ", err)
}
}
func TestCheckINNControlSum12(t *testing.T) {
Inn := "510800222725"
err := CheckINNControlSum12(Inn)
if err != nil {
t.Error("TestCheckINNControlSum12() error: ", err)
}
}
func TestStringFromInt64(t *testing.T) {
Otvet := StringFromInt64(0)
if Otvet != "0" {
t.Error("TestStringFromInt64() error: != '0'")
}
}
func TestStringDate(t *testing.T) {
Otvet := StringDate(time.Now())
if Otvet == "" {
t.Error("TestStringDate() error: =''")
}
}
func TestProgramDir_bin(t *testing.T) {
Otvet := ProgramDir_bin()
if Otvet == "" {
t.Error("TestProgramDir_bin() error: =''")
}
}
func TestSaveTempFile(t *testing.T) {
bytes := []byte("123")
Otvet := SaveTempFile(bytes)
if Otvet == "" {
t.Error("TestSaveTempFile() error: Otvet =''")
}
}
func TestHash(t *testing.T) {
Otvet := Hash("123")
if Otvet == 0 {
t.Error("TestHash() error: =0")
}
}
func TestTextError(t *testing.T) {
err := errors.New("1")
s := TextError(err)
if s != "1" {
t.Error("TestTextError() error")
}
}
func TestGetType(t *testing.T) {
Otvet := GetType(1)
if Otvet != "int" {
t.Error("TestGetType() error: Otvet: ", Otvet)
}
}

View File

@@ -49,7 +49,7 @@ func Connect() {
FillSettings()
}
ping.Ping(Settings.MINIO_HOST, Settings.MINIO_PORT)
//ping.Ping(Settings.MINIO_HOST, Settings.MINIO_PORT)
err := Connect_err()
if err != nil {

View File

@@ -106,7 +106,10 @@ func TestCreateBucketCtx(t *testing.T) {
defer CloseConnection()
ctxMain := contextmain.GetContext()
CreateBucketCtx(ctxMain, "claim", "moscow")
err := CreateBucketCtx_err(ctxMain, "claim", "moscow")
if err != nil {
t.Error("TestCreateBucketCtx() error: ", err)
}
}
func TestUploadFileCtx(t *testing.T) {

View File

@@ -252,6 +252,7 @@ func StartDB() {
// FillSettings loads settings into the structure from a file or from environment variables
func FillSettings() {
Settings = SettingsINI{}
// fill from environment variables
Settings.DB_HOST = os.Getenv("DB_HOST")
Settings.DB_PORT = os.Getenv("DB_PORT")
Settings.DB_NAME = os.Getenv("DB_NAME")
@@ -259,6 +260,16 @@ func FillSettings() {
Settings.DB_USER = os.Getenv("DB_USER")
Settings.DB_PASSWORD = os.Getenv("DB_PASSWORD")
// fill from environment variables using Nechaev's naming (STORE_*)
if Settings.DB_HOST == "" {
Settings.DB_HOST = os.Getenv("STORE_HOST")
Settings.DB_PORT = os.Getenv("STORE_PORT")
Settings.DB_NAME = os.Getenv("STORE_NAME")
Settings.DB_SCHEMA = os.Getenv("STORE_SCHEME")
Settings.DB_USER = os.Getenv("STORE_LOGIN")
Settings.DB_PASSWORD = os.Getenv("STORE_PASSWORD")
}
if Settings.DB_HOST == "" {
log.Panicln("Need fill DB_HOST ! in os.ENV ")
}
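
A hedged sketch of the environment variables FillSettings now understands; the STORE_* fallbacks are consulted only when DB_HOST is empty, and the values below are placeholders:

package main

import "os"

func main() {
	// primary names read by FillSettings
	os.Setenv("DB_HOST", "localhost") // fallback: STORE_HOST
	os.Setenv("DB_PORT", "5432")      // fallback: STORE_PORT
	os.Setenv("DB_NAME", "claims")    // fallback: STORE_NAME
	os.Setenv("DB_SCHEMA", "public")  // fallback: STORE_SCHEME
	os.Setenv("DB_USER", "postgres")  // fallback: STORE_LOGIN
	os.Setenv("DB_PASSWORD", "pass")  // fallback: STORE_PASSWORD
}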

View File

@@ -41,6 +41,11 @@ var TotalMessagesSendingNow int32
// SecondsWaitTotalMessagesSendingNow - number of seconds to wait for the last message to be sent
const SecondsWaitTotalMessagesSendingNow = 10
// SetWaitGroup_Main - assigns an external WaitGroup
func SetWaitGroup_Main(wg *sync.WaitGroup) {
wgMain = wg
}
// GetWaitGroup_Main - returns the WaitGroup used to wait for all parts of the program to finish
func GetWaitGroup_Main() *sync.WaitGroup {
lockWGMain.Lock()
@@ -50,9 +55,11 @@ func GetWaitGroup_Main() *sync.WaitGroup {
// wgMain = &sync.WaitGroup{}
//}
onceWGMain.Do(func() {
if wgMain == nil {
//onceWGMain.Do(func() {
wgMain = &sync.WaitGroup{}
})
//})
}
return wgMain
}
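
A minimal sketch of the new SetWaitGroup_Main hook (the import path github.com/ManyakRus/starter/stopapp is assumed):

package main

import (
	"sync"

	"github.com/ManyakRus/starter/stopapp"
)

func main() {
	// hand the starter packages an external WaitGroup before launching anything
	wg := &sync.WaitGroup{}
	stopapp.SetWaitGroup_Main(wg)

	wg.Add(1)
	go func() {
		defer wg.Done()
		// worker
	}()

	// every other package sees the same group via GetWaitGroup_Main()
	stopapp.GetWaitGroup_Main().Wait()
}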

View File

@@ -1,5 +1,9 @@
package stopapp
import (
"testing"
)
//import (
// "testing"
//
@@ -39,3 +43,11 @@ package stopapp
//func TestWaitTotalMessagesSendingNow(t *testing.T) {
// WaitTotalMessagesSendingNow("stopapp_test")
//}
func TestSetWaitGroup_Main(t *testing.T) {
SetWaitGroup_Main(nil)
wg := GetWaitGroup_Main()
if wg == nil {
t.Error("TestSetWaitGroup_Main() error: wg = nil")
}
}

View File

@@ -109,7 +109,7 @@ func Test_termAuth_Phone(t *testing.T) {
a := termAuth{
phone: "111",
}
got, err := a.Phone(contextmain.Ctx)
got, err := a.Phone(contextmain.GetContext())
if got != "111" {
t.Error("telegramclient_test.Test_termAuth_Phone() error: ", err)
}

test.sh — new executable file, 1 line
View File

@@ -0,0 +1 @@
export SERVICE_NAME="nikitin"

View File

@@ -77,7 +77,7 @@ func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _version = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb2\xd0\x33\xd2\x33\xe5\x02\x04\x00\x00\xff\xff\x6e\xc5\x0f\x0d\x06\x00\x00\x00")
var _version = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb2\xd0\x33\xd2\x33\xe7\x02\x04\x00\x00\xff\xff\xec\xa7\x39\x3f\x06\x00\x00\x00")
func versionBytes() ([]byte, error) {
return bindataRead(
@@ -92,7 +92,7 @@ func version() (*asset, error) {
return nil, err
}
info := bindataFileInfo{name: "VERSION", size: 6, mode: os.FileMode(420), modTime: time.Unix(1684244223, 0)}
info := bindataFileInfo{name: "VERSION", size: 6, mode: os.FileMode(420), modTime: time.Unix(1686657931, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}

View File

@@ -64,9 +64,7 @@ func MapStructMarshal(value reflect.Value, tag string, omitempty, omitminus bool
dostrconv = strings.Contains(tagvalue, ",string")
}
if name == "-" {
if omitminus {
continue
} else {
if !omitminus {
name = ""
}
}

View File

@@ -11,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zbc
import (

View File

@@ -30,7 +30,7 @@ import (
)
// Version of current fiber package
const Version = "2.46.0"
const Version = "2.47.0"
// Handler defines a function to serve HTTP requests.
type Handler = func(*Ctx) error
@@ -609,18 +609,23 @@ func (app *App) SetTLSHandler(tlsHandler *TLSHandler) {
// Name Assign name to specific route.
func (app *App) Name(name string) Router {
app.mutex.Lock()
defer app.mutex.Unlock()
latestGroup := app.latestRoute.group
if latestGroup != nil {
app.latestRoute.Name = latestGroup.name + name
} else {
app.latestRoute.Name = name
for _, routes := range app.stack {
for _, route := range routes {
if route.Path == app.latestRoute.path {
route.Name = name
if route.group != nil {
route.Name = route.group.name + route.Name
}
}
}
}
if err := app.hooks.executeOnNameHooks(*app.latestRoute); err != nil {
panic(err)
}
app.mutex.Unlock()
return app
}
@@ -754,12 +759,16 @@ func (app *App) Patch(path string, handlers ...Handler) Router {
// Add allows you to specify a HTTP method to register a route
func (app *App) Add(method, path string, handlers ...Handler) Router {
return app.register(method, path, nil, handlers...)
app.register(method, path, nil, handlers...)
return app
}
// Static will create a file server serving static files
func (app *App) Static(prefix, root string, config ...Static) Router {
return app.registerStatic(prefix, root, config...)
app.registerStatic(prefix, root, config...)
return app
}
// All will register the handler on all HTTP methods
@@ -1083,10 +1092,6 @@ func (app *App) serverErrorHandler(fctx *fasthttp.RequestCtx, err error) {
// startupProcess Is the method which executes all the necessary processes just before the start of the server.
func (app *App) startupProcess() *App {
if err := app.hooks.executeOnListenHooks(); err != nil {
panic(err)
}
app.mutex.Lock()
defer app.mutex.Unlock()
@@ -1097,3 +1102,10 @@ func (app *App) startupProcess() *App {
return app
}
// Run onListen hooks. If they return an error, panic.
func (app *App) runOnListenHooks() {
if err := app.hooks.executeOnListenHooks(); err != nil {
panic(err)
}
}

View File

@@ -597,7 +597,7 @@ func (c *Ctx) GetRespHeader(key string, defaultValue ...string) string {
func (c *Ctx) GetReqHeaders() map[string]string {
headers := make(map[string]string)
c.Request().Header.VisitAll(func(k, v []byte) {
headers[string(k)] = c.app.getString(v)
headers[c.app.getString(k)] = c.app.getString(v)
})
return headers
@@ -609,7 +609,7 @@ func (c *Ctx) GetReqHeaders() map[string]string {
func (c *Ctx) GetRespHeaders() map[string]string {
headers := make(map[string]string)
c.Response().Header.VisitAll(func(k, v []byte) {
headers[string(k)] = c.app.getString(v)
headers[c.app.getString(k)] = c.app.getString(v)
})
return headers
@@ -1054,6 +1054,35 @@ func (c *Ctx) Query(key string, defaultValue ...string) string {
return defaultString(c.app.getString(c.fasthttp.QueryArgs().Peek(key)), defaultValue)
}
// Queries returns a map of query parameters and their values.
//
// GET /?name=alex&wanna_cake=2&id=
// Queries()["name"] == "alex"
// Queries()["wanna_cake"] == "2"
// Queries()["id"] == ""
//
// GET /?field1=value1&field1=value2&field2=value3
// Queries()["field1"] == "value2"
// Queries()["field2"] == "value3"
//
// GET /?list_a=1&list_a=2&list_a=3&list_b[]=1&list_b[]=2&list_b[]=3&list_c=1,2,3
// Queries()["list_a"] == "3"
// Queries()["list_b[]"] == "3"
// Queries()["list_c"] == "1,2,3"
//
// GET /api/search?filters.author.name=John&filters.category.name=Technology&filters[customer][name]=Alice&filters[status]=pending
// Queries()["filters.author.name"] == "John"
// Queries()["filters.category.name"] == "Technology"
// Queries()["filters[customer][name]"] == "Alice"
// Queries()["filters[status]"] == "pending"
func (c *Ctx) Queries() map[string]string {
m := make(map[string]string, c.Context().QueryArgs().Len())
c.Context().QueryArgs().VisitAll(func(key, value []byte) {
m[c.app.getString(key)] = c.app.getString(value)
})
return m
}
// QueryInt returns integer value of key string parameter in the url.
// Default to empty or invalid key is 0.
//

View File

@@ -11,17 +11,26 @@ import (
// Group struct
type Group struct {
app *App
parentGroup *Group
name string
app *App
parentGroup *Group
name string
anyRouteDefined bool
Prefix string
}
// Name Assign name to specific route.
// Name Assign name to specific route or group itself.
//
// If this method is used before any route is added to the group, it'll set the group name and the OnGroupNameHook will be used.
// Otherwise, it'll set route name and OnName hook will be used.
func (grp *Group) Name(name string) Router {
grp.app.mutex.Lock()
if grp.anyRouteDefined {
grp.app.Name(name)
return grp
}
grp.app.mutex.Lock()
if grp.parentGroup != nil {
grp.name = grp.parentGroup.name + name
} else {
@@ -76,6 +85,10 @@ func (grp *Group) Use(args ...interface{}) Router {
grp.app.register(methodUse, getGroupPath(grp.Prefix, prefix), grp, handlers...)
}
if !grp.anyRouteDefined {
grp.anyRouteDefined = true
}
return grp
}
@@ -135,12 +148,22 @@ func (grp *Group) Patch(path string, handlers ...Handler) Router {
// Add allows you to specify a HTTP method to register a route
func (grp *Group) Add(method, path string, handlers ...Handler) Router {
return grp.app.register(method, getGroupPath(grp.Prefix, path), grp, handlers...)
grp.app.register(method, getGroupPath(grp.Prefix, path), grp, handlers...)
if !grp.anyRouteDefined {
grp.anyRouteDefined = true
}
return grp
}
// Static will create a file server serving static files
func (grp *Group) Static(prefix, root string, config ...Static) Router {
return grp.app.registerStatic(getGroupPath(grp.Prefix, prefix), root, config...)
grp.app.registerStatic(getGroupPath(grp.Prefix, prefix), root, config...)
if !grp.anyRouteDefined {
grp.anyRouteDefined = true
}
return grp
}
// All will register the handler on all HTTP methods
@@ -158,7 +181,7 @@ func (grp *Group) All(path string, handlers ...Handler) Router {
func (grp *Group) Group(prefix string, handlers ...Handler) Router {
prefix = getGroupPath(grp.Prefix, prefix)
if len(handlers) > 0 {
_ = grp.app.register(methodUse, prefix, grp, handlers...)
grp.app.register(methodUse, prefix, grp, handlers...)
}
// Create new group

View File

@@ -25,6 +25,16 @@ import (
"github.com/valyala/fasthttp"
)
// acceptedType is a struct that holds the parsed value of an Accept header
// along with quality, specificity, and order.
// used for sorting accept headers.
type acceptedType struct {
spec string
quality float64
specificity int
order int
}
// getTLSConfig returns a net listener's tls config
func getTLSConfig(ln net.Listener) *tls.Config {
// Get listener type
@@ -263,33 +273,89 @@ func acceptsOfferType(spec, offerType string) bool {
func getOffer(header string, isAccepted func(spec, offer string) bool, offers ...string) string {
if len(offers) == 0 {
return ""
} else if header == "" {
}
if header == "" {
return offers[0]
}
for _, offer := range offers {
if len(offer) == 0 {
continue
// Parse header and get accepted types with their quality and specificity
// See: https://www.rfc-editor.org/rfc/rfc9110#name-content-negotiation-fields
spec, commaPos, order := "", 0, 0
acceptedTypes := make([]acceptedType, 0, 20)
for len(header) > 0 {
order++
// Skip spaces
header = utils.TrimLeft(header, ' ')
// Get spec
commaPos = strings.IndexByte(header, ',')
if commaPos != -1 {
spec = utils.Trim(header[:commaPos], ' ')
} else {
spec = utils.TrimLeft(header, ' ')
}
spec, commaPos := "", 0
for len(header) > 0 && commaPos != -1 {
commaPos = strings.IndexByte(header, ',')
if commaPos != -1 {
spec = utils.Trim(header[:commaPos], ' ')
} else {
spec = utils.TrimLeft(header, ' ')
}
if factorSign := strings.IndexByte(spec, ';'); factorSign != -1 {
spec = spec[:factorSign]
}
// isAccepted if the current offer is accepted
if isAccepted(spec, offer) {
return offer
// Get quality
quality := 1.0
if factorSign := strings.IndexByte(spec, ';'); factorSign != -1 {
factor := utils.Trim(spec[factorSign+1:], ' ')
if strings.HasPrefix(factor, "q=") {
if q, err := fasthttp.ParseUfloat(utils.UnsafeBytes(factor[2:])); err == nil {
quality = q
}
}
spec = spec[:factorSign]
}
// Skip if quality is 0.0
// See: https://www.rfc-editor.org/rfc/rfc9110#quality.values
if quality == 0.0 {
if commaPos != -1 {
header = header[commaPos+1:]
} else {
break
}
continue
}
// Get specificity
specificity := 0
// check for wildcard this could be a mime */* or a wildcard character *
if spec == "*/*" || spec == "*" {
specificity = 1
} else if strings.HasSuffix(spec, "/*") {
specificity = 2
} else if strings.IndexByte(spec, '/') != -1 {
specificity = 3
} else {
specificity = 4
}
// Add to accepted types
acceptedTypes = append(acceptedTypes, acceptedType{spec, quality, specificity, order})
// Next
if commaPos != -1 {
header = header[commaPos+1:]
} else {
break
}
}
if len(acceptedTypes) > 1 {
// Sort accepted types by quality and specificity, preserving order of equal elements
sortAcceptedTypes(&acceptedTypes)
}
// Find the first offer that matches the accepted types
for _, acceptedType := range acceptedTypes {
for _, offer := range offers {
if len(offer) == 0 {
continue
}
if isAccepted(acceptedType.spec, offer) {
return offer
}
}
}
@@ -297,6 +363,35 @@ func getOffer(header string, isAccepted func(spec, offer string) bool, offers ..
return ""
}
// sortAcceptedTypes sorts accepted types by quality and specificity, preserving order of equal elements
//
// Parameters are not supported, they are ignored when sorting by specificity.
//
// See: https://www.rfc-editor.org/rfc/rfc9110#name-content-negotiation-fields
func sortAcceptedTypes(at *[]acceptedType) {
if at == nil || len(*at) < 2 {
return
}
acceptedTypes := *at
for i := 1; i < len(acceptedTypes); i++ {
lo, hi := 0, i-1
for lo <= hi {
mid := (lo + hi) / 2
if acceptedTypes[i].quality < acceptedTypes[mid].quality ||
(acceptedTypes[i].quality == acceptedTypes[mid].quality && acceptedTypes[i].specificity < acceptedTypes[mid].specificity) ||
(acceptedTypes[i].quality == acceptedTypes[mid].quality && acceptedTypes[i].specificity == acceptedTypes[mid].specificity && acceptedTypes[i].order > acceptedTypes[mid].order) {
lo = mid + 1
} else {
hi = mid - 1
}
}
for j := i; j > lo; j-- {
acceptedTypes[j-1], acceptedTypes[j] = acceptedTypes[j], acceptedTypes[j-1]
}
}
}
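
The quality-aware parsing above surfaces through fiber's public Accepts API; a minimal, hedged sketch:

package main

import "github.com/gofiber/fiber/v2"

func main() {
	app := fiber.New()
	app.Get("/", func(c *fiber.Ctx) error {
		// For "Accept: text/*;q=0.5, application/json" the JSON offer wins:
		// q=1.0 beats q=0.5, and a full type is more specific than text/*.
		return c.SendString(c.Accepts("text/html", "application/json"))
	})
	_ = app.Listen(":3000")
}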
func matchEtag(s, etag string) bool {
if s == etag || s == "W/"+etag || "W/"+s == etag {
return true

View File

@@ -30,6 +30,9 @@ func (app *App) Listener(ln net.Listener) error {
// prepare the server for the start
app.startupProcess()
// run hooks
app.runOnListenHooks()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), getTLSConfig(ln) != nil, "")
@@ -68,6 +71,9 @@ func (app *App) Listen(addr string) error {
// prepare the server for the start
app.startupProcess()
// run hooks
app.runOnListenHooks()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), false, "")
@@ -130,6 +136,9 @@ func (app *App) ListenTLSWithCertificate(addr string, cert tls.Certificate) erro
// prepare the server for the start
app.startupProcess()
// run hooks
app.runOnListenHooks()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), true, "")
@@ -202,6 +211,9 @@ func (app *App) ListenMutualTLSWithCertificate(addr string, cert tls.Certificate
// prepare the server for the start
app.startupProcess()
// run hooks
app.runOnListenHooks()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), true, "")
@@ -457,10 +469,10 @@ func (app *App) printRoutesMessage() {
return routes[i].path < routes[j].path
})
_, _ = fmt.Fprintf(w, "%smethod\t%s| %spath\t%s| %sname\t%s| %shandlers\n", colors.Blue, colors.White, colors.Green, colors.White, colors.Cyan, colors.White, colors.Yellow)
_, _ = fmt.Fprintf(w, "%s------\t%s| %s----\t%s| %s----\t%s| %s--------\n", colors.Blue, colors.White, colors.Green, colors.White, colors.Cyan, colors.White, colors.Yellow)
_, _ = fmt.Fprintf(w, "%smethod\t%s| %spath\t%s| %sname\t%s| %shandlers\t%s\n", colors.Blue, colors.White, colors.Green, colors.White, colors.Cyan, colors.White, colors.Yellow, colors.Reset)
_, _ = fmt.Fprintf(w, "%s------\t%s| %s----\t%s| %s----\t%s| %s--------\t%s\n", colors.Blue, colors.White, colors.Green, colors.White, colors.Cyan, colors.White, colors.Yellow, colors.Reset)
for _, route := range routes {
_, _ = fmt.Fprintf(w, "%s%s\t%s| %s%s\t%s| %s%s\t%s| %s%s\n", colors.Blue, route.method, colors.White, colors.Green, route.path, colors.White, colors.Cyan, route.name, colors.White, colors.Yellow, route.handlers)
_, _ = fmt.Fprintf(w, "%s%s\t%s| %s%s\t%s| %s%s\t%s| %s%s%s\n", colors.Blue, route.method, colors.White, colors.Green, route.path, colors.White, colors.Cyan, route.name, colors.White, colors.Yellow, route.handlers, colors.Reset)
}
_ = w.Flush() //nolint:errcheck // It is fine to ignore the error here

View File

@@ -126,6 +126,10 @@ func (app *App) prefork(network, addr string, tlsConfig *tls.Config) error {
}()
}
// Run onListen hooks
// Hooks have to be run here, unlike in non-prefork mode, because they should run in the child or master process
app.runOnListenHooks()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(addr, tlsConfig != nil, ","+strings.Join(pids, ","))

View File

@@ -225,7 +225,7 @@ func (*App) copyRoute(route *Route) *Route {
}
}
func (app *App) register(method, pathRaw string, group *Group, handlers ...Handler) Router {
func (app *App) register(method, pathRaw string, group *Group, handlers ...Handler) {
// Uppercase HTTP methods
method = utils.ToUpper(method)
// Check if the HTTP method is valid unless it's USE
@@ -302,10 +302,9 @@ func (app *App) register(method, pathRaw string, group *Group, handlers ...Handl
// Add route to stack
app.addRoute(method, &route, isMount)
}
return app
}
func (app *App) registerStatic(prefix, root string, config ...Static) Router {
func (app *App) registerStatic(prefix, root string, config ...Static) {
// For security we want to restrict to the current work directory.
if root == "" {
root = "."
@@ -441,7 +440,6 @@ func (app *App) registerStatic(prefix, root string, config ...Static) Router {
app.addRoute(MethodGet, &route)
// Add HEAD route
app.addRoute(MethodHead, &route)
return app
}
func (app *App) addRoute(method string, route *Route, isMounted ...bool) {

View File

@@ -1,3 +1,31 @@
# 5.4.1 (June 18, 2023)
* Fix: concurrency bug with pgtypeDefaultMap and simple protocol (Lev Zakharov)
* Add TxOptions.BeginQuery to allow overriding the default BEGIN query
# 5.4.0 (June 14, 2023)
* Replace platform specific syscalls for non-blocking IO with more traditional goroutines and deadlines. This returns to the v4 approach with some additional improvements and fixes. This restores the ability to use a pgx.Conn over an ssh.Conn as well as other non-TCP or Unix socket connections. In addition, it is a significantly simpler implementation that is less likely to have cross platform issues.
* Optimization: The default type registrations are now shared among all connections. This saves about 100KB of memory per connection. `pgtype.Type` and `pgtype.Codec` values are now required to be immutable after registration. This was already necessary in most cases but wasn't documented until now. (Lev Zakharov)
* Fix: Ensure pgxpool.Pool.QueryRow.Scan releases connection on panic
* CancelRequest: don't try to read the reply (Nicola Murino)
* Fix: correctly handle bool type aliases (Wichert Akkerman)
* Fix: pgconn.CancelRequest: Fix unix sockets: don't use RemoteAddr()
* Fix: pgx.Conn memory leak with prepared statement caching (Evan Jones)
* Add BeforeClose to pgxpool.Pool (Evan Cordell)
* Fix: various hstore fixes and optimizations (Evan Jones)
* Fix: RowToStructByPos with embedded unexported struct
* Support different bool string representations (Lev Zakharov)
* Fix: error when using BatchResults.Exec on a select that returns an error after some rows.
* Fix: pipelineBatchResults.Exec() not returning error from ResultReader
* Fix: pipeline batch results not closing pipeline when error occurs while reading directly from results instead of using
a callback.
* Fix: scanning a table type into a struct
* Fix: scan array of record to pointer to slice of struct
* Fix: handle null for json (Cemre Mengu)
* Batch Query callback is called even when there is an error
* Add RowTo(AddrOf)StructByNameLax (Audi P. Risa P)
# 5.3.1 (February 27, 2023)
* Fix: Support v4 and v5 stdlib in same program (Tomáš Procházka)

View File

@@ -132,13 +132,24 @@ These adapters can be used with the tracelog package.
* [github.com/jackc/pgx-logrus](https://github.com/jackc/pgx-logrus)
* [github.com/jackc/pgx-zap](https://github.com/jackc/pgx-zap)
* [github.com/jackc/pgx-zerolog](https://github.com/jackc/pgx-zerolog)
* [github.com/mcosta74/pgx-slog](https://github.com/mcosta74/pgx-slog)
## 3rd Party Libraries with PGX Support
### [github.com/pashagolub/pgxmock](https://github.com/pashagolub/pgxmock)
pgxmock is a mock library implementing pgx interfaces.
pgxmock has one and only purpose - to simulate pgx behavior in tests, without needing a real database connection.
### [github.com/georgysavva/scany](https://github.com/georgysavva/scany)
Library for scanning data from a database into Go structs and more.
### [github.com/vingarcia/ksql](https://github.com/vingarcia/ksql)
A carefully designed SQL client for making using SQL easier,
more productive, and less error-prone on Golang.
### [https://github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
Adds GSSAPI / Kerberos authentication support.

View File

@@ -21,13 +21,10 @@ type batchItemFunc func(br BatchResults) error
// Query sets fn to be called when the response to qq is received.
func (qq *QueuedQuery) Query(fn func(rows Rows) error) {
qq.fn = func(br BatchResults) error {
rows, err := br.Query()
if err != nil {
return err
}
rows, _ := br.Query()
defer rows.Close()
err = fn(rows)
err := fn(rows)
if err != nil {
return err
}
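The QueuedQuery.Query callback above is invoked while the batch results are drained. A hedged usage sketch with the public pgx v5 API (the DSN and query are placeholders):
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	batch := &pgx.Batch{}
	// Queue returns a *QueuedQuery; Query registers a callback that receives the
	// rows for this statement while the batch results are being closed.
	batch.Queue("select n from generate_series(1, 3) n").Query(func(rows pgx.Rows) error {
		for rows.Next() {
			var n int
			if err := rows.Scan(&n); err != nil {
				return err
			}
			fmt.Println(n)
		}
		return rows.Err()
	})

	// Close drains the batch and invokes the registered callbacks.
	if err := conn.SendBatch(ctx, batch).Close(); err != nil {
		log.Fatal(err)
	}
}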
@@ -142,7 +139,10 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
}
commandTag, err := br.mrr.ResultReader().Close()
br.err = err
if err != nil {
br.err = err
br.mrr.Close()
}
if br.conn.batchTracer != nil {
br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
@@ -228,7 +228,7 @@ func (br *batchResults) Close() error {
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
if br.b.queuedQueries[br.qqIdx].fn != nil {
err := br.b.queuedQueries[br.qqIdx].fn(br)
if err != nil && br.err == nil {
if err != nil {
br.err = err
}
} else {
@@ -290,7 +290,7 @@ func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
results, err := br.pipeline.GetResults()
if err != nil {
br.err = err
return pgconn.CommandTag{}, err
return pgconn.CommandTag{}, br.err
}
var commandTag pgconn.CommandTag
switch results := results.(type) {
@@ -309,7 +309,7 @@ func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
})
}
return commandTag, err
return commandTag, br.err
}
// Query reads the results from the next query in the batch as if the query has been sent with Query.
@@ -384,24 +384,20 @@ func (br *pipelineBatchResults) Close() error {
}
}()
if br.err != nil {
return br.err
}
if br.lastRows != nil && br.lastRows.err != nil {
if br.err == nil && br.lastRows != nil && br.lastRows.err != nil {
br.err = br.lastRows.err
return br.err
}
if br.closed {
return nil
return br.err
}
// Read and run fn for all remaining items
for br.err == nil && !br.closed && br.b != nil && br.qqIdx < len(br.b.queuedQueries) {
if br.b.queuedQueries[br.qqIdx].fn != nil {
err := br.b.queuedQueries[br.qqIdx].fn(br)
if err != nil && br.err == nil {
if err != nil {
br.err = err
}
} else {

View File

@@ -178,7 +178,7 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
case "simple_protocol":
defaultQueryExecMode = QueryExecModeSimpleProtocol
default:
return nil, fmt.Errorf("invalid default_query_exec_mode: %v", err)
return nil, fmt.Errorf("invalid default_query_exec_mode: %s", s)
}
}
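The switch above maps the default_query_exec_mode connection parameter onto a QueryExecMode. A hedged sketch of setting the mode via the connection string or on the parsed config (the DSN is a placeholder; the set of accepted parameter values in the comment is assumed from pgx v5's documentation):
package main

import (
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	// Placeholder DSN; default_query_exec_mode selects how queries are executed
	// (simple_protocol, exec, describe_exec, cache_describe, cache_statement).
	cfg, err := pgx.ParseConfig("postgres://user:pass@localhost:5432/db?default_query_exec_mode=simple_protocol")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.DefaultQueryExecMode) // the parsed mode

	// The same setting can be made programmatically on the parsed config.
	cfg.DefaultQueryExecMode = pgx.QueryExecModeDescribeExec
}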
@@ -382,11 +382,9 @@ func quoteIdentifier(s string) string {
return `"` + strings.ReplaceAll(s, `"`, `""`) + `"`
}
// Ping executes an empty sql statement against the *Conn
// If the sql returns without error, the database Ping is considered successful, otherwise, the error is returned.
// Ping delegates to the underlying *pgconn.PgConn.Ping.
func (c *Conn) Ping(ctx context.Context) error {
_, err := c.Exec(ctx, ";")
return err
return c.pgConn.Ping(ctx)
}
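A brief usage sketch of Ping with a timeout (the DSN is a placeholder); an error indicates the connection is broken or the server is unreachable:
package main

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Ping now delegates to the underlying pgconn connection; an error means the
	// connection is broken or the server is unreachable.
	if err := conn.Ping(ctx); err != nil {
		log.Fatalf("ping failed: %v", err)
	}
}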
// PgConn returns the underlying *pgconn.PgConn. This is an escape hatch method that allows lower level access to the
@@ -585,8 +583,10 @@ const (
QueryExecModeCacheDescribe
// Get the statement description on every execution. This uses the extended protocol. Queries require two round trips
// to execute. It does not use prepared statements (allowing usage with most connection poolers) and is safe even
// when the database schema is modified concurrently.
// to execute. It does not use named prepared statements. But it does use the unnamed prepared statement to get the
// statement description on the first round trip and then uses it to execute the query on the second round trip. This
// may cause problems with connection poolers that switch the underlying connection between round trips. It is safe
// even when the database schema is modified concurrently.
QueryExecModeDescribeExec
// Assume the PostgreSQL query parameter types based on the Go type of the arguments. This uses the extended protocol
@@ -648,6 +648,9 @@ type QueryRewriter interface {
// returned Rows even if an error is returned. The error will be the available in rows.Err() after rows are closed. It
// is allowed to ignore the error returned from Query and handle it in Rows.
//
// It is possible for a call of FieldDescriptions on the returned Rows to return nil even if the Query call did not
// return an error.
//
// It is possible for a query to return one or more rows before encountering an error. In most cases the rows should be
// collected before processing rather than processed while receiving each row. This avoids the possibility of the
// application processing rows from a query that the server rejected. The CollectRows function is useful here.
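As the comment above suggests, rows are usually best collected before processing. A hedged sketch using CollectRows with RowToStructByName (the DSN, table, and struct are placeholders):
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

// user is a hypothetical row type; RowToStructByName matches columns to fields by name.
type user struct {
	ID   int64
	Name string
}

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// The Query error can be ignored here; CollectRows surfaces it via rows.Err().
	rows, _ := conn.Query(ctx, "select id, name from users") // hypothetical table
	users, err := pgx.CollectRows(rows, pgx.RowToStructByName[user])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(users)
}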
@@ -975,7 +978,7 @@ func (c *Conn) sendBatchQueryExecModeExec(ctx context.Context, b *Batch) *batchR
func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
if c.statementCache == nil {
return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledStatementCache}
return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledStatementCache, closed: true}
}
distinctNewQueries := []*pgconn.StatementDescription{}
@@ -1007,7 +1010,7 @@ func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batc
func (c *Conn) sendBatchQueryExecModeCacheDescribe(ctx context.Context, b *Batch) (pbr *pipelineBatchResults) {
if c.descriptionCache == nil {
return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledDescriptionCache}
return &pipelineBatchResults{ctx: ctx, conn: c, err: errDisabledDescriptionCache, closed: true}
}
distinctNewQueries := []*pgconn.StatementDescription{}
@@ -1074,18 +1077,18 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
err := pipeline.Sync()
if err != nil {
return &pipelineBatchResults{ctx: ctx, conn: c, err: err}
return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
}
for _, sd := range distinctNewQueries {
results, err := pipeline.GetResults()
if err != nil {
return &pipelineBatchResults{ctx: ctx, conn: c, err: err}
return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
}
resultSD, ok := results.(*pgconn.StatementDescription)
if !ok {
return &pipelineBatchResults{ctx: ctx, conn: c, err: fmt.Errorf("expected statement description, got %T", results)}
return &pipelineBatchResults{ctx: ctx, conn: c, err: fmt.Errorf("expected statement description, got %T", results), closed: true}
}
// Fill in the previously empty / pending statement descriptions.
@@ -1095,12 +1098,12 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
results, err := pipeline.GetResults()
if err != nil {
return &pipelineBatchResults{ctx: ctx, conn: c, err: err}
return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
}
_, ok := results.(*pgconn.PipelineSync)
if !ok {
return &pipelineBatchResults{ctx: ctx, conn: c, err: fmt.Errorf("expected sync, got %T", results)}
return &pipelineBatchResults{ctx: ctx, conn: c, err: fmt.Errorf("expected sync, got %T", results), closed: true}
}
}
@@ -1117,7 +1120,7 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
if err != nil {
// we wrap the error so the user can understand which query failed inside the batch
err = fmt.Errorf("error building query %s: %w", bi.query, err)
return &pipelineBatchResults{ctx: ctx, conn: c, err: err}
return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
}
if bi.sd.Name == "" {
@@ -1129,7 +1132,7 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
err := pipeline.Sync()
if err != nil {
return &pipelineBatchResults{ctx: ctx, conn: c, err: err}
return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
}
return &pipelineBatchResults{
@@ -1282,7 +1285,9 @@ func (c *Conn) getCompositeFields(ctx context.Context, oid uint32) ([]pgtype.Com
var fieldOID uint32
rows, _ := c.Query(ctx, `select attname, atttypid
from pg_attribute
where attrelid=$1 and not attisdropped
where attrelid=$1
and not attisdropped
and attnum > 0
order by attnum`,
typrelid,
)
@@ -1324,6 +1329,7 @@ func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error
for _, sd := range invalidatedStatements {
pipeline.SendDeallocate(sd.Name)
delete(c.preparedStatements, sd.Name)
}
err := pipeline.Sync()

View File

@@ -1,70 +0,0 @@
package nbconn
import (
"sync"
)
const minBufferQueueLen = 8
type bufferQueue struct {
lock sync.Mutex
queue []*[]byte
r, w int
}
func (bq *bufferQueue) pushBack(buf *[]byte) {
bq.lock.Lock()
defer bq.lock.Unlock()
if bq.w >= len(bq.queue) {
bq.growQueue()
}
bq.queue[bq.w] = buf
bq.w++
}
func (bq *bufferQueue) pushFront(buf *[]byte) {
bq.lock.Lock()
defer bq.lock.Unlock()
if bq.w >= len(bq.queue) {
bq.growQueue()
}
copy(bq.queue[bq.r+1:bq.w+1], bq.queue[bq.r:bq.w])
bq.queue[bq.r] = buf
bq.w++
}
func (bq *bufferQueue) popFront() *[]byte {
bq.lock.Lock()
defer bq.lock.Unlock()
if bq.r == bq.w {
return nil
}
buf := bq.queue[bq.r]
bq.queue[bq.r] = nil // Clear reference so it can be garbage collected.
bq.r++
if bq.r == bq.w {
bq.r = 0
bq.w = 0
if len(bq.queue) > minBufferQueueLen {
bq.queue = make([]*[]byte, minBufferQueueLen)
}
}
return buf
}
func (bq *bufferQueue) growQueue() {
desiredLen := (len(bq.queue) + 1) * 3 / 2
if desiredLen < minBufferQueueLen {
desiredLen = minBufferQueueLen
}
newQueue := make([]*[]byte, desiredLen)
copy(newQueue, bq.queue)
bq.queue = newQueue
}

View File

@@ -1,520 +0,0 @@
// Package nbconn implements a non-blocking net.Conn wrapper.
//
// It is designed to solve three problems.
//
// The first is resolving the deadlock that can occur when both sides of a connection are blocked writing because all
// buffers between are full. See https://github.com/jackc/pgconn/issues/27 for discussion.
//
// The second is the inability to use a write deadline with a TLS.Conn without killing the connection.
//
// The third is to efficiently check if a connection has been closed via a non-blocking read.
package nbconn
import (
"crypto/tls"
"errors"
"net"
"os"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/jackc/pgx/v5/internal/iobufpool"
)
var errClosed = errors.New("closed")
var ErrWouldBlock = new(wouldBlockError)
const fakeNonblockingWriteWaitDuration = 100 * time.Millisecond
const minNonblockingReadWaitDuration = time.Microsecond
const maxNonblockingReadWaitDuration = 100 * time.Millisecond
// NonBlockingDeadline is a magic value that when passed to Set[Read]Deadline places the connection in non-blocking read
// mode.
var NonBlockingDeadline = time.Date(1900, 1, 1, 0, 0, 0, 608536336, time.UTC)
// disableSetDeadlineDeadline is a magic value that when passed to Set[Read|Write]Deadline causes those methods to
// ignore all future calls.
var disableSetDeadlineDeadline = time.Date(1900, 1, 1, 0, 0, 0, 968549727, time.UTC)
// wouldBlockError implements net.Error so tls.Conn will recognize ErrWouldBlock as a temporary error.
type wouldBlockError struct{}
func (*wouldBlockError) Error() string {
return "would block"
}
func (*wouldBlockError) Timeout() bool { return true }
func (*wouldBlockError) Temporary() bool { return true }
// Conn is a net.Conn where Write never blocks and always succeeds. Flush or Read must be called to actually write to
// the underlying connection.
type Conn interface {
net.Conn
// Flush flushes any buffered writes.
Flush() error
// BufferReadUntilBlock reads and buffers any successfully read bytes until the read would block.
BufferReadUntilBlock() error
}
// NetConn is a non-blocking net.Conn wrapper. It implements net.Conn.
type NetConn struct {
// 64 bit fields accessed with atomics must be at beginning of struct to guarantee alignment for certain 32-bit
// architectures. See BUGS section of https://pkg.go.dev/sync/atomic and https://github.com/jackc/pgx/issues/1288 and
// https://github.com/jackc/pgx/issues/1307. Only access with atomics
closed int64 // 0 = not closed, 1 = closed
conn net.Conn
rawConn syscall.RawConn
readQueue bufferQueue
writeQueue bufferQueue
readFlushLock sync.Mutex
// non-blocking writes with syscall.RawConn are done with a callback function. By using these fields instead of the
// callback functions closure to pass the buf argument and receive the n and err results we avoid some allocations.
nonblockWriteFunc func(fd uintptr) (done bool)
nonblockWriteBuf []byte
nonblockWriteErr error
nonblockWriteN int
// non-blocking reads with syscall.RawConn are done with a callback function. By using these fields instead of the
// callback functions closure to pass the buf argument and receive the n and err results we avoid some allocations.
nonblockReadFunc func(fd uintptr) (done bool)
nonblockReadBuf []byte
nonblockReadErr error
nonblockReadN int
readDeadlineLock sync.Mutex
readDeadline time.Time
readNonblocking bool
fakeNonBlockingShortReadCount int
fakeNonblockingReadWaitDuration time.Duration
writeDeadlineLock sync.Mutex
writeDeadline time.Time
}
func NewNetConn(conn net.Conn, fakeNonBlockingIO bool) *NetConn {
nc := &NetConn{
conn: conn,
fakeNonblockingReadWaitDuration: maxNonblockingReadWaitDuration,
}
if !fakeNonBlockingIO {
if sc, ok := conn.(syscall.Conn); ok {
if rawConn, err := sc.SyscallConn(); err == nil {
nc.rawConn = rawConn
}
}
}
return nc
}
// Read implements io.Reader.
func (c *NetConn) Read(b []byte) (n int, err error) {
if c.isClosed() {
return 0, errClosed
}
c.readFlushLock.Lock()
defer c.readFlushLock.Unlock()
err = c.flush()
if err != nil {
return 0, err
}
for n < len(b) {
buf := c.readQueue.popFront()
if buf == nil {
break
}
copiedN := copy(b[n:], *buf)
if copiedN < len(*buf) {
*buf = (*buf)[copiedN:]
c.readQueue.pushFront(buf)
} else {
iobufpool.Put(buf)
}
n += copiedN
}
// If any bytes were already buffered return them without trying to do a Read. Otherwise, when the caller is trying to
// Read up to len(b) bytes but all available bytes have already been buffered the underlying Read would block.
if n > 0 {
return n, nil
}
var readNonblocking bool
c.readDeadlineLock.Lock()
readNonblocking = c.readNonblocking
c.readDeadlineLock.Unlock()
var readN int
if readNonblocking {
readN, err = c.nonblockingRead(b[n:])
} else {
readN, err = c.conn.Read(b[n:])
}
n += readN
return n, err
}
// Write implements io.Writer. It never blocks due to buffering all writes. It will only return an error if the Conn is
// closed. Call Flush to actually write to the underlying connection.
func (c *NetConn) Write(b []byte) (n int, err error) {
if c.isClosed() {
return 0, errClosed
}
buf := iobufpool.Get(len(b))
copy(*buf, b)
c.writeQueue.pushBack(buf)
return len(b), nil
}
func (c *NetConn) Close() (err error) {
swapped := atomic.CompareAndSwapInt64(&c.closed, 0, 1)
if !swapped {
return errClosed
}
defer func() {
closeErr := c.conn.Close()
if err == nil {
err = closeErr
}
}()
c.readFlushLock.Lock()
defer c.readFlushLock.Unlock()
err = c.flush()
if err != nil {
return err
}
return nil
}
func (c *NetConn) LocalAddr() net.Addr {
return c.conn.LocalAddr()
}
func (c *NetConn) RemoteAddr() net.Addr {
return c.conn.RemoteAddr()
}
// SetDeadline is the equivalent of calling SetReadDealine(t) and SetWriteDeadline(t).
func (c *NetConn) SetDeadline(t time.Time) error {
err := c.SetReadDeadline(t)
if err != nil {
return err
}
return c.SetWriteDeadline(t)
}
// SetReadDeadline sets the read deadline as t. If t == NonBlockingDeadline then future reads will be non-blocking.
func (c *NetConn) SetReadDeadline(t time.Time) error {
if c.isClosed() {
return errClosed
}
c.readDeadlineLock.Lock()
defer c.readDeadlineLock.Unlock()
if c.readDeadline == disableSetDeadlineDeadline {
return nil
}
if t == disableSetDeadlineDeadline {
c.readDeadline = t
return nil
}
if t == NonBlockingDeadline {
c.readNonblocking = true
t = time.Time{}
} else {
c.readNonblocking = false
}
c.readDeadline = t
return c.conn.SetReadDeadline(t)
}
func (c *NetConn) SetWriteDeadline(t time.Time) error {
if c.isClosed() {
return errClosed
}
c.writeDeadlineLock.Lock()
defer c.writeDeadlineLock.Unlock()
if c.writeDeadline == disableSetDeadlineDeadline {
return nil
}
if t == disableSetDeadlineDeadline {
c.writeDeadline = t
return nil
}
c.writeDeadline = t
return c.conn.SetWriteDeadline(t)
}
func (c *NetConn) Flush() error {
if c.isClosed() {
return errClosed
}
c.readFlushLock.Lock()
defer c.readFlushLock.Unlock()
return c.flush()
}
// flush does the actual work of flushing the writeQueue. readFlushLock must already be held.
func (c *NetConn) flush() error {
var stopChan chan struct{}
var errChan chan error
defer func() {
if stopChan != nil {
select {
case stopChan <- struct{}{}:
case <-errChan:
}
}
}()
for buf := c.writeQueue.popFront(); buf != nil; buf = c.writeQueue.popFront() {
remainingBuf := *buf
for len(remainingBuf) > 0 {
n, err := c.nonblockingWrite(remainingBuf)
remainingBuf = remainingBuf[n:]
if err != nil {
if !errors.Is(err, ErrWouldBlock) {
*buf = (*buf)[:len(remainingBuf)]
copy(*buf, remainingBuf)
c.writeQueue.pushFront(buf)
return err
}
// Writing was blocked. Reading might unblock it.
if stopChan == nil {
stopChan, errChan = c.bufferNonblockingRead()
}
select {
case err := <-errChan:
stopChan = nil
return err
default:
}
}
}
iobufpool.Put(buf)
}
return nil
}
func (c *NetConn) BufferReadUntilBlock() error {
for {
buf := iobufpool.Get(8 * 1024)
n, err := c.nonblockingRead(*buf)
if n > 0 {
*buf = (*buf)[:n]
c.readQueue.pushBack(buf)
} else if n == 0 {
iobufpool.Put(buf)
}
if err != nil {
if errors.Is(err, ErrWouldBlock) {
return nil
} else {
return err
}
}
}
}
func (c *NetConn) bufferNonblockingRead() (stopChan chan struct{}, errChan chan error) {
stopChan = make(chan struct{})
errChan = make(chan error, 1)
go func() {
for {
err := c.BufferReadUntilBlock()
if err != nil {
errChan <- err
return
}
select {
case <-stopChan:
return
default:
}
}
}()
return stopChan, errChan
}
func (c *NetConn) isClosed() bool {
closed := atomic.LoadInt64(&c.closed)
return closed == 1
}
func (c *NetConn) nonblockingWrite(b []byte) (n int, err error) {
if c.rawConn == nil {
return c.fakeNonblockingWrite(b)
} else {
return c.realNonblockingWrite(b)
}
}
func (c *NetConn) fakeNonblockingWrite(b []byte) (n int, err error) {
c.writeDeadlineLock.Lock()
defer c.writeDeadlineLock.Unlock()
deadline := time.Now().Add(fakeNonblockingWriteWaitDuration)
if c.writeDeadline.IsZero() || deadline.Before(c.writeDeadline) {
err = c.conn.SetWriteDeadline(deadline)
if err != nil {
return 0, err
}
defer func() {
// Ignoring error resetting deadline as there is nothing that can reasonably be done if it fails.
c.conn.SetWriteDeadline(c.writeDeadline)
if err != nil {
if errors.Is(err, os.ErrDeadlineExceeded) {
err = ErrWouldBlock
}
}
}()
}
return c.conn.Write(b)
}
func (c *NetConn) nonblockingRead(b []byte) (n int, err error) {
if c.rawConn == nil {
return c.fakeNonblockingRead(b)
} else {
return c.realNonblockingRead(b)
}
}
func (c *NetConn) fakeNonblockingRead(b []byte) (n int, err error) {
c.readDeadlineLock.Lock()
defer c.readDeadlineLock.Unlock()
// The first 5 reads only read 1 byte at a time. This should give us 4 chances to read when we are sure the bytes are
// already in Go or the OS's receive buffer.
if c.fakeNonBlockingShortReadCount < 5 && len(b) > 0 && c.fakeNonblockingReadWaitDuration < minNonblockingReadWaitDuration {
b = b[:1]
}
startTime := time.Now()
deadline := startTime.Add(c.fakeNonblockingReadWaitDuration)
if c.readDeadline.IsZero() || deadline.Before(c.readDeadline) {
err = c.conn.SetReadDeadline(deadline)
if err != nil {
return 0, err
}
defer func() {
// If the read was successful and the wait duration is not already the minimum
if err == nil && c.fakeNonblockingReadWaitDuration > minNonblockingReadWaitDuration {
endTime := time.Now()
if n > 0 && c.fakeNonBlockingShortReadCount < 5 {
c.fakeNonBlockingShortReadCount++
}
// The wait duration should be 2x the fastest read that has occurred. This should give reasonable assurance that
// a Read deadline will not block a read before it has a chance to read data already in Go or the OS's receive
// buffer.
proposedWait := endTime.Sub(startTime) * 2
if proposedWait < minNonblockingReadWaitDuration {
proposedWait = minNonblockingReadWaitDuration
}
if proposedWait < c.fakeNonblockingReadWaitDuration {
c.fakeNonblockingReadWaitDuration = proposedWait
}
}
// Ignoring error resetting deadline as there is nothing that can reasonably be done if it fails.
c.conn.SetReadDeadline(c.readDeadline)
if err != nil {
if errors.Is(err, os.ErrDeadlineExceeded) {
err = ErrWouldBlock
}
}
}()
}
return c.conn.Read(b)
}
// syscall.Conn is interface
// TLSClient establishes a TLS connection as a client over conn using config.
//
// To avoid the first Read on the returned *TLSConn also triggering a Write due to the TLS handshake and thereby
// potentially causing a read and write deadlines to behave unexpectedly, Handshake is called explicitly before the
// *TLSConn is returned.
func TLSClient(conn *NetConn, config *tls.Config) (*TLSConn, error) {
tc := tls.Client(conn, config)
err := tc.Handshake()
if err != nil {
return nil, err
}
// Ensure last written part of Handshake is actually sent.
err = conn.Flush()
if err != nil {
return nil, err
}
return &TLSConn{
tlsConn: tc,
nbConn: conn,
}, nil
}
// TLSConn is a TLS wrapper around a *Conn. It works around a temporary write error (such as a timeout) being fatal to a
// tls.Conn.
type TLSConn struct {
tlsConn *tls.Conn
nbConn *NetConn
}
func (tc *TLSConn) Read(b []byte) (n int, err error) { return tc.tlsConn.Read(b) }
func (tc *TLSConn) Write(b []byte) (n int, err error) { return tc.tlsConn.Write(b) }
func (tc *TLSConn) BufferReadUntilBlock() error { return tc.nbConn.BufferReadUntilBlock() }
func (tc *TLSConn) Flush() error { return tc.nbConn.Flush() }
func (tc *TLSConn) LocalAddr() net.Addr { return tc.tlsConn.LocalAddr() }
func (tc *TLSConn) RemoteAddr() net.Addr { return tc.tlsConn.RemoteAddr() }
func (tc *TLSConn) Close() error {
// tls.Conn.closeNotify() sets a 5 second deadline to avoid blocking, sends a TLS alert close notification, and then
// sets the deadline to now. This causes NetConn's Close not to be able to flush the write buffer. Instead we set our
// own 5 second deadline then make all set deadlines no-op.
tc.tlsConn.SetDeadline(time.Now().Add(time.Second * 5))
tc.tlsConn.SetDeadline(disableSetDeadlineDeadline)
return tc.tlsConn.Close()
}
func (tc *TLSConn) SetDeadline(t time.Time) error { return tc.tlsConn.SetDeadline(t) }
func (tc *TLSConn) SetReadDeadline(t time.Time) error { return tc.tlsConn.SetReadDeadline(t) }
func (tc *TLSConn) SetWriteDeadline(t time.Time) error { return tc.tlsConn.SetWriteDeadline(t) }

View File

@@ -1,11 +0,0 @@
//go:build !unix
package nbconn
func (c *NetConn) realNonblockingWrite(b []byte) (n int, err error) {
return c.fakeNonblockingWrite(b)
}
func (c *NetConn) realNonblockingRead(b []byte) (n int, err error) {
return c.fakeNonblockingRead(b)
}

View File

@@ -1,81 +0,0 @@
//go:build unix
package nbconn
import (
"errors"
"io"
"syscall"
)
// realNonblockingWrite does a non-blocking write. readFlushLock must already be held.
func (c *NetConn) realNonblockingWrite(b []byte) (n int, err error) {
if c.nonblockWriteFunc == nil {
c.nonblockWriteFunc = func(fd uintptr) (done bool) {
c.nonblockWriteN, c.nonblockWriteErr = syscall.Write(int(fd), c.nonblockWriteBuf)
return true
}
}
c.nonblockWriteBuf = b
c.nonblockWriteN = 0
c.nonblockWriteErr = nil
err = c.rawConn.Write(c.nonblockWriteFunc)
n = c.nonblockWriteN
c.nonblockWriteBuf = nil // ensure that no reference to b is kept.
if err == nil && c.nonblockWriteErr != nil {
if errors.Is(c.nonblockWriteErr, syscall.EWOULDBLOCK) {
err = ErrWouldBlock
} else {
err = c.nonblockWriteErr
}
}
if err != nil {
// n may be -1 when an error occurs.
if n < 0 {
n = 0
}
return n, err
}
return n, nil
}
func (c *NetConn) realNonblockingRead(b []byte) (n int, err error) {
if c.nonblockReadFunc == nil {
c.nonblockReadFunc = func(fd uintptr) (done bool) {
c.nonblockReadN, c.nonblockReadErr = syscall.Read(int(fd), c.nonblockReadBuf)
return true
}
}
c.nonblockReadBuf = b
c.nonblockReadN = 0
c.nonblockReadErr = nil
err = c.rawConn.Read(c.nonblockReadFunc)
n = c.nonblockReadN
c.nonblockReadBuf = nil // ensure that no reference to b is kept.
if err == nil && c.nonblockReadErr != nil {
if errors.Is(c.nonblockReadErr, syscall.EWOULDBLOCK) {
err = ErrWouldBlock
} else {
err = c.nonblockReadErr
}
}
if err != nil {
// n may be -1 when an error occurs.
if n < 0 {
n = 0
}
return n, err
}
// syscall read did not return an error and 0 bytes were read means EOF.
if n == 0 {
return 0, io.EOF
}
return n, nil
}

View File

@@ -42,7 +42,7 @@ func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
Data: sc.clientFirstMessage(),
}
c.frontend.Send(saslInitialResponse)
err = c.frontend.Flush()
err = c.flushWithPotentialWriteReadDeadlock()
if err != nil {
return err
}
@@ -62,7 +62,7 @@ func (c *PgConn) scramAuth(serverAuthMechanisms []string) error {
Data: []byte(sc.clientFinalMessage()),
}
c.frontend.Send(saslResponse)
err = c.frontend.Flush()
err = c.flushWithPotentialWriteReadDeadlock()
if err != nil {
return err
}

View File

@@ -0,0 +1,132 @@
// Package bgreader provides an io.Reader that can optionally buffer reads in the background.
package bgreader
import (
"io"
"sync"
"github.com/jackc/pgx/v5/internal/iobufpool"
)
const (
bgReaderStatusStopped = iota
bgReaderStatusRunning
bgReaderStatusStopping
)
// BGReader is an io.Reader that can optionally buffer reads in the background. It is safe for concurrent use.
type BGReader struct {
r io.Reader
cond *sync.Cond
bgReaderStatus int32
readResults []readResult
}
type readResult struct {
buf *[]byte
err error
}
// Start starts the background reader. If the background reader is already running this is a no-op. The background
// reader will stop automatically when the underlying reader returns an error.
func (r *BGReader) Start() {
r.cond.L.Lock()
defer r.cond.L.Unlock()
switch r.bgReaderStatus {
case bgReaderStatusStopped:
r.bgReaderStatus = bgReaderStatusRunning
go r.bgRead()
case bgReaderStatusRunning:
// no-op
case bgReaderStatusStopping:
r.bgReaderStatus = bgReaderStatusRunning
}
}
// Stop tells the background reader to stop after the in progress Read returns. It is safe to call Stop when the
// background reader is not running.
func (r *BGReader) Stop() {
r.cond.L.Lock()
defer r.cond.L.Unlock()
switch r.bgReaderStatus {
case bgReaderStatusStopped:
// no-op
case bgReaderStatusRunning:
r.bgReaderStatus = bgReaderStatusStopping
case bgReaderStatusStopping:
// no-op
}
}
func (r *BGReader) bgRead() {
keepReading := true
for keepReading {
buf := iobufpool.Get(8192)
n, err := r.r.Read(*buf)
*buf = (*buf)[:n]
r.cond.L.Lock()
r.readResults = append(r.readResults, readResult{buf: buf, err: err})
if r.bgReaderStatus == bgReaderStatusStopping || err != nil {
r.bgReaderStatus = bgReaderStatusStopped
keepReading = false
}
r.cond.L.Unlock()
r.cond.Broadcast()
}
}
// Read implements the io.Reader interface.
func (r *BGReader) Read(p []byte) (int, error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
if len(r.readResults) > 0 {
return r.readFromReadResults(p)
}
// There are no unread background read results and the background reader is stopped.
if r.bgReaderStatus == bgReaderStatusStopped {
return r.r.Read(p)
}
// Wait for results from the background reader
for len(r.readResults) == 0 {
r.cond.Wait()
}
return r.readFromReadResults(p)
}
// readFromReadResults reads a result previously read by the background reader. r.cond.L must be held.
func (r *BGReader) readFromReadResults(p []byte) (int, error) {
buf := r.readResults[0].buf
var err error
n := copy(p, *buf)
if n == len(*buf) {
err = r.readResults[0].err
iobufpool.Put(buf)
if len(r.readResults) == 1 {
r.readResults = nil
} else {
r.readResults = r.readResults[1:]
}
} else {
*buf = (*buf)[n:]
r.readResults[0].buf = buf
}
return n, err
}
func New(r io.Reader) *BGReader {
return &BGReader{
r: r,
cond: &sync.Cond{
L: &sync.Mutex{},
},
}
}

View File

@@ -63,7 +63,7 @@ func (c *PgConn) gssAuth() error {
Data: nextData,
}
c.frontend.Send(gssResponse)
err = c.frontend.Flush()
err = c.flushWithPotentialWriteReadDeadlock()
if err != nil {
return err
}

View File

@@ -13,11 +13,12 @@ import (
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/jackc/pgx/v5/internal/iobufpool"
"github.com/jackc/pgx/v5/internal/nbconn"
"github.com/jackc/pgx/v5/internal/pgio"
"github.com/jackc/pgx/v5/pgconn/internal/bgreader"
"github.com/jackc/pgx/v5/pgconn/internal/ctxwatch"
"github.com/jackc/pgx/v5/pgproto3"
)
@@ -65,17 +66,24 @@ type NotificationHandler func(*PgConn, *Notification)
// PgConn is a low-level PostgreSQL connection handle. It is not safe for concurrent usage.
type PgConn struct {
conn nbconn.Conn // the non-blocking wrapper for the underlying TCP or unix domain socket connection
conn net.Conn
pid uint32 // backend pid
secretKey uint32 // key to use to send a cancel query message to the server
parameterStatuses map[string]string // parameters that have been reported by the server
txStatus byte
frontend *pgproto3.Frontend
bgReader *bgreader.BGReader
slowWriteTimer *time.Timer
config *Config
status byte // One of connStatus* constants
bufferingReceive bool
bufferingReceiveMux sync.Mutex
bufferingReceiveMsg pgproto3.BackendMessage
bufferingReceiveErr error
peekedMsg pgproto3.BackendMessage
// Reusable / preallocated resources
@@ -266,14 +274,13 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
if err != nil {
return nil, &connectError{config: config, msg: "dial error", err: normalizeTimeoutError(ctx, err)}
}
nbNetConn := nbconn.NewNetConn(netConn, false)
pgConn.conn = nbNetConn
pgConn.contextWatcher = newContextWatcher(nbNetConn)
pgConn.conn = netConn
pgConn.contextWatcher = newContextWatcher(netConn)
pgConn.contextWatcher.Watch(ctx)
if fallbackConfig.TLSConfig != nil {
nbTLSConn, err := startTLS(nbNetConn, fallbackConfig.TLSConfig)
nbTLSConn, err := startTLS(netConn, fallbackConfig.TLSConfig)
pgConn.contextWatcher.Unwatch() // Always unwatch `netConn` after TLS.
if err != nil {
netConn.Close()
@@ -289,7 +296,9 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
pgConn.parameterStatuses = make(map[string]string)
pgConn.status = connStatusConnecting
pgConn.frontend = config.BuildFrontend(pgConn.conn, pgConn.conn)
pgConn.bgReader = bgreader.New(pgConn.conn)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
pgConn.frontend = config.BuildFrontend(pgConn.bgReader, pgConn.conn)
startupMsg := pgproto3.StartupMessage{
ProtocolVersion: pgproto3.ProtocolVersionNumber,
@@ -307,9 +316,9 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
}
pgConn.frontend.Send(&startupMsg)
if err := pgConn.frontend.Flush(); err != nil {
if err := pgConn.flushWithPotentialWriteReadDeadlock(); err != nil {
pgConn.conn.Close()
return nil, &connectError{config: config, msg: "failed to write startup message", err: err}
return nil, &connectError{config: config, msg: "failed to write startup message", err: normalizeTimeoutError(ctx, err)}
}
for {
@@ -392,7 +401,7 @@ func newContextWatcher(conn net.Conn) *ctxwatch.ContextWatcher {
)
}
func startTLS(conn *nbconn.NetConn, tlsConfig *tls.Config) (*nbconn.TLSConn, error) {
func startTLS(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
err := binary.Write(conn, binary.BigEndian, []int32{8, 80877103})
if err != nil {
return nil, err
@@ -407,17 +416,12 @@ func startTLS(conn *nbconn.NetConn, tlsConfig *tls.Config) (*nbconn.TLSConn, err
return nil, errors.New("server refused TLS connection")
}
tlsConn, err := nbconn.TLSClient(conn, tlsConfig)
if err != nil {
return nil, err
}
return tlsConn, nil
return tls.Client(conn, tlsConfig), nil
}
func (pgConn *PgConn) txPasswordMessage(password string) (err error) {
pgConn.frontend.Send(&pgproto3.PasswordMessage{Password: password})
return pgConn.frontend.Flush()
return pgConn.flushWithPotentialWriteReadDeadlock()
}
func hexMD5(s string) string {
@@ -426,6 +430,24 @@ func hexMD5(s string) string {
return hex.EncodeToString(hash.Sum(nil))
}
func (pgConn *PgConn) signalMessage() chan struct{} {
if pgConn.bufferingReceive {
panic("BUG: signalMessage when already in progress")
}
pgConn.bufferingReceive = true
pgConn.bufferingReceiveMux.Lock()
ch := make(chan struct{})
go func() {
pgConn.bufferingReceiveMsg, pgConn.bufferingReceiveErr = pgConn.frontend.Receive()
pgConn.bufferingReceiveMux.Unlock()
close(ch)
}()
return ch
}
// ReceiveMessage receives one wire protocol message from the PostgreSQL server. It must only be used when the
// connection is not busy. e.g. It is an error to call ReceiveMessage while reading the result of a query. The messages
// are still handled by the core pgconn message handling system so receiving a NotificationResponse will still trigger
@@ -465,13 +487,25 @@ func (pgConn *PgConn) peekMessage() (pgproto3.BackendMessage, error) {
return pgConn.peekedMsg, nil
}
msg, err := pgConn.frontend.Receive()
var msg pgproto3.BackendMessage
var err error
if pgConn.bufferingReceive {
pgConn.bufferingReceiveMux.Lock()
msg = pgConn.bufferingReceiveMsg
err = pgConn.bufferingReceiveErr
pgConn.bufferingReceiveMux.Unlock()
pgConn.bufferingReceive = false
// If a timeout error happened in the background try the read again.
var netErr net.Error
if errors.As(err, &netErr) && netErr.Timeout() {
msg, err = pgConn.frontend.Receive()
}
} else {
msg, err = pgConn.frontend.Receive()
}
if err != nil {
if errors.Is(err, nbconn.ErrWouldBlock) {
return nil, err
}
// Close on anything other than timeout error - everything else is fatal
var netErr net.Error
isNetErr := errors.As(err, &netErr)
@@ -582,7 +616,7 @@ func (pgConn *PgConn) Close(ctx context.Context) error {
//
// See https://github.com/jackc/pgx/issues/637
pgConn.frontend.Send(&pgproto3.Terminate{})
pgConn.frontend.Flush()
pgConn.flushWithPotentialWriteReadDeadlock()
return pgConn.conn.Close()
}
@@ -609,7 +643,7 @@ func (pgConn *PgConn) asyncClose() {
pgConn.conn.SetDeadline(deadline)
pgConn.frontend.Send(&pgproto3.Terminate{})
pgConn.frontend.Flush()
pgConn.flushWithPotentialWriteReadDeadlock()
}()
}
@@ -784,7 +818,7 @@ func (pgConn *PgConn) Prepare(ctx context.Context, name, sql string, paramOIDs [
pgConn.frontend.SendParse(&pgproto3.Parse{Name: name, Query: sql, ParameterOIDs: paramOIDs})
pgConn.frontend.SendDescribe(&pgproto3.Describe{ObjectType: 'S', Name: name})
pgConn.frontend.SendSync(&pgproto3.Sync{})
err := pgConn.frontend.Flush()
err := pgConn.flushWithPotentialWriteReadDeadlock()
if err != nil {
pgConn.asyncClose()
return nil, err
@@ -857,9 +891,28 @@ func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
// the connection config. This is important in high availability configurations where fallback connections may be
// specified or DNS may be used to load balance.
serverAddr := pgConn.conn.RemoteAddr()
cancelConn, err := pgConn.config.DialFunc(ctx, serverAddr.Network(), serverAddr.String())
var serverNetwork string
var serverAddress string
if serverAddr.Network() == "unix" {
// for unix sockets, RemoteAddr() calls getpeername() which returns the name the
// server passed to bind(). For Postgres, this is always a relative path "./.s.PGSQL.5432"
// so connecting to it will fail. Fall back to the config's value
serverNetwork, serverAddress = NetworkAddress(pgConn.config.Host, pgConn.config.Port)
} else {
serverNetwork, serverAddress = serverAddr.Network(), serverAddr.String()
}
cancelConn, err := pgConn.config.DialFunc(ctx, serverNetwork, serverAddress)
if err != nil {
return err
// In case of unix sockets, RemoteAddr() returns only the file part of the path. If the
// first connect failed, try the config.
if serverAddr.Network() != "unix" {
return err
}
serverNetwork, serverAddr := NetworkAddress(pgConn.config.Host, pgConn.config.Port)
cancelConn, err = pgConn.config.DialFunc(ctx, serverNetwork, serverAddr)
if err != nil {
return err
}
}
defer cancelConn.Close()
@@ -877,17 +930,11 @@ func (pgConn *PgConn) CancelRequest(ctx context.Context) error {
binary.BigEndian.PutUint32(buf[4:8], 80877102)
binary.BigEndian.PutUint32(buf[8:12], uint32(pgConn.pid))
binary.BigEndian.PutUint32(buf[12:16], uint32(pgConn.secretKey))
// Postgres will process the request and close the connection
// so we don't need to read the reply
// https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.6.7.10
_, err = cancelConn.Write(buf)
if err != nil {
return err
}
_, err = cancelConn.Read(buf)
if err != io.EOF {
return err
}
return nil
return err
}
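CancelRequest above dials a second connection, writes the cancel message and, per the protocol, no longer waits for a reply. A hedged sketch of cancelling a slow query from another goroutine (DSN and timings are placeholders; in most code, cancelling the query's context achieves the same effect):
package main

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	go func() {
		time.Sleep(100 * time.Millisecond)
		// Ask the server to abort whatever this connection is currently running.
		if err := conn.PgConn().CancelRequest(context.Background()); err != nil {
			log.Printf("cancel request failed: %v", err)
		}
	}()

	// pg_sleep should be interrupted by the cancel request and return an error.
	if _, err := conn.Exec(ctx, "select pg_sleep(10)"); err != nil {
		log.Printf("query ended early: %v", err)
	}
}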
// WaitForNotification waits for a LISTEN/NOTIFY message to be received. It returns an error if a notification was not
@@ -953,7 +1000,7 @@ func (pgConn *PgConn) Exec(ctx context.Context, sql string) *MultiResultReader {
}
pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
err := pgConn.frontend.Flush()
err := pgConn.flushWithPotentialWriteReadDeadlock()
if err != nil {
pgConn.asyncClose()
pgConn.contextWatcher.Unwatch()
@@ -1064,7 +1111,7 @@ func (pgConn *PgConn) execExtendedSuffix(result *ResultReader) {
pgConn.frontend.SendExecute(&pgproto3.Execute{})
pgConn.frontend.SendSync(&pgproto3.Sync{})
err := pgConn.frontend.Flush()
err := pgConn.flushWithPotentialWriteReadDeadlock()
if err != nil {
pgConn.asyncClose()
result.concludeCommand(CommandTag{}, err)
@@ -1097,7 +1144,7 @@ func (pgConn *PgConn) CopyTo(ctx context.Context, w io.Writer, sql string) (Comm
// Send copy to command
pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
err := pgConn.frontend.Flush()
err := pgConn.flushWithPotentialWriteReadDeadlock()
if err != nil {
pgConn.asyncClose()
pgConn.unlock()
@@ -1153,85 +1200,91 @@ func (pgConn *PgConn) CopyFrom(ctx context.Context, r io.Reader, sql string) (Co
defer pgConn.contextWatcher.Unwatch()
}
// Send copy to command
// Send copy from query
pgConn.frontend.SendQuery(&pgproto3.Query{String: sql})
err := pgConn.frontend.Flush()
err := pgConn.flushWithPotentialWriteReadDeadlock()
if err != nil {
pgConn.asyncClose()
return CommandTag{}, err
}
err = pgConn.conn.SetReadDeadline(nbconn.NonBlockingDeadline)
if err != nil {
pgConn.asyncClose()
return CommandTag{}, err
}
nonblocking := true
defer func() {
if nonblocking {
pgConn.conn.SetReadDeadline(time.Time{})
// Send copy data
abortCopyChan := make(chan struct{})
copyErrChan := make(chan error, 1)
signalMessageChan := pgConn.signalMessage()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
buf := iobufpool.Get(65536)
defer iobufpool.Put(buf)
(*buf)[0] = 'd'
for {
n, readErr := r.Read((*buf)[5:cap(*buf)])
if n > 0 {
*buf = (*buf)[0 : n+5]
pgio.SetInt32((*buf)[1:], int32(n+4))
writeErr := pgConn.frontend.SendUnbufferedEncodedCopyData(*buf)
if writeErr != nil {
// Write errors are always fatal, but we can't use asyncClose because we are in a different goroutine. Not
// setting pgConn.status or closing pgConn.cleanupDone for the same reason.
pgConn.conn.Close()
copyErrChan <- writeErr
return
}
}
if readErr != nil {
copyErrChan <- readErr
return
}
select {
case <-abortCopyChan:
return
default:
}
}
}()
buf := iobufpool.Get(65536)
defer iobufpool.Put(buf)
(*buf)[0] = 'd'
var readErr, pgErr error
for pgErr == nil {
// Read chunk from r.
var n int
n, readErr = r.Read((*buf)[5:cap(*buf)])
// Send chunk to PostgreSQL.
if n > 0 {
*buf = (*buf)[0 : n+5]
pgio.SetInt32((*buf)[1:], int32(n+4))
writeErr := pgConn.frontend.SendUnbufferedEncodedCopyData(*buf)
if writeErr != nil {
pgConn.asyncClose()
return CommandTag{}, err
}
}
// Abort loop if there was a read error.
if readErr != nil {
break
}
// Read messages until error or none available.
for pgErr == nil {
msg, err := pgConn.receiveMessage()
if err != nil {
if errors.Is(err, nbconn.ErrWouldBlock) {
break
}
pgConn.asyncClose()
var pgErr error
var copyErr error
for copyErr == nil && pgErr == nil {
select {
case copyErr = <-copyErrChan:
case <-signalMessageChan:
// If pgConn.receiveMessage encounters an error it will call pgConn.asyncClose. But that is a race condition with
// the goroutine. So instead check pgConn.bufferingReceiveErr which will have been set by the signalMessage. If an
// error is found then forcibly close the connection without sending the Terminate message.
if err := pgConn.bufferingReceiveErr; err != nil {
pgConn.status = connStatusClosed
pgConn.conn.Close()
close(pgConn.cleanupDone)
return CommandTag{}, normalizeTimeoutError(ctx, err)
}
msg, _ := pgConn.receiveMessage()
switch msg := msg.(type) {
case *pgproto3.ErrorResponse:
pgErr = ErrorResponseToPgError(msg)
break
default:
signalMessageChan = pgConn.signalMessage()
}
}
}
close(abortCopyChan)
// Make sure io goroutine finishes before writing.
wg.Wait()
err = pgConn.conn.SetReadDeadline(time.Time{})
if err != nil {
pgConn.asyncClose()
return CommandTag{}, err
}
nonblocking = false
if readErr == io.EOF || pgErr != nil {
if copyErr == io.EOF || pgErr != nil {
pgConn.frontend.Send(&pgproto3.CopyDone{})
} else {
pgConn.frontend.Send(&pgproto3.CopyFail{Message: readErr.Error()})
pgConn.frontend.Send(&pgproto3.CopyFail{Message: copyErr.Error()})
}
err = pgConn.frontend.Flush()
err = pgConn.flushWithPotentialWriteReadDeadlock()
if err != nil {
pgConn.asyncClose()
return CommandTag{}, err
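CopyFrom above now streams data from a separate goroutine while the main goroutine watches for early server errors. A hedged usage sketch at the pgconn level (DSN, table, and rows are placeholders; text-format COPY input is tab-delimited):
package main

import (
	"context"
	"log"
	"strings"

	"github.com/jackc/pgx/v5/pgconn"
)

func main() {
	ctx := context.Background()
	conn, err := pgconn.Connect(ctx, "postgres://user:pass@localhost:5432/db") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Each line is one row for a hypothetical two-column table; COPY's text
	// format is tab-delimited by default.
	data := strings.NewReader("1\talice\n2\tbob\n")
	tag, err := conn.CopyFrom(ctx, data, "copy users (id, name) from stdin")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d rows", tag.RowsAffected())
}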
@@ -1426,7 +1479,8 @@ func (rr *ResultReader) NextRow() bool {
}
// FieldDescriptions returns the field descriptions for the current result set. The returned slice is only valid until
// the ResultReader is closed.
// the ResultReader is closed. It may return nil (for example, if the query did not return a result set or an error was
// encountered.)
func (rr *ResultReader) FieldDescriptions() []FieldDescription {
return rr.fieldDescriptions
}
@@ -1592,7 +1646,9 @@ func (pgConn *PgConn) ExecBatch(ctx context.Context, batch *Batch) *MultiResultR
batch.buf = (&pgproto3.Sync{}).Encode(batch.buf)
pgConn.enterPotentialWriteReadDeadlock()
_, err := pgConn.conn.Write(batch.buf)
pgConn.exitPotentialWriteReadDeadlock()
if err != nil {
multiResult.closed = true
multiResult.err = err
@@ -1620,29 +1676,72 @@ func (pgConn *PgConn) EscapeString(s string) (string, error) {
return strings.Replace(s, "'", "''", -1), nil
}
// CheckConn checks the underlying connection without writing any bytes. This is currently implemented by reading and
// buffering until the read would block or an error occurs. This can be used to check if the server has closed the
// connection. If this is done immediately before sending a query it reduces the chances a query will be sent that fails
// CheckConn checks the underlying connection without writing any bytes. This is currently implemented by doing a read
// with a very short deadline. This can be useful because a TCP connection can be broken such that a write will appear
// to succeed even though it will never actually reach the server. Reading immediately before a write will detect this
// condition. If this is done immediately before sending a query it reduces the chances a query will be sent that fails
// without the client knowing whether the server received it or not.
//
// Deprecated: CheckConn is deprecated in favor of Ping. CheckConn cannot detect all types of broken connections where
// the write would still appear to succeed. Prefer Ping unless on a high latency connection.
func (pgConn *PgConn) CheckConn() error {
err := pgConn.conn.BufferReadUntilBlock()
if err != nil && !errors.Is(err, nbconn.ErrWouldBlock) {
return err
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
defer cancel()
_, err := pgConn.ReceiveMessage(ctx)
if err != nil {
if !Timeout(err) {
return err
}
}
return nil
}
// Ping pings the server. This can be useful because a TCP connection can be broken such that a write will appear to
// succeed even though it will never actually reach the server. Pinging immediately before sending a query reduces the
// chances a query will be sent that fails without the client knowing whether the server received it or not.
func (pgConn *PgConn) Ping(ctx context.Context) error {
return pgConn.Exec(ctx, "-- ping").Close()
}
// makeCommandTag makes a CommandTag. It does not retain a reference to buf or buf's underlying memory.
func (pgConn *PgConn) makeCommandTag(buf []byte) CommandTag {
return CommandTag{s: string(buf)}
}
// enterPotentialWriteReadDeadlock must be called before a write that could deadlock if the server is simultaneously
// blocked writing to us.
func (pgConn *PgConn) enterPotentialWriteReadDeadlock() {
// The time to wait is somewhat arbitrary. A Write should only take as long as the syscall and memcpy to the OS
// outbound network buffer unless the buffer is full (which potentially is a block). It needs to be long enough for
// the normal case, but short enough not to kill performance if a block occurs.
//
// In addition, on Windows the default timer resolution is 15.6ms. So setting the timer to less than that is
// ineffective.
pgConn.slowWriteTimer.Reset(15 * time.Millisecond)
}
// exitPotentialWriteReadDeadlock must be called after a call to enterPotentialWriteReadDeadlock.
func (pgConn *PgConn) exitPotentialWriteReadDeadlock() {
if !pgConn.slowWriteTimer.Reset(time.Duration(math.MaxInt64)) {
pgConn.slowWriteTimer.Stop()
}
}
func (pgConn *PgConn) flushWithPotentialWriteReadDeadlock() error {
pgConn.enterPotentialWriteReadDeadlock()
err := pgConn.frontend.Flush()
pgConn.exitPotentialWriteReadDeadlock()
return err
}
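The enter/exit pair above arms a slow-write timer before each potentially blocking write and disarms it afterwards; if the write stalls, the timer starts the background reader so the server can drain and the write can proceed. A minimal, hypothetical sketch of the same watchdog pattern using only the standard library (names and durations are illustrative, not pgconn's API):
package main

import (
	"fmt"
	"math"
	"time"
)

// watchdogWriter arms a timer before a write that might block and disarms it
// afterwards; if the write stalls past stallWait, onStall fires (in pgconn this
// is where the background reader is started).
type watchdogWriter struct {
	slow      *time.Timer
	stallWait time.Duration
}

func newWatchdogWriter(onStall func(), stallWait time.Duration) *watchdogWriter {
	return &watchdogWriter{
		slow:      time.AfterFunc(time.Duration(math.MaxInt64), onStall), // armed "never" until a write begins
		stallWait: stallWait,
	}
}

func (w *watchdogWriter) write(doWrite func()) {
	w.slow.Reset(w.stallWait) // enter the potential write/read deadlock window
	doWrite()
	if !w.slow.Reset(time.Duration(math.MaxInt64)) { // exit the window
		w.slow.Stop()
	}
}

func main() {
	w := newWatchdogWriter(func() { fmt.Println("write stalled: start draining reads") }, 15*time.Millisecond)
	w.write(func() { time.Sleep(50 * time.Millisecond) }) // a simulated slow write triggers the watchdog
}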
// HijackedConn is the result of hijacking a connection.
//
// Due to the necessary exposure of internal implementation details, it is not covered by the semantic versioning
// compatibility.
type HijackedConn struct {
Conn nbconn.Conn // the non-blocking wrapper of the underlying TCP or unix domain socket connection
Conn net.Conn
PID uint32 // backend pid
SecretKey uint32 // key to use to send a cancel query message to the server
ParameterStatuses map[string]string // parameters that have been reported by the server
@@ -1695,6 +1794,8 @@ func Construct(hc *HijackedConn) (*PgConn, error) {
}
pgConn.contextWatcher = newContextWatcher(pgConn.conn)
pgConn.bgReader = bgreader.New(pgConn.conn)
pgConn.slowWriteTimer = time.AfterFunc(time.Duration(math.MaxInt64), pgConn.bgReader.Start)
return pgConn, nil
}
@@ -1817,7 +1918,7 @@ func (p *Pipeline) Flush() error {
return errors.New("pipeline closed")
}
err := p.conn.frontend.Flush()
err := p.conn.flushWithPotentialWriteReadDeadlock()
if err != nil {
err = normalizeTimeoutError(p.ctx, err)

View File

@@ -363,12 +363,13 @@ func quoteArrayElement(src string) string {
}
func isSpace(ch byte) bool {
// see https://github.com/postgres/postgres/blob/REL_12_STABLE/src/backend/parser/scansup.c#L224
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\f'
// see array_isspace:
// https://github.com/postgres/postgres/blob/master/src/backend/utils/adt/arrayfuncs.c
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' || ch == '\v' || ch == '\f'
}
func quoteArrayElementIfNeeded(src string) string {
if src == "" || (len(src) == 4 && strings.ToLower(src) == "null") || isSpace(src[0]) || isSpace(src[len(src)-1]) || strings.ContainsAny(src, `{},"\`) {
if src == "" || (len(src) == 4 && strings.EqualFold(src, "null")) || isSpace(src[0]) || isSpace(src[len(src)-1]) || strings.ContainsAny(src, `{},"\`) {
return quoteArrayElement(src)
}
return src

View File

@@ -1,10 +1,12 @@
package pgtype
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
"strconv"
"strings"
)
type BoolScanner interface {
@@ -264,8 +266,8 @@ func (scanPlanTextAnyToBool) Scan(src []byte, dst any) error {
return fmt.Errorf("cannot scan NULL into %T", dst)
}
if len(src) != 1 {
return fmt.Errorf("invalid length for bool: %v", len(src))
if len(src) == 0 {
return fmt.Errorf("cannot scan empty string into %T", dst)
}
p, ok := (dst).(*bool)
@@ -273,7 +275,12 @@ func (scanPlanTextAnyToBool) Scan(src []byte, dst any) error {
return ErrScanTargetTypeChanged
}
*p = src[0] == 't'
v, err := planTextToBool(src)
if err != nil {
return err
}
*p = v
return nil
}
@@ -309,9 +316,28 @@ func (scanPlanTextAnyToBoolScanner) Scan(src []byte, dst any) error {
return s.ScanBool(Bool{})
}
if len(src) != 1 {
return fmt.Errorf("invalid length for bool: %v", len(src))
if len(src) == 0 {
return fmt.Errorf("cannot scan empty string into %T", dst)
}
return s.ScanBool(Bool{Bool: src[0] == 't', Valid: true})
v, err := planTextToBool(src)
if err != nil {
return err
}
return s.ScanBool(Bool{Bool: v, Valid: true})
}
// https://www.postgresql.org/docs/11/datatype-boolean.html
func planTextToBool(src []byte) (bool, error) {
s := string(bytes.ToLower(bytes.TrimSpace(src)))
switch {
case strings.HasPrefix("true", s), strings.HasPrefix("yes", s), s == "on", s == "1":
return true, nil
case strings.HasPrefix("false", s), strings.HasPrefix("no", s), strings.HasPrefix("off", s), s == "0":
return false, nil
default:
return false, fmt.Errorf("unknown boolean string representation %q", src)
}
}
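planTextToBool above accepts any prefix of true/yes and false/no/off, plus on, 1 and 0, mirroring PostgreSQL's boolean input syntax. A small stand-alone illustration of that prefix rule (re-implemented here for demonstration; not pgtype's exported API):
package main

import (
	"fmt"
	"strings"
)

// textToBool re-implements the prefix rule for demonstration: prefixes of
// "true"/"yes", plus "on" and "1", are true; prefixes of "false"/"no"/"off",
// plus "0", are false. Empty input is rejected earlier in pgtype.
func textToBool(src string) (bool, error) {
	s := strings.ToLower(strings.TrimSpace(src))
	switch {
	case strings.HasPrefix("true", s), strings.HasPrefix("yes", s), s == "on", s == "1":
		return true, nil
	case strings.HasPrefix("false", s), strings.HasPrefix("no", s), strings.HasPrefix("off", s), s == "0":
		return false, nil
	default:
		return false, fmt.Errorf("unknown boolean string representation %q", src)
	}
}

func main() {
	for _, in := range []string{"t", "tr", "yes", "ON", " f ", "no", "off", "0", "maybe"} {
		v, err := textToBool(in)
		fmt.Printf("%q -> %v %v\n", in, v, err)
	}
}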

View File

@@ -64,6 +64,9 @@ func underlyingNumberType(val any) (any, bool) {
case reflect.String:
convVal := refVal.String()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
case reflect.Bool:
convVal := refVal.Bool()
return convVal, reflect.TypeOf(convVal) != refVal.Type()
}
return nil, false
@@ -262,7 +265,7 @@ func int64AssignTo(srcVal int64, srcValid bool, dst any) error {
*v = uint8(srcVal)
case *uint16:
if srcVal < 0 {
return fmt.Errorf("%d is less than zero for uint32", srcVal)
return fmt.Errorf("%d is less than zero for uint16", srcVal)
} else if srcVal > math.MaxUint16 {
return fmt.Errorf("%d is greater than maximum value for uint16", srcVal)
}

View File

@@ -1,14 +1,11 @@
package pgtype
import (
"bytes"
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"strings"
"unicode"
"unicode/utf8"
"github.com/jackc/pgx/v5/internal/pgio"
)
@@ -43,7 +40,7 @@ func (h *Hstore) Scan(src any) error {
switch src := src.(type) {
case string:
return scanPlanTextAnyToHstoreScanner{}.Scan([]byte(src), h)
return scanPlanTextAnyToHstoreScanner{}.scanString(src, h)
}
return fmt.Errorf("cannot scan %T", src)
@@ -137,13 +134,20 @@ func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, e
buf = append(buf, ',')
}
buf = append(buf, quoteHstoreElementIfNeeded(k)...)
// unconditionally quote hstore keys/values like Postgres does
// this avoids a Mac OS X Postgres hstore parsing bug:
// https://www.postgresql.org/message-id/CA%2BHWA9awUW0%2BRV_gO9r1ABZwGoZxPztcJxPy8vMFSTbTfi4jig%40mail.gmail.com
buf = append(buf, '"')
buf = append(buf, quoteArrayReplacer.Replace(k)...)
buf = append(buf, '"')
buf = append(buf, "=>"...)
if v == nil {
buf = append(buf, "NULL"...)
} else {
buf = append(buf, quoteHstoreElementIfNeeded(*v)...)
buf = append(buf, '"')
buf = append(buf, quoteArrayReplacer.Replace(*v)...)
buf = append(buf, '"')
}
}
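
A small sketch of the text form this produces (the replacer is re-created locally for illustration): keys and values are always wrapped in double quotes, and only backslash and double quote need escaping.

package main

import (
	"fmt"
	"strings"
)

func main() {
	replacer := strings.NewReplacer(`\`, `\\`, `"`, `\"`)
	k, v := "size", `10" x 20"`
	fmt.Println(`"` + replacer.Replace(k) + `"=>"` + replacer.Replace(v) + `"`)
	// prints: "size"=>"10\" x 20\""
}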
@@ -174,25 +178,28 @@ func (scanPlanBinaryHstoreToHstoreScanner) Scan(src []byte, dst any) error {
scanner := (dst).(HstoreScanner)
if src == nil {
return scanner.ScanHstore(Hstore{})
return scanner.ScanHstore(Hstore(nil))
}
rp := 0
if len(src[rp:]) < 4 {
const uint32Len = 4
if len(src[rp:]) < uint32Len {
return fmt.Errorf("hstore incomplete %v", src)
}
pairCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
rp += 4
rp += uint32Len
hstore := make(Hstore, pairCount)
// one allocation for all *string, rather than one per string, just like text parsing
valueStrings := make([]string, pairCount)
for i := 0; i < pairCount; i++ {
if len(src[rp:]) < 4 {
if len(src[rp:]) < uint32Len {
return fmt.Errorf("hstore incomplete %v", src)
}
keyLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
rp += 4
rp += uint32Len
if len(src[rp:]) < keyLen {
return fmt.Errorf("hstore incomplete %v", src)
@@ -200,26 +207,17 @@ func (scanPlanBinaryHstoreToHstoreScanner) Scan(src []byte, dst any) error {
key := string(src[rp : rp+keyLen])
rp += keyLen
if len(src[rp:]) < 4 {
if len(src[rp:]) < uint32Len {
return fmt.Errorf("hstore incomplete %v", src)
}
valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
rp += 4
var valueBuf []byte
if valueLen >= 0 {
valueBuf = src[rp : rp+valueLen]
valueStrings[i] = string(src[rp : rp+valueLen])
rp += valueLen
}
var value Text
err := scanPlanTextAnyToTextScanner{}.Scan(valueBuf, &value)
if err != nil {
return err
}
if value.Valid {
hstore[key] = &value.String
hstore[key] = &valueStrings[i]
} else {
hstore[key] = nil
}
@@ -230,28 +228,22 @@ func (scanPlanBinaryHstoreToHstoreScanner) Scan(src []byte, dst any) error {
type scanPlanTextAnyToHstoreScanner struct{}
func (scanPlanTextAnyToHstoreScanner) Scan(src []byte, dst any) error {
func (s scanPlanTextAnyToHstoreScanner) Scan(src []byte, dst any) error {
scanner := (dst).(HstoreScanner)
if src == nil {
return scanner.ScanHstore(Hstore{})
return scanner.ScanHstore(Hstore(nil))
}
return s.scanString(string(src), scanner)
}
keys, values, err := parseHstore(string(src))
// scanString does not return nil hstore values because string cannot be nil.
func (scanPlanTextAnyToHstoreScanner) scanString(src string, scanner HstoreScanner) error {
hstore, err := parseHstore(src)
if err != nil {
return err
}
m := make(Hstore, len(keys))
for i := range keys {
if values[i].Valid {
m[keys[i]] = &values[i].String
} else {
m[keys[i]] = nil
}
}
return scanner.ScanHstore(m)
return scanner.ScanHstore(hstore)
}
func (c HstoreCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
@@ -271,191 +263,217 @@ func (c HstoreCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (
return hstore, nil
}
var quoteHstoreReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
func quoteHstoreElement(src string) string {
return `"` + quoteArrayReplacer.Replace(src) + `"`
}
func quoteHstoreElementIfNeeded(src string) string {
if src == "" || (len(src) == 4 && strings.ToLower(src) == "null") || strings.ContainsAny(src, ` {},"\=>`) {
return quoteArrayElement(src)
}
return src
}
const (
hsPre = iota
hsKey
hsSep
hsVal
hsNul
hsNext
)
type hstoreParser struct {
str string
pos int
str string
pos int
nextBackslash int
}
func newHSP(in string) *hstoreParser {
return &hstoreParser{
pos: 0,
str: in,
pos: 0,
str: in,
nextBackslash: strings.IndexByte(in, '\\'),
}
}
func (p *hstoreParser) Consume() (r rune, end bool) {
func (p *hstoreParser) atEnd() bool {
return p.pos >= len(p.str)
}
// consume returns the next byte of the string, or end if the string is done.
func (p *hstoreParser) consume() (b byte, end bool) {
if p.pos >= len(p.str) {
end = true
return
return 0, true
}
r, w := utf8.DecodeRuneInString(p.str[p.pos:])
p.pos += w
return
b = p.str[p.pos]
p.pos++
return b, false
}
func (p *hstoreParser) Peek() (r rune, end bool) {
if p.pos >= len(p.str) {
end = true
return
}
r, _ = utf8.DecodeRuneInString(p.str[p.pos:])
return
func unexpectedByteErr(actualB byte, expectedB byte) error {
return fmt.Errorf("expected '%c' ('%#v'); found '%c' ('%#v')", expectedB, expectedB, actualB, actualB)
}
// parseHstore parses the string representation of an hstore column (the same
// you would get from an ordinary SELECT) into two slices of keys and values. it
// is used internally in the default parsing of hstores.
func parseHstore(s string) (k []string, v []Text, err error) {
if s == "" {
return
// consumeExpectedByte consumes expectedB from the string, or returns an error.
func (p *hstoreParser) consumeExpectedByte(expectedB byte) error {
nextB, end := p.consume()
if end {
return fmt.Errorf("expected '%c' ('%#v'); found end", expectedB, expectedB)
}
if nextB != expectedB {
return unexpectedByteErr(nextB, expectedB)
}
return nil
}
// consumeExpected2 consumes two expected bytes or returns an error.
// This was a bit faster than using a string argument (better inlining? Not sure).
func (p *hstoreParser) consumeExpected2(one byte, two byte) error {
if p.pos+2 > len(p.str) {
return errors.New("unexpected end of string")
}
if p.str[p.pos] != one {
return unexpectedByteErr(p.str[p.pos], one)
}
if p.str[p.pos+1] != two {
return unexpectedByteErr(p.str[p.pos+1], two)
}
p.pos += 2
return nil
}
var errEOSInQuoted = errors.New(`found end before closing double-quote ('"')`)
// consumeDoubleQuoted consumes a double-quoted string from p. The double quote must have been
// parsed already. This copies the string from the backing string so it can be garbage collected.
func (p *hstoreParser) consumeDoubleQuoted() (string, error) {
// fast path: assume most keys/values do not contain escapes
nextDoubleQuote := strings.IndexByte(p.str[p.pos:], '"')
if nextDoubleQuote == -1 {
return "", errEOSInQuoted
}
nextDoubleQuote += p.pos
if p.nextBackslash == -1 || p.nextBackslash > nextDoubleQuote {
// clone the string from the source string to ensure it can be garbage collected separately
// TODO: use strings.Clone on Go 1.20; this could get optimized away
s := strings.Clone(p.str[p.pos:nextDoubleQuote])
p.pos = nextDoubleQuote + 1
return s, nil
}
buf := bytes.Buffer{}
keys := []string{}
values := []Text{}
// slow path: string contains escapes
s, err := p.consumeDoubleQuotedWithEscapes(p.nextBackslash)
p.nextBackslash = strings.IndexByte(p.str[p.pos:], '\\')
if p.nextBackslash != -1 {
p.nextBackslash += p.pos
}
return s, err
}
// consumeDoubleQuotedWithEscapes consumes a double-quoted string containing escapes, starting
// at p.pos, and with the first backslash at firstBackslash. This copies the string so it can be
// garbage collected separately.
func (p *hstoreParser) consumeDoubleQuotedWithEscapes(firstBackslash int) (string, error) {
// copy the prefix that does not contain backslashes
var builder strings.Builder
builder.WriteString(p.str[p.pos:firstBackslash])
// skip to the backslash
p.pos = firstBackslash
// copy bytes until the end, unescaping backslashes
for {
nextB, end := p.consume()
if end {
return "", errEOSInQuoted
} else if nextB == '"' {
break
} else if nextB == '\\' {
// escape: skip the backslash and copy the char
nextB, end = p.consume()
if end {
return "", errEOSInQuoted
}
if !(nextB == '\\' || nextB == '"') {
return "", fmt.Errorf("unexpected escape in quoted string: found '%#v'", nextB)
}
builder.WriteByte(nextB)
} else {
// normal byte: copy it
builder.WriteByte(nextB)
}
}
return builder.String(), nil
}
// consumePairSeparator consumes the Hstore pair separator ", " or returns an error.
func (p *hstoreParser) consumePairSeparator() error {
return p.consumeExpected2(',', ' ')
}
// consumeKVSeparator consumes the Hstore key/value separator "=>" or returns an error.
func (p *hstoreParser) consumeKVSeparator() error {
return p.consumeExpected2('=', '>')
}
// consumeDoubleQuotedOrNull consumes either a double-quoted string or the literal NULL, or returns an error.
func (p *hstoreParser) consumeDoubleQuotedOrNull() (Text, error) {
// peek at the next byte
if p.atEnd() {
return Text{}, errors.New("found end instead of value")
}
next := p.str[p.pos]
if next == 'N' {
// must be the exact string NULL: use consumeExpected2 twice
err := p.consumeExpected2('N', 'U')
if err != nil {
return Text{}, err
}
err = p.consumeExpected2('L', 'L')
if err != nil {
return Text{}, err
}
return Text{String: "", Valid: false}, nil
} else if next != '"' {
return Text{}, unexpectedByteErr(next, '"')
}
// skip the double quote
p.pos += 1
s, err := p.consumeDoubleQuoted()
if err != nil {
return Text{}, err
}
return Text{String: s, Valid: true}, nil
}
func parseHstore(s string) (Hstore, error) {
p := newHSP(s)
r, end := p.Consume()
state := hsPre
for !end {
switch state {
case hsPre:
if r == '"' {
state = hsKey
} else {
err = errors.New("String does not begin with \"")
}
case hsKey:
switch r {
case '"': //End of the key
keys = append(keys, buf.String())
buf = bytes.Buffer{}
state = hsSep
case '\\': //Potential escaped character
n, end := p.Consume()
switch {
case end:
err = errors.New("Found EOS in key, expecting character or \"")
case n == '"', n == '\\':
buf.WriteRune(n)
default:
buf.WriteRune(r)
buf.WriteRune(n)
}
default: //Any other character
buf.WriteRune(r)
}
case hsSep:
if r == '=' {
r, end = p.Consume()
switch {
case end:
err = errors.New("Found EOS after '=', expecting '>'")
case r == '>':
r, end = p.Consume()
switch {
case end:
err = errors.New("Found EOS after '=>', expecting '\"' or 'NULL'")
case r == '"':
state = hsVal
case r == 'N':
state = hsNul
default:
err = fmt.Errorf("Invalid character '%c' after '=>', expecting '\"' or 'NULL'", r)
}
default:
err = fmt.Errorf("Invalid character after '=', expecting '>'")
}
} else {
err = fmt.Errorf("Invalid character '%c' after value, expecting '='", r)
}
case hsVal:
switch r {
case '"': //End of the value
values = append(values, Text{String: buf.String(), Valid: true})
buf = bytes.Buffer{}
state = hsNext
case '\\': //Potential escaped character
n, end := p.Consume()
switch {
case end:
err = errors.New("Found EOS in key, expecting character or \"")
case n == '"', n == '\\':
buf.WriteRune(n)
default:
buf.WriteRune(r)
buf.WriteRune(n)
}
default: //Any other character
buf.WriteRune(r)
}
case hsNul:
nulBuf := make([]rune, 3)
nulBuf[0] = r
for i := 1; i < 3; i++ {
r, end = p.Consume()
if end {
err = errors.New("Found EOS in NULL value")
return
}
nulBuf[i] = r
}
if nulBuf[0] == 'U' && nulBuf[1] == 'L' && nulBuf[2] == 'L' {
values = append(values, Text{})
state = hsNext
} else {
err = fmt.Errorf("Invalid NULL value: 'N%s'", string(nulBuf))
}
case hsNext:
if r == ',' {
r, end = p.Consume()
switch {
case end:
err = errors.New("Found EOS after ',', expcting space")
case (unicode.IsSpace(r)):
r, end = p.Consume()
state = hsKey
default:
err = fmt.Errorf("Invalid character '%c' after ', ', expecting \"", r)
}
} else {
err = fmt.Errorf("Invalid character '%c' after value, expecting ','", r)
// This is an over-estimate of the number of key/value pairs. Use '>' because I am guessing it
// is less likely to occur in keys/values than '=' or ','.
numPairsEstimate := strings.Count(s, ">")
// makes one allocation of strings for the entire Hstore, rather than one allocation per value.
valueStrings := make([]string, 0, numPairsEstimate)
result := make(Hstore, numPairsEstimate)
first := true
for !p.atEnd() {
if !first {
err := p.consumePairSeparator()
if err != nil {
return nil, err
}
} else {
first = false
}
err := p.consumeExpectedByte('"')
if err != nil {
return
return nil, err
}
key, err := p.consumeDoubleQuoted()
if err != nil {
return nil, err
}
err = p.consumeKVSeparator()
if err != nil {
return nil, err
}
value, err := p.consumeDoubleQuotedOrNull()
if err != nil {
return nil, err
}
if value.Valid {
valueStrings = append(valueStrings, value.String)
result[key] = &valueStrings[len(valueStrings)-1]
} else {
result[key] = nil
}
r, end = p.Consume()
}
if state != hsNext {
err = errors.New("Improperly formatted hstore")
return
}
k = keys
v = values
return
return result, nil
}
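
A minimal sketch of driving the parser above through the exported API; Hstore.Scan accepts the text representation directly (note the exact `", "` pair separator the parser now requires):

package main

import (
	"fmt"
	"log"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	var h pgtype.Hstore
	if err := h.Scan(`"a"=>"1", "b"=>NULL`); err != nil {
		log.Fatal(err)
	}
	fmt.Println(*h["a"], h["b"] == nil) // 1 true
}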

View File

@@ -150,7 +150,7 @@ func (scanPlanJSONToJSONUnmarshal) Scan(src []byte, dst any) error {
if dstValue.Kind() == reflect.Ptr {
el := dstValue.Elem()
switch el.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Map:
case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface:
el.Set(reflect.Zero(el.Type()))
return nil
}
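
The added reflect.Interface case means a SQL NULL json value resets destinations such as *any instead of failing; a minimal sketch of the underlying reflect behaviour:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	var dst any = "stale value"
	el := reflect.ValueOf(&dst).Elem() // what the scan plan sees behind the *any
	el.Set(reflect.Zero(el.Type()))    // NULL source: reset to the zero value
	fmt.Println(dst == nil)            // true
}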

View File

@@ -147,7 +147,7 @@ const (
BinaryFormatCode = 1
)
// A Codec converts between Go and PostgreSQL values.
// A Codec converts between Go and PostgreSQL values. A Codec must not be mutated after it is registered with a Map.
type Codec interface {
// FormatSupported returns true if the format is supported.
FormatSupported(int16) bool
@@ -178,6 +178,7 @@ func (e *nullAssignmentError) Error() string {
return fmt.Sprintf("cannot assign NULL to %T", e.dst)
}
// Type represents a PostgreSQL data type. It must not be mutated after it is registered with a Map.
type Type struct {
Codec Codec
Name string
@@ -211,7 +212,9 @@ type Map struct {
}
func NewMap() *Map {
m := &Map{
defaultMapInitOnce.Do(initDefaultMap)
return &Map{
oidToType: make(map[uint32]*Type),
nameToType: make(map[string]*Type),
reflectTypeToName: make(map[reflect.Type]string),
@@ -240,184 +243,9 @@ func NewMap() *Map {
TryWrapPtrArrayScanPlan,
},
}
// Base types
m.RegisterType(&Type{Name: "aclitem", OID: ACLItemOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
m.RegisterType(&Type{Name: "bit", OID: BitOID, Codec: BitsCodec{}})
m.RegisterType(&Type{Name: "bool", OID: BoolOID, Codec: BoolCodec{}})
m.RegisterType(&Type{Name: "box", OID: BoxOID, Codec: BoxCodec{}})
m.RegisterType(&Type{Name: "bpchar", OID: BPCharOID, Codec: TextCodec{}})
m.RegisterType(&Type{Name: "bytea", OID: ByteaOID, Codec: ByteaCodec{}})
m.RegisterType(&Type{Name: "char", OID: QCharOID, Codec: QCharCodec{}})
m.RegisterType(&Type{Name: "cid", OID: CIDOID, Codec: Uint32Codec{}})
m.RegisterType(&Type{Name: "cidr", OID: CIDROID, Codec: InetCodec{}})
m.RegisterType(&Type{Name: "circle", OID: CircleOID, Codec: CircleCodec{}})
m.RegisterType(&Type{Name: "date", OID: DateOID, Codec: DateCodec{}})
m.RegisterType(&Type{Name: "float4", OID: Float4OID, Codec: Float4Codec{}})
m.RegisterType(&Type{Name: "float8", OID: Float8OID, Codec: Float8Codec{}})
m.RegisterType(&Type{Name: "inet", OID: InetOID, Codec: InetCodec{}})
m.RegisterType(&Type{Name: "int2", OID: Int2OID, Codec: Int2Codec{}})
m.RegisterType(&Type{Name: "int4", OID: Int4OID, Codec: Int4Codec{}})
m.RegisterType(&Type{Name: "int8", OID: Int8OID, Codec: Int8Codec{}})
m.RegisterType(&Type{Name: "interval", OID: IntervalOID, Codec: IntervalCodec{}})
m.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: JSONCodec{}})
m.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: JSONBCodec{}})
m.RegisterType(&Type{Name: "jsonpath", OID: JSONPathOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
m.RegisterType(&Type{Name: "line", OID: LineOID, Codec: LineCodec{}})
m.RegisterType(&Type{Name: "lseg", OID: LsegOID, Codec: LsegCodec{}})
m.RegisterType(&Type{Name: "macaddr", OID: MacaddrOID, Codec: MacaddrCodec{}})
m.RegisterType(&Type{Name: "name", OID: NameOID, Codec: TextCodec{}})
m.RegisterType(&Type{Name: "numeric", OID: NumericOID, Codec: NumericCodec{}})
m.RegisterType(&Type{Name: "oid", OID: OIDOID, Codec: Uint32Codec{}})
m.RegisterType(&Type{Name: "path", OID: PathOID, Codec: PathCodec{}})
m.RegisterType(&Type{Name: "point", OID: PointOID, Codec: PointCodec{}})
m.RegisterType(&Type{Name: "polygon", OID: PolygonOID, Codec: PolygonCodec{}})
m.RegisterType(&Type{Name: "record", OID: RecordOID, Codec: RecordCodec{}})
m.RegisterType(&Type{Name: "text", OID: TextOID, Codec: TextCodec{}})
m.RegisterType(&Type{Name: "tid", OID: TIDOID, Codec: TIDCodec{}})
m.RegisterType(&Type{Name: "time", OID: TimeOID, Codec: TimeCodec{}})
m.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: TimestampCodec{}})
m.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: TimestamptzCodec{}})
m.RegisterType(&Type{Name: "unknown", OID: UnknownOID, Codec: TextCodec{}})
m.RegisterType(&Type{Name: "uuid", OID: UUIDOID, Codec: UUIDCodec{}})
m.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
m.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
m.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
// Range types
m.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: m.oidToType[DateOID]}})
m.RegisterType(&Type{Name: "int4range", OID: Int4rangeOID, Codec: &RangeCodec{ElementType: m.oidToType[Int4OID]}})
m.RegisterType(&Type{Name: "int8range", OID: Int8rangeOID, Codec: &RangeCodec{ElementType: m.oidToType[Int8OID]}})
m.RegisterType(&Type{Name: "numrange", OID: NumrangeOID, Codec: &RangeCodec{ElementType: m.oidToType[NumericOID]}})
m.RegisterType(&Type{Name: "tsrange", OID: TsrangeOID, Codec: &RangeCodec{ElementType: m.oidToType[TimestampOID]}})
m.RegisterType(&Type{Name: "tstzrange", OID: TstzrangeOID, Codec: &RangeCodec{ElementType: m.oidToType[TimestamptzOID]}})
// Multirange types
m.RegisterType(&Type{Name: "datemultirange", OID: DatemultirangeOID, Codec: &MultirangeCodec{ElementType: m.oidToType[DaterangeOID]}})
m.RegisterType(&Type{Name: "int4multirange", OID: Int4multirangeOID, Codec: &MultirangeCodec{ElementType: m.oidToType[Int4rangeOID]}})
m.RegisterType(&Type{Name: "int8multirange", OID: Int8multirangeOID, Codec: &MultirangeCodec{ElementType: m.oidToType[Int8rangeOID]}})
m.RegisterType(&Type{Name: "nummultirange", OID: NummultirangeOID, Codec: &MultirangeCodec{ElementType: m.oidToType[NumrangeOID]}})
m.RegisterType(&Type{Name: "tsmultirange", OID: TsmultirangeOID, Codec: &MultirangeCodec{ElementType: m.oidToType[TsrangeOID]}})
m.RegisterType(&Type{Name: "tstzmultirange", OID: TstzmultirangeOID, Codec: &MultirangeCodec{ElementType: m.oidToType[TstzrangeOID]}})
// Array types
m.RegisterType(&Type{Name: "_aclitem", OID: ACLItemArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[ACLItemOID]}})
m.RegisterType(&Type{Name: "_bit", OID: BitArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[BitOID]}})
m.RegisterType(&Type{Name: "_bool", OID: BoolArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[BoolOID]}})
m.RegisterType(&Type{Name: "_box", OID: BoxArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[BoxOID]}})
m.RegisterType(&Type{Name: "_bpchar", OID: BPCharArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[BPCharOID]}})
m.RegisterType(&Type{Name: "_bytea", OID: ByteaArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[ByteaOID]}})
m.RegisterType(&Type{Name: "_char", OID: QCharArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[QCharOID]}})
m.RegisterType(&Type{Name: "_cid", OID: CIDArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[CIDOID]}})
m.RegisterType(&Type{Name: "_cidr", OID: CIDRArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[CIDROID]}})
m.RegisterType(&Type{Name: "_circle", OID: CircleArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[CircleOID]}})
m.RegisterType(&Type{Name: "_date", OID: DateArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[DateOID]}})
m.RegisterType(&Type{Name: "_daterange", OID: DaterangeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[DaterangeOID]}})
m.RegisterType(&Type{Name: "_float4", OID: Float4ArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Float4OID]}})
m.RegisterType(&Type{Name: "_float8", OID: Float8ArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Float8OID]}})
m.RegisterType(&Type{Name: "_inet", OID: InetArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[InetOID]}})
m.RegisterType(&Type{Name: "_int2", OID: Int2ArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Int2OID]}})
m.RegisterType(&Type{Name: "_int4", OID: Int4ArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Int4OID]}})
m.RegisterType(&Type{Name: "_int4range", OID: Int4rangeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Int4rangeOID]}})
m.RegisterType(&Type{Name: "_int8", OID: Int8ArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Int8OID]}})
m.RegisterType(&Type{Name: "_int8range", OID: Int8rangeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[Int8rangeOID]}})
m.RegisterType(&Type{Name: "_interval", OID: IntervalArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[IntervalOID]}})
m.RegisterType(&Type{Name: "_json", OID: JSONArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[JSONOID]}})
m.RegisterType(&Type{Name: "_jsonb", OID: JSONBArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[JSONBOID]}})
m.RegisterType(&Type{Name: "_jsonpath", OID: JSONPathArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[JSONPathOID]}})
m.RegisterType(&Type{Name: "_line", OID: LineArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[LineOID]}})
m.RegisterType(&Type{Name: "_lseg", OID: LsegArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[LsegOID]}})
m.RegisterType(&Type{Name: "_macaddr", OID: MacaddrArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[MacaddrOID]}})
m.RegisterType(&Type{Name: "_name", OID: NameArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[NameOID]}})
m.RegisterType(&Type{Name: "_numeric", OID: NumericArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[NumericOID]}})
m.RegisterType(&Type{Name: "_numrange", OID: NumrangeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[NumrangeOID]}})
m.RegisterType(&Type{Name: "_oid", OID: OIDArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[OIDOID]}})
m.RegisterType(&Type{Name: "_path", OID: PathArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[PathOID]}})
m.RegisterType(&Type{Name: "_point", OID: PointArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[PointOID]}})
m.RegisterType(&Type{Name: "_polygon", OID: PolygonArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[PolygonOID]}})
m.RegisterType(&Type{Name: "_record", OID: RecordArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[RecordOID]}})
m.RegisterType(&Type{Name: "_text", OID: TextArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TextOID]}})
m.RegisterType(&Type{Name: "_tid", OID: TIDArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TIDOID]}})
m.RegisterType(&Type{Name: "_time", OID: TimeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TimeOID]}})
m.RegisterType(&Type{Name: "_timestamp", OID: TimestampArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TimestampOID]}})
m.RegisterType(&Type{Name: "_timestamptz", OID: TimestamptzArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TimestamptzOID]}})
m.RegisterType(&Type{Name: "_tsrange", OID: TsrangeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TsrangeOID]}})
m.RegisterType(&Type{Name: "_tstzrange", OID: TstzrangeArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[TstzrangeOID]}})
m.RegisterType(&Type{Name: "_uuid", OID: UUIDArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[UUIDOID]}})
m.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[VarbitOID]}})
m.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[VarcharOID]}})
m.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: m.oidToType[XIDOID]}})
// Integer types that directly map to a PostgreSQL type
registerDefaultPgTypeVariants[int16](m, "int2")
registerDefaultPgTypeVariants[int32](m, "int4")
registerDefaultPgTypeVariants[int64](m, "int8")
// Integer types that do not have a direct match to a PostgreSQL type
registerDefaultPgTypeVariants[int8](m, "int8")
registerDefaultPgTypeVariants[int](m, "int8")
registerDefaultPgTypeVariants[uint8](m, "int8")
registerDefaultPgTypeVariants[uint16](m, "int8")
registerDefaultPgTypeVariants[uint32](m, "int8")
registerDefaultPgTypeVariants[uint64](m, "numeric")
registerDefaultPgTypeVariants[uint](m, "numeric")
registerDefaultPgTypeVariants[float32](m, "float4")
registerDefaultPgTypeVariants[float64](m, "float8")
registerDefaultPgTypeVariants[bool](m, "bool")
registerDefaultPgTypeVariants[time.Time](m, "timestamptz")
registerDefaultPgTypeVariants[time.Duration](m, "interval")
registerDefaultPgTypeVariants[string](m, "text")
registerDefaultPgTypeVariants[[]byte](m, "bytea")
registerDefaultPgTypeVariants[net.IP](m, "inet")
registerDefaultPgTypeVariants[net.IPNet](m, "cidr")
registerDefaultPgTypeVariants[netip.Addr](m, "inet")
registerDefaultPgTypeVariants[netip.Prefix](m, "cidr")
// pgtype provided structs
registerDefaultPgTypeVariants[Bits](m, "varbit")
registerDefaultPgTypeVariants[Bool](m, "bool")
registerDefaultPgTypeVariants[Box](m, "box")
registerDefaultPgTypeVariants[Circle](m, "circle")
registerDefaultPgTypeVariants[Date](m, "date")
registerDefaultPgTypeVariants[Range[Date]](m, "daterange")
registerDefaultPgTypeVariants[Multirange[Range[Date]]](m, "datemultirange")
registerDefaultPgTypeVariants[Float4](m, "float4")
registerDefaultPgTypeVariants[Float8](m, "float8")
registerDefaultPgTypeVariants[Range[Float8]](m, "numrange") // There is no PostgreSQL builtin float8range so map it to numrange.
registerDefaultPgTypeVariants[Multirange[Range[Float8]]](m, "nummultirange") // There is no PostgreSQL builtin float8multirange so map it to nummultirange.
registerDefaultPgTypeVariants[Int2](m, "int2")
registerDefaultPgTypeVariants[Int4](m, "int4")
registerDefaultPgTypeVariants[Range[Int4]](m, "int4range")
registerDefaultPgTypeVariants[Multirange[Range[Int4]]](m, "int4multirange")
registerDefaultPgTypeVariants[Int8](m, "int8")
registerDefaultPgTypeVariants[Range[Int8]](m, "int8range")
registerDefaultPgTypeVariants[Multirange[Range[Int8]]](m, "int8multirange")
registerDefaultPgTypeVariants[Interval](m, "interval")
registerDefaultPgTypeVariants[Line](m, "line")
registerDefaultPgTypeVariants[Lseg](m, "lseg")
registerDefaultPgTypeVariants[Numeric](m, "numeric")
registerDefaultPgTypeVariants[Range[Numeric]](m, "numrange")
registerDefaultPgTypeVariants[Multirange[Range[Numeric]]](m, "nummultirange")
registerDefaultPgTypeVariants[Path](m, "path")
registerDefaultPgTypeVariants[Point](m, "point")
registerDefaultPgTypeVariants[Polygon](m, "polygon")
registerDefaultPgTypeVariants[TID](m, "tid")
registerDefaultPgTypeVariants[Text](m, "text")
registerDefaultPgTypeVariants[Time](m, "time")
registerDefaultPgTypeVariants[Timestamp](m, "timestamp")
registerDefaultPgTypeVariants[Timestamptz](m, "timestamptz")
registerDefaultPgTypeVariants[Range[Timestamp]](m, "tsrange")
registerDefaultPgTypeVariants[Multirange[Range[Timestamp]]](m, "tsmultirange")
registerDefaultPgTypeVariants[Range[Timestamptz]](m, "tstzrange")
registerDefaultPgTypeVariants[Multirange[Range[Timestamptz]]](m, "tstzmultirange")
registerDefaultPgTypeVariants[UUID](m, "uuid")
return m
}
// RegisterType registers a data type with the Map. t must not be mutated after it is registered.
func (m *Map) RegisterType(t *Type) {
m.oidToType[t.OID] = t
m.nameToType[t.Name] = t
@@ -449,13 +277,22 @@ func (m *Map) RegisterDefaultPgType(value any, name string) {
}
}
// TypeForOID returns the Type registered for the given OID. The returned Type must not be mutated.
func (m *Map) TypeForOID(oid uint32) (*Type, bool) {
dt, ok := m.oidToType[oid]
if dt, ok := m.oidToType[oid]; ok {
return dt, true
}
dt, ok := defaultMap.oidToType[oid]
return dt, ok
}
// TypeForName returns the Type registered for the given name. The returned Type must not be mutated.
func (m *Map) TypeForName(name string) (*Type, bool) {
dt, ok := m.nameToType[name]
if dt, ok := m.nameToType[name]; ok {
return dt, true
}
dt, ok := defaultMap.nameToType[name]
return dt, ok
}
@@ -463,30 +300,39 @@ func (m *Map) buildReflectTypeToType() {
m.reflectTypeToType = make(map[reflect.Type]*Type)
for reflectType, name := range m.reflectTypeToName {
if dt, ok := m.nameToType[name]; ok {
if dt, ok := m.TypeForName(name); ok {
m.reflectTypeToType[reflectType] = dt
}
}
}
// TypeForValue finds a data type suitable for v. Use RegisterType to register types that can encode and decode
// themselves. Use RegisterDefaultPgType to register that can be handled by a registered data type.
// themselves. Use RegisterDefaultPgType to register types that can be handled by a registered data type. The returned Type
// must not be mutated.
func (m *Map) TypeForValue(v any) (*Type, bool) {
if m.reflectTypeToType == nil {
m.buildReflectTypeToType()
}
dt, ok := m.reflectTypeToType[reflect.TypeOf(v)]
if dt, ok := m.reflectTypeToType[reflect.TypeOf(v)]; ok {
return dt, true
}
dt, ok := defaultMap.reflectTypeToType[reflect.TypeOf(v)]
return dt, ok
}
// FormatCodeForOID returns the preferred format code for type oid. If the type is not registered it returns the text
// format code.
func (m *Map) FormatCodeForOID(oid uint32) int16 {
fc, ok := m.oidToFormatCode[oid]
if ok {
if fc, ok := m.oidToFormatCode[oid]; ok {
return fc
}
if fc, ok := defaultMap.oidToFormatCode[oid]; ok {
return fc
}
return TextFormatCode
}
@@ -587,6 +433,14 @@ func (plan *scanPlanFail) Scan(src []byte, dst any) error {
return plan.Scan(src, dst)
}
}
for oid := range defaultMap.oidToType {
if _, ok := plan.m.oidToType[oid]; !ok {
plan := plan.m.planScan(oid, plan.formatCode, dst)
if _, ok := plan.(*scanPlanFail); !ok {
return plan.Scan(src, dst)
}
}
}
}
var format string
@@ -600,7 +454,7 @@ func (plan *scanPlanFail) Scan(src []byte, dst any) error {
}
var dataTypeName string
if t, ok := plan.m.oidToType[plan.oid]; ok {
if t, ok := plan.m.TypeForOID(plan.oid); ok {
dataTypeName = t.Name
} else {
dataTypeName = "unknown type"
@@ -666,6 +520,7 @@ var elemKindToPointerTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]refl
reflect.Float32: reflect.TypeOf(new(float32)),
reflect.Float64: reflect.TypeOf(new(float64)),
reflect.String: reflect.TypeOf(new(string)),
reflect.Bool: reflect.TypeOf(new(bool)),
}
type underlyingTypeScanPlan struct {
@@ -1089,15 +944,16 @@ func TryWrapPtrSliceScanPlan(target any) (plan WrappedScanPlanNextSetter, nextVa
return &wrapPtrSliceScanPlan[time.Time]{}, (*FlatArray[time.Time])(target), true
}
targetValue := reflect.ValueOf(target)
if targetValue.Kind() != reflect.Ptr {
targetType := reflect.TypeOf(target)
if targetType.Kind() != reflect.Ptr {
return nil, nil, false
}
targetElemValue := targetValue.Elem()
targetElemType := targetType.Elem()
if targetElemValue.Kind() == reflect.Slice {
return &wrapPtrSliceReflectScanPlan{}, &anySliceArrayReflect{slice: targetElemValue}, true
if targetElemType.Kind() == reflect.Slice {
slice := reflect.New(targetElemType).Elem()
return &wrapPtrSliceReflectScanPlan{}, &anySliceArrayReflect{slice: slice}, true
}
return nil, nil, false
}
@@ -1198,6 +1054,10 @@ func (m *Map) PlanScan(oid uint32, formatCode int16, target any) ScanPlan {
}
func (m *Map) planScan(oid uint32, formatCode int16, target any) ScanPlan {
if target == nil {
return &scanPlanFail{m: m, oid: oid, formatCode: formatCode}
}
if _, ok := target.(*UndecodedBytes); ok {
return scanPlanAnyToUndecodedBytes{}
}
@@ -1514,6 +1374,7 @@ var kindToTypes map[reflect.Kind]reflect.Type = map[reflect.Kind]reflect.Type{
reflect.Float32: reflect.TypeOf(float32(0)),
reflect.Float64: reflect.TypeOf(float64(0)),
reflect.String: reflect.TypeOf(""),
reflect.Bool: reflect.TypeOf(false),
}
type underlyingTypeEncodePlan struct {
@@ -2039,13 +1900,13 @@ func newEncodeError(value any, m *Map, oid uint32, formatCode int16, err error)
}
var dataTypeName string
if t, ok := m.oidToType[oid]; ok {
if t, ok := m.TypeForOID(oid); ok {
dataTypeName = t.Name
} else {
dataTypeName = "unknown type"
}
return fmt.Errorf("unable to encode %#v into %s format for %s (OID %d): %s", value, format, dataTypeName, oid, err)
return fmt.Errorf("unable to encode %#v into %s format for %s (OID %d): %w", value, format, dataTypeName, oid, err)
}
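// Illustrative sketch of why %w matters here: wrapping keeps the cause visible
// to errors.Is/errors.As, which the previous %s formatting discarded.
// errValueTooBig is a hypothetical sentinel, not part of pgtype.
var errValueTooBig = errors.New("value too big")

func demoWrappedEncodeError() {
	err := fmt.Errorf("unable to encode %#v into binary format for int2 (OID 21): %w", 1<<20, errValueTooBig)
	fmt.Println(errors.Is(err, errValueTooBig)) // true
}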
// Encode appends the encoded bytes of value to buf. If value is the SQL value NULL then append nothing and return

223
vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go generated vendored Normal file
View File

@@ -0,0 +1,223 @@
package pgtype
import (
"net"
"net/netip"
"reflect"
"sync"
"time"
)
var (
// defaultMap contains default mappings between PostgreSQL server types and Go type handling logic.
defaultMap *Map
defaultMapInitOnce = sync.Once{}
)
func initDefaultMap() {
defaultMap = &Map{
oidToType: make(map[uint32]*Type),
nameToType: make(map[string]*Type),
reflectTypeToName: make(map[reflect.Type]string),
oidToFormatCode: make(map[uint32]int16),
memoizedScanPlans: make(map[uint32]map[reflect.Type][2]ScanPlan),
memoizedEncodePlans: make(map[uint32]map[reflect.Type][2]EncodePlan),
TryWrapEncodePlanFuncs: []TryWrapEncodePlanFunc{
TryWrapDerefPointerEncodePlan,
TryWrapBuiltinTypeEncodePlan,
TryWrapFindUnderlyingTypeEncodePlan,
TryWrapStructEncodePlan,
TryWrapSliceEncodePlan,
TryWrapMultiDimSliceEncodePlan,
TryWrapArrayEncodePlan,
},
TryWrapScanPlanFuncs: []TryWrapScanPlanFunc{
TryPointerPointerScanPlan,
TryWrapBuiltinTypeScanPlan,
TryFindUnderlyingTypeScanPlan,
TryWrapStructScanPlan,
TryWrapPtrSliceScanPlan,
TryWrapPtrMultiDimSliceScanPlan,
TryWrapPtrArrayScanPlan,
},
}
// Base types
defaultMap.RegisterType(&Type{Name: "aclitem", OID: ACLItemOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
defaultMap.RegisterType(&Type{Name: "bit", OID: BitOID, Codec: BitsCodec{}})
defaultMap.RegisterType(&Type{Name: "bool", OID: BoolOID, Codec: BoolCodec{}})
defaultMap.RegisterType(&Type{Name: "box", OID: BoxOID, Codec: BoxCodec{}})
defaultMap.RegisterType(&Type{Name: "bpchar", OID: BPCharOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "bytea", OID: ByteaOID, Codec: ByteaCodec{}})
defaultMap.RegisterType(&Type{Name: "char", OID: QCharOID, Codec: QCharCodec{}})
defaultMap.RegisterType(&Type{Name: "cid", OID: CIDOID, Codec: Uint32Codec{}})
defaultMap.RegisterType(&Type{Name: "cidr", OID: CIDROID, Codec: InetCodec{}})
defaultMap.RegisterType(&Type{Name: "circle", OID: CircleOID, Codec: CircleCodec{}})
defaultMap.RegisterType(&Type{Name: "date", OID: DateOID, Codec: DateCodec{}})
defaultMap.RegisterType(&Type{Name: "float4", OID: Float4OID, Codec: Float4Codec{}})
defaultMap.RegisterType(&Type{Name: "float8", OID: Float8OID, Codec: Float8Codec{}})
defaultMap.RegisterType(&Type{Name: "inet", OID: InetOID, Codec: InetCodec{}})
defaultMap.RegisterType(&Type{Name: "int2", OID: Int2OID, Codec: Int2Codec{}})
defaultMap.RegisterType(&Type{Name: "int4", OID: Int4OID, Codec: Int4Codec{}})
defaultMap.RegisterType(&Type{Name: "int8", OID: Int8OID, Codec: Int8Codec{}})
defaultMap.RegisterType(&Type{Name: "interval", OID: IntervalOID, Codec: IntervalCodec{}})
defaultMap.RegisterType(&Type{Name: "json", OID: JSONOID, Codec: JSONCodec{}})
defaultMap.RegisterType(&Type{Name: "jsonb", OID: JSONBOID, Codec: JSONBCodec{}})
defaultMap.RegisterType(&Type{Name: "jsonpath", OID: JSONPathOID, Codec: &TextFormatOnlyCodec{TextCodec{}}})
defaultMap.RegisterType(&Type{Name: "line", OID: LineOID, Codec: LineCodec{}})
defaultMap.RegisterType(&Type{Name: "lseg", OID: LsegOID, Codec: LsegCodec{}})
defaultMap.RegisterType(&Type{Name: "macaddr", OID: MacaddrOID, Codec: MacaddrCodec{}})
defaultMap.RegisterType(&Type{Name: "name", OID: NameOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "numeric", OID: NumericOID, Codec: NumericCodec{}})
defaultMap.RegisterType(&Type{Name: "oid", OID: OIDOID, Codec: Uint32Codec{}})
defaultMap.RegisterType(&Type{Name: "path", OID: PathOID, Codec: PathCodec{}})
defaultMap.RegisterType(&Type{Name: "point", OID: PointOID, Codec: PointCodec{}})
defaultMap.RegisterType(&Type{Name: "polygon", OID: PolygonOID, Codec: PolygonCodec{}})
defaultMap.RegisterType(&Type{Name: "record", OID: RecordOID, Codec: RecordCodec{}})
defaultMap.RegisterType(&Type{Name: "text", OID: TextOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "tid", OID: TIDOID, Codec: TIDCodec{}})
defaultMap.RegisterType(&Type{Name: "time", OID: TimeOID, Codec: TimeCodec{}})
defaultMap.RegisterType(&Type{Name: "timestamp", OID: TimestampOID, Codec: TimestampCodec{}})
defaultMap.RegisterType(&Type{Name: "timestamptz", OID: TimestamptzOID, Codec: TimestamptzCodec{}})
defaultMap.RegisterType(&Type{Name: "unknown", OID: UnknownOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "uuid", OID: UUIDOID, Codec: UUIDCodec{}})
defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
defaultMap.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
defaultMap.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
// Range types
defaultMap.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[DateOID]}})
defaultMap.RegisterType(&Type{Name: "int4range", OID: Int4rangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[Int4OID]}})
defaultMap.RegisterType(&Type{Name: "int8range", OID: Int8rangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[Int8OID]}})
defaultMap.RegisterType(&Type{Name: "numrange", OID: NumrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[NumericOID]}})
defaultMap.RegisterType(&Type{Name: "tsrange", OID: TsrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[TimestampOID]}})
defaultMap.RegisterType(&Type{Name: "tstzrange", OID: TstzrangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[TimestamptzOID]}})
// Multirange types
defaultMap.RegisterType(&Type{Name: "datemultirange", OID: DatemultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[DaterangeOID]}})
defaultMap.RegisterType(&Type{Name: "int4multirange", OID: Int4multirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[Int4rangeOID]}})
defaultMap.RegisterType(&Type{Name: "int8multirange", OID: Int8multirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[Int8rangeOID]}})
defaultMap.RegisterType(&Type{Name: "nummultirange", OID: NummultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[NumrangeOID]}})
defaultMap.RegisterType(&Type{Name: "tsmultirange", OID: TsmultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[TsrangeOID]}})
defaultMap.RegisterType(&Type{Name: "tstzmultirange", OID: TstzmultirangeOID, Codec: &MultirangeCodec{ElementType: defaultMap.oidToType[TstzrangeOID]}})
// Array types
defaultMap.RegisterType(&Type{Name: "_aclitem", OID: ACLItemArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[ACLItemOID]}})
defaultMap.RegisterType(&Type{Name: "_bit", OID: BitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BitOID]}})
defaultMap.RegisterType(&Type{Name: "_bool", OID: BoolArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BoolOID]}})
defaultMap.RegisterType(&Type{Name: "_box", OID: BoxArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BoxOID]}})
defaultMap.RegisterType(&Type{Name: "_bpchar", OID: BPCharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[BPCharOID]}})
defaultMap.RegisterType(&Type{Name: "_bytea", OID: ByteaArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[ByteaOID]}})
defaultMap.RegisterType(&Type{Name: "_char", OID: QCharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[QCharOID]}})
defaultMap.RegisterType(&Type{Name: "_cid", OID: CIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CIDOID]}})
defaultMap.RegisterType(&Type{Name: "_cidr", OID: CIDRArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CIDROID]}})
defaultMap.RegisterType(&Type{Name: "_circle", OID: CircleArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[CircleOID]}})
defaultMap.RegisterType(&Type{Name: "_date", OID: DateArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[DateOID]}})
defaultMap.RegisterType(&Type{Name: "_daterange", OID: DaterangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[DaterangeOID]}})
defaultMap.RegisterType(&Type{Name: "_float4", OID: Float4ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Float4OID]}})
defaultMap.RegisterType(&Type{Name: "_float8", OID: Float8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Float8OID]}})
defaultMap.RegisterType(&Type{Name: "_inet", OID: InetArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[InetOID]}})
defaultMap.RegisterType(&Type{Name: "_int2", OID: Int2ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int2OID]}})
defaultMap.RegisterType(&Type{Name: "_int4", OID: Int4ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int4OID]}})
defaultMap.RegisterType(&Type{Name: "_int4range", OID: Int4rangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int4rangeOID]}})
defaultMap.RegisterType(&Type{Name: "_int8", OID: Int8ArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int8OID]}})
defaultMap.RegisterType(&Type{Name: "_int8range", OID: Int8rangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[Int8rangeOID]}})
defaultMap.RegisterType(&Type{Name: "_interval", OID: IntervalArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[IntervalOID]}})
defaultMap.RegisterType(&Type{Name: "_json", OID: JSONArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONOID]}})
defaultMap.RegisterType(&Type{Name: "_jsonb", OID: JSONBArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONBOID]}})
defaultMap.RegisterType(&Type{Name: "_jsonpath", OID: JSONPathArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[JSONPathOID]}})
defaultMap.RegisterType(&Type{Name: "_line", OID: LineArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[LineOID]}})
defaultMap.RegisterType(&Type{Name: "_lseg", OID: LsegArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[LsegOID]}})
defaultMap.RegisterType(&Type{Name: "_macaddr", OID: MacaddrArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[MacaddrOID]}})
defaultMap.RegisterType(&Type{Name: "_name", OID: NameArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NameOID]}})
defaultMap.RegisterType(&Type{Name: "_numeric", OID: NumericArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NumericOID]}})
defaultMap.RegisterType(&Type{Name: "_numrange", OID: NumrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[NumrangeOID]}})
defaultMap.RegisterType(&Type{Name: "_oid", OID: OIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[OIDOID]}})
defaultMap.RegisterType(&Type{Name: "_path", OID: PathArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PathOID]}})
defaultMap.RegisterType(&Type{Name: "_point", OID: PointArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PointOID]}})
defaultMap.RegisterType(&Type{Name: "_polygon", OID: PolygonArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[PolygonOID]}})
defaultMap.RegisterType(&Type{Name: "_record", OID: RecordArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[RecordOID]}})
defaultMap.RegisterType(&Type{Name: "_text", OID: TextArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TextOID]}})
defaultMap.RegisterType(&Type{Name: "_tid", OID: TIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TIDOID]}})
defaultMap.RegisterType(&Type{Name: "_time", OID: TimeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimeOID]}})
defaultMap.RegisterType(&Type{Name: "_timestamp", OID: TimestampArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimestampOID]}})
defaultMap.RegisterType(&Type{Name: "_timestamptz", OID: TimestamptzArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TimestamptzOID]}})
defaultMap.RegisterType(&Type{Name: "_tsrange", OID: TsrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TsrangeOID]}})
defaultMap.RegisterType(&Type{Name: "_tstzrange", OID: TstzrangeArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[TstzrangeOID]}})
defaultMap.RegisterType(&Type{Name: "_uuid", OID: UUIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[UUIDOID]}})
defaultMap.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarbitOID]}})
defaultMap.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarcharOID]}})
defaultMap.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XIDOID]}})
// Integer types that directly map to a PostgreSQL type
registerDefaultPgTypeVariants[int16](defaultMap, "int2")
registerDefaultPgTypeVariants[int32](defaultMap, "int4")
registerDefaultPgTypeVariants[int64](defaultMap, "int8")
// Integer types that do not have a direct match to a PostgreSQL type
registerDefaultPgTypeVariants[int8](defaultMap, "int8")
registerDefaultPgTypeVariants[int](defaultMap, "int8")
registerDefaultPgTypeVariants[uint8](defaultMap, "int8")
registerDefaultPgTypeVariants[uint16](defaultMap, "int8")
registerDefaultPgTypeVariants[uint32](defaultMap, "int8")
registerDefaultPgTypeVariants[uint64](defaultMap, "numeric")
registerDefaultPgTypeVariants[uint](defaultMap, "numeric")
registerDefaultPgTypeVariants[float32](defaultMap, "float4")
registerDefaultPgTypeVariants[float64](defaultMap, "float8")
registerDefaultPgTypeVariants[bool](defaultMap, "bool")
registerDefaultPgTypeVariants[time.Time](defaultMap, "timestamptz")
registerDefaultPgTypeVariants[time.Duration](defaultMap, "interval")
registerDefaultPgTypeVariants[string](defaultMap, "text")
registerDefaultPgTypeVariants[[]byte](defaultMap, "bytea")
registerDefaultPgTypeVariants[net.IP](defaultMap, "inet")
registerDefaultPgTypeVariants[net.IPNet](defaultMap, "cidr")
registerDefaultPgTypeVariants[netip.Addr](defaultMap, "inet")
registerDefaultPgTypeVariants[netip.Prefix](defaultMap, "cidr")
// pgtype provided structs
registerDefaultPgTypeVariants[Bits](defaultMap, "varbit")
registerDefaultPgTypeVariants[Bool](defaultMap, "bool")
registerDefaultPgTypeVariants[Box](defaultMap, "box")
registerDefaultPgTypeVariants[Circle](defaultMap, "circle")
registerDefaultPgTypeVariants[Date](defaultMap, "date")
registerDefaultPgTypeVariants[Range[Date]](defaultMap, "daterange")
registerDefaultPgTypeVariants[Multirange[Range[Date]]](defaultMap, "datemultirange")
registerDefaultPgTypeVariants[Float4](defaultMap, "float4")
registerDefaultPgTypeVariants[Float8](defaultMap, "float8")
registerDefaultPgTypeVariants[Range[Float8]](defaultMap, "numrange") // There is no PostgreSQL builtin float8range so map it to numrange.
registerDefaultPgTypeVariants[Multirange[Range[Float8]]](defaultMap, "nummultirange") // There is no PostgreSQL builtin float8multirange so map it to nummultirange.
registerDefaultPgTypeVariants[Int2](defaultMap, "int2")
registerDefaultPgTypeVariants[Int4](defaultMap, "int4")
registerDefaultPgTypeVariants[Range[Int4]](defaultMap, "int4range")
registerDefaultPgTypeVariants[Multirange[Range[Int4]]](defaultMap, "int4multirange")
registerDefaultPgTypeVariants[Int8](defaultMap, "int8")
registerDefaultPgTypeVariants[Range[Int8]](defaultMap, "int8range")
registerDefaultPgTypeVariants[Multirange[Range[Int8]]](defaultMap, "int8multirange")
registerDefaultPgTypeVariants[Interval](defaultMap, "interval")
registerDefaultPgTypeVariants[Line](defaultMap, "line")
registerDefaultPgTypeVariants[Lseg](defaultMap, "lseg")
registerDefaultPgTypeVariants[Numeric](defaultMap, "numeric")
registerDefaultPgTypeVariants[Range[Numeric]](defaultMap, "numrange")
registerDefaultPgTypeVariants[Multirange[Range[Numeric]]](defaultMap, "nummultirange")
registerDefaultPgTypeVariants[Path](defaultMap, "path")
registerDefaultPgTypeVariants[Point](defaultMap, "point")
registerDefaultPgTypeVariants[Polygon](defaultMap, "polygon")
registerDefaultPgTypeVariants[TID](defaultMap, "tid")
registerDefaultPgTypeVariants[Text](defaultMap, "text")
registerDefaultPgTypeVariants[Time](defaultMap, "time")
registerDefaultPgTypeVariants[Timestamp](defaultMap, "timestamp")
registerDefaultPgTypeVariants[Timestamptz](defaultMap, "timestamptz")
registerDefaultPgTypeVariants[Range[Timestamp]](defaultMap, "tsrange")
registerDefaultPgTypeVariants[Multirange[Range[Timestamp]]](defaultMap, "tsmultirange")
registerDefaultPgTypeVariants[Range[Timestamptz]](defaultMap, "tstzrange")
registerDefaultPgTypeVariants[Multirange[Range[Timestamptz]]](defaultMap, "tstzmultirange")
registerDefaultPgTypeVariants[UUID](defaultMap, "uuid")
defaultMap.buildReflectTypeToType()
}
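
A minimal sketch of what the lazy defaults buy: NewMap no longer copies every registration, yet lookups on a fresh Map still resolve through the shared default map.

package main

import (
	"fmt"

	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	m := pgtype.NewMap() // allocates only the small per-Map tables
	if t, ok := m.TypeForOID(pgtype.TextOID); ok {
		fmt.Println(t.Name) // "text", served by the shared default registrations
	}
}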

View File

@@ -3,6 +3,7 @@ package pgtype
import (
"database/sql/driver"
"encoding/binary"
"encoding/json"
"fmt"
"strings"
"time"
@@ -66,6 +67,55 @@ func (ts Timestamp) Value() (driver.Value, error) {
return ts.Time, nil
}
func (ts Timestamp) MarshalJSON() ([]byte, error) {
if !ts.Valid {
return []byte("null"), nil
}
var s string
switch ts.InfinityModifier {
case Finite:
s = ts.Time.Format(time.RFC3339Nano)
case Infinity:
s = "infinity"
case NegativeInfinity:
s = "-infinity"
}
return json.Marshal(s)
}
func (ts *Timestamp) UnmarshalJSON(b []byte) error {
var s *string
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
if s == nil {
*ts = Timestamp{}
return nil
}
switch *s {
case "infinity":
*ts = Timestamp{Valid: true, InfinityModifier: Infinity}
case "-infinity":
*ts = Timestamp{Valid: true, InfinityModifier: -Infinity}
default:
// PostgreSQL uses ISO 8601 for to_json function and casting from a string to timestamptz
tim, err := time.Parse(time.RFC3339Nano, *s)
if err != nil {
return err
}
*ts = Timestamp{Time: tim, Valid: true}
}
return nil
}
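// Illustrative in-package sketch of the two methods above (round trip through encoding/json):
func demoTimestampJSON() {
	ts := Timestamp{Time: time.Date(2023, 7, 20, 12, 0, 0, 0, time.UTC), Valid: true}
	b, _ := json.Marshal(ts)
	fmt.Println(string(b)) // "2023-07-20T12:00:00Z"
	var back Timestamp
	_ = json.Unmarshal(b, &back)              // back.Valid == true, same instant
	_ = json.Unmarshal([]byte(`null`), &back) // back == Timestamp{} (SQL NULL)
}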
type TimestampCodec struct{}
func (TimestampCodec) FormatSupported(format int16) bool {

View File

@@ -28,12 +28,16 @@ type Rows interface {
// to call Close after rows is already closed.
Close()
// Err returns any error that occurred while reading.
// Err returns any error that occurred while reading. Err must only be called after the Rows is closed (either by
// calling Close or by Next returning false). If it is called early it may return nil even if there was an error
// executing the query.
Err() error
// CommandTag returns the command tag from this query. It is only available after Rows is closed.
CommandTag() pgconn.CommandTag
// FieldDescriptions returns the field descriptions of the columns. It may return nil. In particular this can occur
// when there was an error executing the query.
FieldDescriptions() []pgconn.FieldDescription
// Next prepares the next row for reading. It returns true if there is another
@@ -533,13 +537,11 @@ func (rs *positionalStructRowScanner) appendScanTargets(dstElemValue reflect.Val
for i := 0; i < dstElemType.NumField(); i++ {
sf := dstElemType.Field(i)
if sf.PkgPath == "" {
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
} else {
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
}
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
if sf.Anonymous && sf.Type.Kind() == reflect.Struct {
scanTargets = rs.appendScanTargets(dstElemValue.Field(i), scanTargets)
} else if sf.PkgPath == "" {
scanTargets = append(scanTargets, dstElemValue.Field(i).Addr().Interface())
}
}
@@ -565,8 +567,28 @@ func RowToAddrOfStructByName[T any](row CollectableRow) (*T, error) {
return &value, err
}
// RowToStructByNameLax returns a T scanned from row. T must be a struct. T must have greater than or equal number of named public
// fields as row has fields. The row and T fields will be matched by name. The match is case-insensitive. The database
// column name can be overridden with a "db" struct tag. If the "db" struct tag is "-" then the field will be ignored.
func RowToStructByNameLax[T any](row CollectableRow) (T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return value, err
}
// RowToAddrOfStructByNameLax returns the address of a T scanned from row. T must be a struct. T must have greater than or
// equal number of named public fields as row has fields. The row and T fields will be matched by name. The match is
// case-insensitive. The database column name can be overridden with a "db" struct tag. If the "db" struct tag is "-"
// then the field will be ignored.
func RowToAddrOfStructByNameLax[T any](row CollectableRow) (*T, error) {
var value T
err := row.Scan(&namedStructRowScanner{ptrToStruct: &value, lax: true})
return &value, err
}
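// Illustrative sketch of the lax variants above; the table and column names are
// assumed. Extra struct fields without a matching column are simply skipped.
type product struct {
	ID    int64
	Name  string
	Price float64 // no "price" column in the query below; tolerated by the lax scanner
}

func demoLax(ctx context.Context, conn *Conn) ([]product, error) {
	rows, err := conn.Query(ctx, "select id, name from products")
	if err != nil {
		return nil, err
	}
	// CollectRows closes rows and surfaces rows.Err() for us.
	return CollectRows(rows, RowToStructByNameLax[product])
}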
type namedStructRowScanner struct {
ptrToStruct any
lax bool
}
func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
@@ -578,7 +600,6 @@ func (rs *namedStructRowScanner) ScanRow(rows Rows) error {
dstElemValue := dstValue.Elem()
scanTargets, err := rs.appendScanTargets(dstElemValue, nil, rows.FieldDescriptions())
if err != nil {
return err
}
@@ -638,7 +659,13 @@ func (rs *namedStructRowScanner) appendScanTargets(dstElemValue reflect.Value, s
colName = sf.Name
}
fpos := fieldPosByName(fldDescs, colName)
if fpos == -1 || fpos >= len(scanTargets) {
if fpos == -1 {
if rs.lax {
continue
}
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
if fpos >= len(scanTargets) && !rs.lax {
return nil, fmt.Errorf("cannot find field %s in returned row", colName)
}
scanTargets[fpos] = dstElemValue.Field(i).Addr().Interface()

View File

@@ -44,6 +44,10 @@ type TxOptions struct {
IsoLevel TxIsoLevel
AccessMode TxAccessMode
DeferrableMode TxDeferrableMode
// BeginQuery is the SQL query that will be executed to begin the transaction. This allows using non-standard syntax
// such as BEGIN PRIORITY HIGH with CockroachDB. If set this will override the other settings.
BeginQuery string
}
var emptyTxOptions TxOptions
@@ -53,6 +57,10 @@ func (txOptions TxOptions) beginSQL() string {
return "begin"
}
if txOptions.BeginQuery != "" {
return txOptions.BeginQuery
}
var buf strings.Builder
buf.Grow(64) // 64 - maximum length of string with available options
buf.WriteString("begin")
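
A minimal usage sketch of BeginQuery (the priority clause is CockroachDB syntax and only an example; any complete begin statement can be supplied):

func beginHighPriority(ctx context.Context, conn *pgx.Conn) (pgx.Tx, error) {
	// When BeginQuery is set it overrides IsoLevel, AccessMode and DeferrableMode.
	return conn.BeginTx(ctx, pgx.TxOptions{BeginQuery: "begin priority high"})
}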

View File

@@ -16,6 +16,10 @@ This package provides various compression algorithms.
# changelog
* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792

25
vendor/github.com/klauspost/compress/SECURITY.md generated vendored Normal file
View File

@@ -0,0 +1,25 @@
# Security Policy
## Supported Versions
Security updates are applied only to the latest release.
## Vulnerability Definition
A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability.
Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
## Reporting a Vulnerability
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible, please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.

View File

@@ -90,9 +90,8 @@ type advancedState struct {
ii uint16 // position of last match, intended to overflow to reset.
// input window: unprocessed data is window[index:windowEnd]
index int
estBitsPerByte int
hashMatch [maxMatchLength + minMatchLength]uint32
index int
hashMatch [maxMatchLength + minMatchLength]uint32
// Input hash chains
// hashHead[hashValue] contains the largest inputIndex with the specified hash value

View File

@@ -34,11 +34,6 @@ const (
// Should preferably be a multiple of 6, since
// we accumulate 6 bytes between writes to the buffer.
bufferFlushSize = 246
// bufferSize is the actual output byte buffer size.
// It must have additional headroom for a flush
// which can contain up to 8 bytes.
bufferSize = bufferFlushSize + 8
)
// Minimum length code that emits bits.

View File

@@ -42,25 +42,6 @@ func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
}
}
// siftDownByFreq implements the heap property on data[lo, hi).
// first is an offset into the array where the root of the heap lies.
func siftDownByFreq(data []literalNode, lo, hi, first int) {
root := lo
for {
child := 2*root + 1
if child >= hi {
break
}
if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
child++
}
if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
return
}
data[first+root], data[first+child] = data[first+child], data[first+root]
root = child
}
}
func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {

View File

@@ -13,14 +13,6 @@ type bitWriter struct {
out []byte
}
// bitMask16 is bitmasks. Has extra to avoid bounds check.
var bitMask16 = [32]uint16{
0, 1, 3, 7, 0xF, 0x1F,
0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {

View File

@@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
switch d.actualTableLog {
case 8:
const shift = 8 - 8
const shift = 0
for br.off >= 4 {
br.fillFast()
v := dt[uint8(br.value>>(56+shift))]

View File

@@ -87,18 +87,6 @@ func emitCopy(dst []byte, offset, length int) int {
return i + 2
}
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}

View File

@@ -742,7 +742,6 @@ searchDict:
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>8, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint32(s - 2)
table[currHash] = uint32(s - 1)
cv = load64(src, s)

View File

@@ -157,7 +157,6 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
index0 := base + 1
index1 := s - 2
cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@@ -269,18 +268,21 @@ func encodeBlockBetterGo(dst, src []byte) (d int) {
lTable[hash7(cv0, lTableBits)] = uint32(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1)
// lTable could be postponed, but very minor difference.
lTable[hash7(cv1, lTableBits)] = uint32(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}
@@ -459,12 +461,14 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}
@@ -599,7 +603,6 @@ searchDict:
if s >= sLimit {
break searchDict
}
cv = load64(src, s)
// Index in-between
index0 := base + 1
index1 := s - 2
@@ -865,12 +868,14 @@ searchDict:
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}
@@ -961,7 +966,6 @@ searchDict:
index0 := base + 1
index1 := s - 2
cv = load64(src, s)
for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
@@ -1079,12 +1083,14 @@ searchDict:
index1 -= 1
cv = load64(src, s)
// index every second long in between.
for index0 < index1 {
// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0)
lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1)
lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2)
index0 += 2
index1 -= 2
index2 += 2
}
}

File diff suppressed because it is too large

View File

@@ -771,7 +771,7 @@ func (w *Writer) closeIndex(idx bool) ([]byte, error) {
}
var index []byte
if w.err(nil) == nil && w.writer != nil {
if w.err(err) == nil && w.writer != nil {
// Create index.
if idx {
compSize := int64(-1)

View File

@@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd"
// Create a reader that caches decompressors.
// For this operation type we supply a nil Reader.
var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
// Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder.

View File

@@ -592,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
}
seq.fse.setRLE(symb)
if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v)
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
println("Reading table for", tableIndex(i))

View File

@@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
}
}
// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
// The slice content can be arbitrary data.
func WithDecoderDictRaw(id uint32, content []byte) DOption {
return func(o *decoderOptions) error {

View File

@@ -133,8 +133,7 @@ encodeLoop:
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
var length int32
length = 4 + e.matchlen(s+6, repIndex+4, src)
length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
@@ -645,8 +644,7 @@ encodeLoop:
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
var length int32
length = 4 + e.matchlen(s+6, repIndex+4, src)
length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)

View File

@@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption {
}
// No need to waste our time.
if n == 1 {
o.pad = 0
n = 0
}
if n > 1<<30 {
return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")

View File

@@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error {
switch err {
case io.EOF, io.ErrUnexpectedEOF:
return io.EOF
default:
return err
case nil:
signature[0] = b[0]
default:
return err
}
// Read the rest, don't allow io.ErrUnexpectedEOF
b, err = br.readSmall(3)
switch err {
case io.EOF:
return io.EOF
default:
return err
case nil:
copy(signature[1:], b)
default:
return err
}
if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {

View File

@@ -0,0 +1,16 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
// matchLen returns how many bytes match in a and b
//
// It assumes that:
//
// len(a) <= len(b) and len(a) > 0
//
//go:noescape
func matchLen(a []byte, b []byte) int

View File

@@ -0,0 +1,68 @@
// Copied from S2 implementation.
//go:build !appengine && !noasm && gc && !noasm
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
MOVQ a_len+8(FP), DX
// matchLen
XORL SI, SI
CMPL DX, $0x08
JB matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
SARQ $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JAE matchlen_loopback_standalone
matchlen_match4_standalone:
CMPL DX, $0x04
JB matchlen_match2_standalone
MOVL (AX)(SI*1), BX
CMPL (CX)(SI*1), BX
JNE matchlen_match2_standalone
LEAL -4(DX), DX
LEAL 4(SI), SI
matchlen_match2_standalone:
CMPL DX, $0x02
JB matchlen_match1_standalone
MOVW (AX)(SI*1), BX
CMPW (CX)(SI*1), BX
JNE matchlen_match1_standalone
LEAL -2(DX), DX
LEAL 2(SI), SI
matchlen_match1_standalone:
CMPL DX, $0x01
JB gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
INCL SI
gen_match_len_end:
MOVQ SI, ret+48(FP)
RET

View File

@@ -0,0 +1,33 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
package zstd
import (
"encoding/binary"
"math/bits"
)
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}

View File

@@ -9,7 +9,6 @@ import (
"errors"
"log"
"math"
"math/bits"
)
// enable debug printing
@@ -106,27 +105,6 @@ func printf(format string, a ...interface{}) {
}
}
// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
}
for i := range a {
if a[i] != b[i] {
break
}
n++
}
return n
}
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
}

View File

@@ -435,6 +435,7 @@ Exit Code 1
| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. |
| SYSEE | SYSENTER and SYSEXIT instructions |
| TBM | AMD Trailing Bit Manipulation |
| TDX_GUEST | Intel Trust Domain Extensions Guest |
| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations |
| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. |
| TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. |

View File

@@ -226,6 +226,7 @@ const (
SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions.
SYSEE // SYSENTER and SYSEXIT instructions
TBM // AMD Trailing Bit Manipulation
TDX_GUEST // Intel Trust Domain Extensions Guest
TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations
TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX.
@@ -1186,13 +1187,8 @@ func support() flagSet {
fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP)
fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD)
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx&(1<<14) != 0, PREFETCHI)
// CPUID.(EAX=7, ECX=1).EAX
eax1, _, _, _ := cpuidex(7, 1)
eax1, _, _, edx1 := cpuidex(7, 1)
fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI)
fs.setIf(eax1&(1<<7) != 0, CMPCCXADD)
fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL)
@@ -1202,6 +1198,11 @@ func support() flagSet {
fs.setIf(eax1&(1<<23) != 0, AVXIFMA)
fs.setIf(eax1&(1<<26) != 0, LAM)
// CPUID.(EAX=7, ECX=1).EDX
fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8)
fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT)
fs.setIf(edx1&(1<<14) != 0, PREFETCHI)
// Only detect AVX-512 features if XGETBV is supported
if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
// Check for OS support
@@ -1393,6 +1394,13 @@ func support() flagSet {
fs.setIf((a>>24)&1 == 1, VMSA_REGPROT)
}
if mfi >= 0x21 {
// Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21).
_, ebx, ecx, edx := cpuid(0x21)
identity := string(valAsString(ebx, edx, ecx))
fs.setIf(identity == "IntelTDX ", TDX_GUEST)
}
return fs
}
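
With the new leaf-0x21 probe above, TDX guest detection is exposed as a regular feature flag. A small sketch of checking it, assuming the standard klauspost/cpuid/v2 API, which populates `cpuid.CPU` at package init:

```go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// cpuid.CPU is filled in automatically at init time.
	if cpuid.CPU.Supports(cpuid.TDX_GUEST) {
		fmt.Println("running inside an Intel TDX guest")
	} else {
		fmt.Println("not a TDX guest")
	}
}
```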

View File

@@ -166,59 +166,60 @@ func _() {
_ = x[SYSCALL-156]
_ = x[SYSEE-157]
_ = x[TBM-158]
_ = x[TLB_FLUSH_NESTED-159]
_ = x[TME-160]
_ = x[TOPEXT-161]
_ = x[TSCRATEMSR-162]
_ = x[TSXLDTRK-163]
_ = x[VAES-164]
_ = x[VMCBCLEAN-165]
_ = x[VMPL-166]
_ = x[VMSA_REGPROT-167]
_ = x[VMX-168]
_ = x[VPCLMULQDQ-169]
_ = x[VTE-170]
_ = x[WAITPKG-171]
_ = x[WBNOINVD-172]
_ = x[WRMSRNS-173]
_ = x[X87-174]
_ = x[XGETBV1-175]
_ = x[XOP-176]
_ = x[XSAVE-177]
_ = x[XSAVEC-178]
_ = x[XSAVEOPT-179]
_ = x[XSAVES-180]
_ = x[AESARM-181]
_ = x[ARMCPUID-182]
_ = x[ASIMD-183]
_ = x[ASIMDDP-184]
_ = x[ASIMDHP-185]
_ = x[ASIMDRDM-186]
_ = x[ATOMICS-187]
_ = x[CRC32-188]
_ = x[DCPOP-189]
_ = x[EVTSTRM-190]
_ = x[FCMA-191]
_ = x[FP-192]
_ = x[FPHP-193]
_ = x[GPA-194]
_ = x[JSCVT-195]
_ = x[LRCPC-196]
_ = x[PMULL-197]
_ = x[SHA1-198]
_ = x[SHA2-199]
_ = x[SHA3-200]
_ = x[SHA512-201]
_ = x[SM3-202]
_ = x[SM4-203]
_ = x[SVE-204]
_ = x[lastID-205]
_ = x[TDX_GUEST-159]
_ = x[TLB_FLUSH_NESTED-160]
_ = x[TME-161]
_ = x[TOPEXT-162]
_ = x[TSCRATEMSR-163]
_ = x[TSXLDTRK-164]
_ = x[VAES-165]
_ = x[VMCBCLEAN-166]
_ = x[VMPL-167]
_ = x[VMSA_REGPROT-168]
_ = x[VMX-169]
_ = x[VPCLMULQDQ-170]
_ = x[VTE-171]
_ = x[WAITPKG-172]
_ = x[WBNOINVD-173]
_ = x[WRMSRNS-174]
_ = x[X87-175]
_ = x[XGETBV1-176]
_ = x[XOP-177]
_ = x[XSAVE-178]
_ = x[XSAVEC-179]
_ = x[XSAVEOPT-180]
_ = x[XSAVES-181]
_ = x[AESARM-182]
_ = x[ARMCPUID-183]
_ = x[ASIMD-184]
_ = x[ASIMDDP-185]
_ = x[ASIMDHP-186]
_ = x[ASIMDRDM-187]
_ = x[ATOMICS-188]
_ = x[CRC32-189]
_ = x[DCPOP-190]
_ = x[EVTSTRM-191]
_ = x[FCMA-192]
_ = x[FP-193]
_ = x[FPHP-194]
_ = x[GPA-195]
_ = x[JSCVT-196]
_ = x[LRCPC-197]
_ = x[PMULL-198]
_ = x[SHA1-199]
_ = x[SHA2-200]
_ = x[SHA3-201]
_ = x[SHA512-202]
_ = x[SM3-203]
_ = x[SM4-204]
_ = x[SVE-205]
_ = x[lastID-206]
_ = x[firstID-0]
}
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID"
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1198, 1201, 1207, 1217, 1225, 1229, 1238, 1242, 1254, 1257, 1267, 1270, 1277, 1285, 1292, 1295, 1302, 1305, 1310, 1316, 1324, 1330, 1336, 1344, 1349, 1356, 1363, 1371, 1378, 1383, 1388, 1395, 1399, 1401, 1405, 1408, 1413, 1418, 1423, 1427, 1431, 1435, 1441, 1444, 1447, 1450, 1456}
var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1191, 1207, 1210, 1216, 1226, 1234, 1238, 1247, 1251, 1263, 1266, 1276, 1279, 1286, 1294, 1301, 1304, 1311, 1314, 1319, 1325, 1333, 1339, 1345, 1353, 1358, 1365, 1372, 1380, 1387, 1392, 1397, 1404, 1408, 1410, 1414, 1417, 1422, 1427, 1432, 1436, 1440, 1444, 1450, 1453, 1456, 1459, 1465}
func (i FeatureID) String() string {
if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) {

View File

@@ -62,7 +62,7 @@ type PutObjectFanOutResponse struct {
ETag string `json:"etag,omitempty"`
VersionID string `json:"versionId,omitempty"`
LastModified *time.Time `json:"lastModified,omitempty"`
Error error `json:"error,omitempty"`
Error string `json:"error,omitempty"`
}
// PutObjectFanOut - is a variant of PutObject instead of writing a single object from a single

View File

@@ -389,8 +389,9 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
headers := opts.Header()
if s3utils.IsAmazonEndpoint(*c.endpointURL) {
headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
}
// Instantiate all the complete multipart buffer.

View File

@@ -124,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.56"
libraryVersion = "v7.0.57"
)
// User Agent should always following the below style.

View File

@@ -4821,6 +4821,11 @@ func testPresignedPostPolicy() {
policy.SetContentType("binary/octet-stream")
policy.SetContentLengthRange(10, 1024*1024)
policy.SetUserMetadata(metadataKey, metadataValue)
// Add CRC32C
checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
policy.SetChecksum(checksum)
args["policy"] = policy.String()
presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
@@ -4888,6 +4893,7 @@ func testPresignedPostPolicy() {
Timeout: 30 * time.Second,
Transport: transport,
}
args["url"] = presignedPostPolicyURL.String()
req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
if err != nil {
@@ -4920,13 +4926,21 @@ func testPresignedPostPolicy() {
expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
if val, ok := res.Header["Location"]; ok {
if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
// Test when not against AWS S3.
if val, ok := res.Header["Location"]; ok {
if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
return
}
} else {
logError(testName, function, args, startTime, "", "Location not found in header response", err)
return
}
} else {
logError(testName, function, args, startTime, "", "Location not found in header response", err)
}
want := checksum.Encoded()
if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
return
}

View File

@@ -29,7 +29,7 @@ When using or transitioning to Go modules support:
```bash
# Go client latest or explicit version
go get github.com/nats-io/nats.go/@latest
go get github.com/nats-io/nats.go/@v1.26.0
go get github.com/nats-io/nats.go/@v1.27.0
# For latest NATS Server, add /v2 at the end
go get github.com/nats-io/nats-server/v2
@@ -92,6 +92,13 @@ nc.Close()
## JetStream Basic Usage
> __NOTE__
>
> We encourage you to try out a new, simplified version of the JetStream API.
> The new API is currently in preview and is available under `jetstream` package.
>
> You can find more information on the new API [here](https://github.com/nats-io/nats.go/blob/main/jetstream/README.md)
```go
import "github.com/nats-io/nats.go"

View File

@@ -679,6 +679,15 @@ func (js *js) newAsyncReply() string {
return sb.String()
}
func (js *js) cleanupReplySub() {
js.mu.Lock()
if js.rsub != nil {
js.rsub.Unsubscribe()
js.rsub = nil
}
js.mu.Unlock()
}
// registerPAF will register for a PubAckFuture.
func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) {
js.mu.Lock()

View File

@@ -47,7 +47,7 @@ import (
// Default Constants
const (
Version = "1.26.0"
Version = "1.27.0"
DefaultURL = "nats://127.0.0.1:4222"
DefaultPort = 4222
DefaultMaxReconnect = 60

View File

@@ -370,14 +370,17 @@ func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectIn
}
// Create our own JS context to handle errors etc.
js, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) }))
jetStream, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) }))
if err != nil {
return nil, err
}
defer jetStream.(*js).cleanupReplySub()
purgePartial := func() {
// wait until all pubs are complete or up to default timeout before attempting purge
select {
case <-js.PublishAsyncComplete():
case <-jetStream.PublishAsyncComplete():
case <-time.After(obs.js.opts.wait):
}
obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj})
@@ -423,7 +426,7 @@ func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectIn
h.Write(m.Data)
// Send msg itself.
if _, err := js.PublishMsgAsync(m); err != nil {
if _, err := jetStream.PublishMsgAsync(m); err != nil {
purgePartial()
return nil, err
}
@@ -458,7 +461,7 @@ func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectIn
}
// Publish the meta message.
_, err = js.PublishMsgAsync(mm)
_, err = jetStream.PublishMsgAsync(mm)
if err != nil {
if r != nil {
purgePartial()
@@ -468,7 +471,7 @@ func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectIn
// Wait for all to be processed.
select {
case <-js.PublishAsyncComplete():
case <-jetStream.PublishAsyncComplete():
if err := getErr(); err != nil {
if r != nil {
purgePartial()

View File

@@ -185,7 +185,7 @@ copyMatchTry8:
// A 16-at-a-time loop doesn't provide a further speedup.
CMP $8, len
CCMP HS, offset, $8, $0
BLO copyMatchLoop1
BLO copyMatchTry4
AND $7, len, lenRem
SUB $8, len
@@ -201,8 +201,19 @@ copyMatchLoop8:
MOVD tmp2, -8(dst)
B copyMatchDone
copyMatchTry4:
// Copy words if both len and offset are at least four.
CMP $4, len
CCMP HS, offset, $4, $0
BLO copyMatchLoop1
MOVWU.P 4(match), tmp2
MOVWU.P tmp2, 4(dst)
SUBS $4, len
BEQ copyMatchDone
copyMatchLoop1:
// Byte-at-a-time copy for small offsets.
// Byte-at-a-time copy for small offsets <= 3.
MOVBU.P 1(match), tmp2
MOVB.P tmp2, 1(dst)
SUBS $1, len

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"os"
@@ -19,16 +20,24 @@ const (
type AudioResponseFormat string
const (
AudioResponseFormatJSON AudioResponseFormat = "json"
AudioResponseFormatSRT AudioResponseFormat = "srt"
AudioResponseFormatVTT AudioResponseFormat = "vtt"
AudioResponseFormatJSON AudioResponseFormat = "json"
AudioResponseFormatText AudioResponseFormat = "text"
AudioResponseFormatSRT AudioResponseFormat = "srt"
AudioResponseFormatVerboseJSON AudioResponseFormat = "verbose_json"
AudioResponseFormatVTT AudioResponseFormat = "vtt"
)
// AudioRequest represents a request structure for audio API.
// ResponseFormat is not supported for now. We only return JSON text, which may be sufficient.
type AudioRequest struct {
Model string
FilePath string
Model string
// FilePath is either an existing file in your filesystem or a filename representing the contents of Reader.
FilePath string
// Reader is an optional io.Reader when you do not want to use an existing file.
Reader io.Reader
Prompt string // For translation, it should be in English
Temperature float32
Language string // For translation, just do not use it. It seems "en" works, not confirmed...
@@ -37,6 +46,22 @@ type AudioRequest struct {
// AudioResponse represents a response structure for audio API.
type AudioResponse struct {
Task string `json:"task"`
Language string `json:"language"`
Duration float64 `json:"duration"`
Segments []struct {
ID int `json:"id"`
Seek int `json:"seek"`
Start float64 `json:"start"`
End float64 `json:"end"`
Text string `json:"text"`
Tokens []int `json:"tokens"`
Temperature float64 `json:"temperature"`
AvgLogprob float64 `json:"avg_logprob"`
CompressionRatio float64 `json:"compression_ratio"`
NoSpeechProb float64 `json:"no_speech_prob"`
Transient bool `json:"transient"`
} `json:"segments"`
Text string `json:"text"`
}
@@ -89,21 +114,15 @@ func (c *Client) callAudioAPI(
// HasJSONResponse returns true if the response format is JSON.
func (r AudioRequest) HasJSONResponse() bool {
return r.Format == "" || r.Format == AudioResponseFormatJSON
return r.Format == "" || r.Format == AudioResponseFormatJSON || r.Format == AudioResponseFormatVerboseJSON
}
// audioMultipartForm creates a form with audio file contents and the name of the model to use for
// audio processing.
func audioMultipartForm(request AudioRequest, b utils.FormBuilder) error {
f, err := os.Open(request.FilePath)
err := createFileField(request, b)
if err != nil {
return fmt.Errorf("opening audio file: %w", err)
}
defer f.Close()
err = b.CreateFormFile("file", f)
if err != nil {
return fmt.Errorf("creating form file: %w", err)
return err
}
err = b.WriteField("model", request.Model)
@@ -146,3 +165,27 @@ func audioMultipartForm(request AudioRequest, b utils.FormBuilder) error {
// Close the multipart writer
return b.Close()
}
// createFileField creates the "file" form field from either an existing file or by using the reader.
func createFileField(request AudioRequest, b utils.FormBuilder) error {
if request.Reader != nil {
err := b.CreateFormFileReader("file", request.Reader, request.FilePath)
if err != nil {
return fmt.Errorf("creating form using reader: %w", err)
}
return nil
}
f, err := os.Open(request.FilePath)
if err != nil {
return fmt.Errorf("opening audio file: %w", err)
}
defer f.Close()
err = b.CreateFormFile("file", f)
if err != nil {
return fmt.Errorf("creating form file: %w", err)
}
return nil
}
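
With the `Reader` field and `createFileField` above, a transcription request no longer needs a file on disk; `FilePath` then only supplies the multipart filename. A hedged sketch (API key is a placeholder and the buffer contents stand in for real audio bytes):

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("YOUR_API_KEY") // placeholder token

	audio := bytes.NewReader([]byte("...audio bytes...")) // stand-in for real audio data

	resp, err := client.CreateTranscription(context.Background(), openai.AudioRequest{
		Model:    openai.Whisper1,
		Reader:   audio,        // no file on disk needed
		FilePath: "speech.mp3", // used only as the multipart filename
		Format:   openai.AudioResponseFormatVerboseJSON,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Text)
}
```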

View File

@@ -11,8 +11,11 @@ const (
ChatMessageRoleSystem = "system"
ChatMessageRoleUser = "user"
ChatMessageRoleAssistant = "assistant"
ChatMessageRoleFunction = "function"
)
const chatCompletionsSuffix = "/chat/completions"
var (
ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") //nolint:lll
ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream") //nolint:lll
@@ -27,6 +30,14 @@ type ChatCompletionMessage struct {
// - https://github.com/openai/openai-python/blob/main/chatml.md
// - https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
Name string `json:"name,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
}
type FunctionCall struct {
Name string `json:"name,omitempty"`
// call function with arguments in JSON format
Arguments string `json:"arguments,omitempty"`
}
// ChatCompletionRequest represents a request structure for chat completion API.
@@ -43,12 +54,72 @@ type ChatCompletionRequest struct {
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
LogitBias map[string]int `json:"logit_bias,omitempty"`
User string `json:"user,omitempty"`
Functions []*FunctionDefine `json:"functions,omitempty"`
FunctionCall string `json:"function_call,omitempty"`
}
type FunctionDefine struct {
Name string `json:"name"`
Description string `json:"description,omitempty"`
// it's required in function call
Parameters *FunctionParams `json:"parameters"`
}
type FunctionParams struct {
// the Type must be JSONSchemaTypeObject
Type JSONSchemaType `json:"type"`
Properties map[string]*JSONSchemaDefine `json:"properties,omitempty"`
Required []string `json:"required,omitempty"`
}
type JSONSchemaType string
const (
JSONSchemaTypeObject JSONSchemaType = "object"
JSONSchemaTypeNumber JSONSchemaType = "number"
JSONSchemaTypeString JSONSchemaType = "string"
JSONSchemaTypeArray JSONSchemaType = "array"
JSONSchemaTypeNull JSONSchemaType = "null"
JSONSchemaTypeBoolean JSONSchemaType = "boolean"
)
// JSONSchemaDefine is a struct for JSON Schema.
type JSONSchemaDefine struct {
// Type is a type of JSON Schema.
Type JSONSchemaType `json:"type,omitempty"`
// Description is a description of JSON Schema.
Description string `json:"description,omitempty"`
// Enum is an enum of JSON Schema. It is used if Type is JSONSchemaTypeString.
Enum []string `json:"enum,omitempty"`
// Properties is the properties of JSON Schema. It is used if Type is JSONSchemaTypeObject.
Properties map[string]*JSONSchemaDefine `json:"properties,omitempty"`
// Required is the required fields of JSON Schema. It is used if Type is JSONSchemaTypeObject.
Required []string `json:"required,omitempty"`
// Items is a property of JSON Schema. It is used if Type is JSONSchemaTypeArray.
Items *JSONSchemaDefine `json:"items,omitempty"`
}
type FinishReason string
const (
FinishReasonStop FinishReason = "stop"
FinishReasonLength FinishReason = "length"
FinishReasonFunctionCall FinishReason = "function_call"
FinishReasonContentFilter FinishReason = "content_filter"
FinishReasonNull FinishReason = "null"
)
type ChatCompletionChoice struct {
Index int `json:"index"`
Message ChatCompletionMessage `json:"message"`
FinishReason string `json:"finish_reason"`
Index int `json:"index"`
Message ChatCompletionMessage `json:"message"`
// FinishReason
// stop: API returned complete message,
// or a message terminated by one of the stop sequences provided via the stop parameter
// length: Incomplete model output due to max_tokens parameter or token limit
// function_call: The model decided to call a function
// content_filter: Omitted content due to a flag from our content filters
// null: API response still in progress or incomplete
FinishReason FinishReason `json:"finish_reason"`
}
// ChatCompletionResponse represents a response structure for chat completion API.
@@ -71,13 +142,13 @@ func (c *Client) CreateChatCompletion(
return
}
urlSuffix := "/chat/completions"
urlSuffix := chatCompletionsSuffix
if !checkEndpointSupportsModel(urlSuffix, request.Model) {
err = ErrChatCompletionInvalidModel
return
}
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL(urlSuffix, request.Model), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL(urlSuffix, request.Model), request)
if err != nil {
return
}
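
The new `Functions`/`FunctionCall` plumbing above maps onto OpenAI's function-calling interface: callable functions are described as JSON Schema, and the reply signals `FinishReasonFunctionCall` when the model wants one invoked. A minimal, hedged sketch (the weather function and its parameters are invented for illustration; the API key is a placeholder):

```go
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("YOUR_API_KEY") // placeholder token

	// Hypothetical function the model may choose to call.
	getWeather := &openai.FunctionDefine{
		Name:        "get_weather",
		Description: "Get the current weather for a city",
		Parameters: &openai.FunctionParams{
			Type: openai.JSONSchemaTypeObject,
			Properties: map[string]*openai.JSONSchemaDefine{
				"city": {Type: openai.JSONSchemaTypeString, Description: "City name"},
			},
			Required: []string{"city"},
		},
	}

	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo0613,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "What's the weather in Oslo?"},
		},
		Functions: []*openai.FunctionDefine{getWeather},
	})
	if err != nil {
		panic(err)
	}

	choice := resp.Choices[0]
	if choice.FinishReason == openai.FinishReasonFunctionCall {
		call := choice.Message.FunctionCall
		fmt.Printf("model wants %s(%s)\n", call.Name, call.Arguments)
	} else {
		fmt.Println(choice.Message.Content)
	}
}
```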

View File

@@ -3,7 +3,6 @@ package openai
import (
"bufio"
"context"
"net/http"
utils "github.com/sashabaranov/go-openai/internal"
)
@@ -16,7 +15,7 @@ type ChatCompletionStreamChoiceDelta struct {
type ChatCompletionStreamChoice struct {
Index int `json:"index"`
Delta ChatCompletionStreamChoiceDelta `json:"delta"`
FinishReason string `json:"finish_reason"`
FinishReason FinishReason `json:"finish_reason"`
}
type ChatCompletionStreamResponse struct {
@@ -41,7 +40,7 @@ func (c *Client) CreateChatCompletionStream(
ctx context.Context,
request ChatCompletionRequest,
) (stream *ChatCompletionStream, err error) {
urlSuffix := "/chat/completions"
urlSuffix := chatCompletionsSuffix
if !checkEndpointSupportsModel(urlSuffix, request.Model) {
err = ErrChatCompletionInvalidModel
return
@@ -57,7 +56,7 @@ func (c *Client) CreateChatCompletionStream(
if err != nil {
return
}
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
if isFailureStatusCode(resp) {
return nil, c.handleErrorResp(resp)
}
@@ -66,7 +65,7 @@ func (c *Client) CreateChatCompletionStream(
emptyMessagesLimit: c.config.EmptyMessagesLimit,
reader: bufio.NewReader(resp.Body),
response: resp,
errAccumulator: newErrorAccumulator(),
errAccumulator: utils.NewErrorAccumulator(),
unmarshaler: &utils.JSONUnmarshaler{},
},
}

View File

@@ -15,7 +15,7 @@ import (
type Client struct {
config ClientConfig
requestBuilder requestBuilder
requestBuilder utils.RequestBuilder
createFormBuilder func(io.Writer) utils.FormBuilder
}
@@ -29,7 +29,7 @@ func NewClient(authToken string) *Client {
func NewClientWithConfig(config ClientConfig) *Client {
return &Client{
config: config,
requestBuilder: newRequestBuilder(),
requestBuilder: utils.NewRequestBuilder(),
createFormBuilder: func(body io.Writer) utils.FormBuilder {
return utils.NewFormBuilder(body)
},
@@ -47,13 +47,6 @@ func NewOrgClient(authToken, org string) *Client {
func (c *Client) sendRequest(req *http.Request, v any) error {
req.Header.Set("Accept", "application/json; charset=utf-8")
// Azure API Key authentication
if c.config.APIType == APITypeAzure {
req.Header.Set(AzureAPIKeyHeader, c.config.authToken)
} else {
// OpenAI or Azure AD authentication
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.config.authToken))
}
// Check whether Content-Type is already set, Upload Files API requires
// Content-Type == multipart/form-data
@@ -62,9 +55,7 @@ func (c *Client) sendRequest(req *http.Request, v any) error {
req.Header.Set("Content-Type", "application/json; charset=utf-8")
}
if len(c.config.OrgID) > 0 {
req.Header.Set("OpenAI-Organization", c.config.OrgID)
}
c.setCommonHeaders(req)
res, err := c.config.HTTPClient.Do(req)
if err != nil {
@@ -73,13 +64,31 @@ func (c *Client) sendRequest(req *http.Request, v any) error {
defer res.Body.Close()
if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusBadRequest {
if isFailureStatusCode(res) {
return c.handleErrorResp(res)
}
return decodeResponse(res.Body, v)
}
func (c *Client) setCommonHeaders(req *http.Request) {
// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#authentication
// Azure API Key authentication
if c.config.APIType == APITypeAzure {
req.Header.Set(AzureAPIKeyHeader, c.config.authToken)
} else {
// OpenAI or Azure AD authentication
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.config.authToken))
}
if c.config.OrgID != "" {
req.Header.Set("OpenAI-Organization", c.config.OrgID)
}
}
func isFailureStatusCode(resp *http.Response) bool {
return resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest
}
func decodeResponse(body io.Reader, v any) error {
if v == nil {
return nil
@@ -135,7 +144,7 @@ func (c *Client) newStreamRequest(
urlSuffix string,
body any,
model string) (*http.Request, error) {
req, err := c.requestBuilder.build(ctx, method, c.fullURL(urlSuffix, model), body)
req, err := c.requestBuilder.Build(ctx, method, c.fullURL(urlSuffix, model), body)
if err != nil {
return nil, err
}
@@ -145,17 +154,7 @@ func (c *Client) newStreamRequest(
req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Connection", "keep-alive")
// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/reference#authentication
// Azure API Key authentication
if c.config.APIType == APITypeAzure {
req.Header.Set(AzureAPIKeyHeader, c.config.authToken)
} else {
// OpenAI or Azure AD authentication
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.config.authToken))
}
if c.config.OrgID != "" {
req.Header.Set("OpenAI-Organization", c.config.OrgID)
}
c.setCommonHeaders(req)
return req, nil
}

View File

@@ -17,11 +17,16 @@ var (
// GPT3 Models are designed for text-based tasks. For code-specific
// tasks, please refer to the Codex series of models.
const (
GPT432K0613 = "gpt-4-32k-0613"
GPT432K0314 = "gpt-4-32k-0314"
GPT432K = "gpt-4-32k"
GPT40613 = "gpt-4-0613"
GPT40314 = "gpt-4-0314"
GPT4 = "gpt-4"
GPT3Dot5Turbo0613 = "gpt-3.5-turbo-0613"
GPT3Dot5Turbo0301 = "gpt-3.5-turbo-0301"
GPT3Dot5Turbo16K = "gpt-3.5-turbo-16k"
GPT3Dot5Turbo16K0613 = "gpt-3.5-turbo-16k-0613"
GPT3Dot5Turbo = "gpt-3.5-turbo"
GPT3TextDavinci003 = "text-davinci-003"
GPT3TextDavinci002 = "text-davinci-002"
@@ -48,14 +53,19 @@ const (
var disabledModelsForEndpoints = map[string]map[string]bool{
"/completions": {
GPT3Dot5Turbo: true,
GPT3Dot5Turbo0301: true,
GPT4: true,
GPT40314: true,
GPT432K: true,
GPT432K0314: true,
GPT3Dot5Turbo: true,
GPT3Dot5Turbo0301: true,
GPT3Dot5Turbo0613: true,
GPT3Dot5Turbo16K: true,
GPT3Dot5Turbo16K0613: true,
GPT4: true,
GPT40314: true,
GPT40613: true,
GPT432K: true,
GPT432K0314: true,
GPT432K0613: true,
},
"/chat/completions": {
chatCompletionsSuffix: {
CodexCodeDavinci002: true,
CodexCodeCushman001: true,
CodexCodeDavinci001: true,
@@ -155,7 +165,7 @@ func (c *Client) CreateCompletion(
return
}
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL(urlSuffix, request.Model), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL(urlSuffix, request.Model), request)
if err != nil {
return
}

View File

@@ -32,7 +32,7 @@ type EditsResponse struct {
// Perform an API call to the Edits endpoint.
func (c *Client) Edits(ctx context.Context, request EditsRequest) (response EditsResponse, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL("/edits", fmt.Sprint(request.Model)), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL("/edits", fmt.Sprint(request.Model)), request)
if err != nil {
return
}

View File

@@ -132,7 +132,7 @@ type EmbeddingRequest struct {
// CreateEmbeddings returns an EmbeddingResponse which will contain an Embedding for every item in |request.Input|.
// https://beta.openai.com/docs/api-reference/embeddings/create
func (c *Client) CreateEmbeddings(ctx context.Context, request EmbeddingRequest) (resp EmbeddingResponse, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL("/embeddings", request.Model.String()), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL("/embeddings", request.Model.String()), request)
if err != nil {
return
}

View File

@@ -22,7 +22,7 @@ type EnginesList struct {
// ListEngines Lists the currently available engines, and provides basic
// information about each option such as the owner and availability.
func (c *Client) ListEngines(ctx context.Context) (engines EnginesList, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL("/engines"), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL("/engines"), nil)
if err != nil {
return
}
@@ -38,7 +38,7 @@ func (c *Client) GetEngine(
engineID string,
) (engine Engine, err error) {
urlSuffix := fmt.Sprintf("/engines/%s", engineID)
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
if err != nil {
return
}

View File

@@ -44,9 +44,13 @@ func (e *APIError) UnmarshalJSON(data []byte) (err error) {
return
}
err = json.Unmarshal(rawMap["type"], &e.Type)
if err != nil {
return
// optional fields for azure openai
// refs: https://github.com/sashabaranov/go-openai/issues/343
if _, ok := rawMap["type"]; ok {
err = json.Unmarshal(rawMap["type"], &e.Type)
if err != nil {
return
}
}
// optional fields

View File

@@ -1,53 +0,0 @@
package openai
import (
"bytes"
"fmt"
"io"
utils "github.com/sashabaranov/go-openai/internal"
)
type errorAccumulator interface {
write(p []byte) error
unmarshalError() *ErrorResponse
}
type errorBuffer interface {
io.Writer
Len() int
Bytes() []byte
}
type defaultErrorAccumulator struct {
buffer errorBuffer
unmarshaler utils.Unmarshaler
}
func newErrorAccumulator() errorAccumulator {
return &defaultErrorAccumulator{
buffer: &bytes.Buffer{},
unmarshaler: &utils.JSONUnmarshaler{},
}
}
func (e *defaultErrorAccumulator) write(p []byte) error {
_, err := e.buffer.Write(p)
if err != nil {
return fmt.Errorf("error accumulator write error, %w", err)
}
return nil
}
func (e *defaultErrorAccumulator) unmarshalError() (errResp *ErrorResponse) {
if e.buffer.Len() == 0 {
return
}
err := e.unmarshaler.Unmarshal(e.buffer.Bytes(), &errResp)
if err != nil {
errResp = nil
}
return
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"os"
)
@@ -70,7 +71,7 @@ func (c *Client) CreateFile(ctx context.Context, request FileRequest) (file File
// DeleteFile deletes an existing file.
func (c *Client) DeleteFile(ctx context.Context, fileID string) (err error) {
req, err := c.requestBuilder.build(ctx, http.MethodDelete, c.fullURL("/files/"+fileID), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodDelete, c.fullURL("/files/"+fileID), nil)
if err != nil {
return
}
@@ -82,7 +83,7 @@ func (c *Client) DeleteFile(ctx context.Context, fileID string) (err error) {
// ListFiles Lists the currently available files,
// and provides basic information about each file such as the file name and purpose.
func (c *Client) ListFiles(ctx context.Context) (files FilesList, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL("/files"), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL("/files"), nil)
if err != nil {
return
}
@@ -95,7 +96,7 @@ func (c *Client) ListFiles(ctx context.Context) (files FilesList, err error) {
// such as the file name and purpose.
func (c *Client) GetFile(ctx context.Context, fileID string) (file File, err error) {
urlSuffix := fmt.Sprintf("/files/%s", fileID)
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
if err != nil {
return
}
@@ -103,3 +104,26 @@ func (c *Client) GetFile(ctx context.Context, fileID string) (file File, err err
err = c.sendRequest(req, &file)
return
}
func (c *Client) GetFileContent(ctx context.Context, fileID string) (content io.ReadCloser, err error) {
urlSuffix := fmt.Sprintf("/files/%s/content", fileID)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
if err != nil {
return
}
c.setCommonHeaders(req)
res, err := c.config.HTTPClient.Do(req)
if err != nil {
return
}
if isFailureStatusCode(res) {
err = c.handleErrorResp(res)
return
}
content = res.Body
return
}
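
GetFileContent returns the raw response body as an io.ReadCloser, so the caller owns closing it. A short hedged usage sketch (API key and file ID are placeholders):

```go
package main

import (
	"context"
	"io"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("YOUR_API_KEY") // placeholder token

	content, err := client.GetFileContent(context.Background(), "file-abc123") // placeholder file ID
	if err != nil {
		panic(err)
	}
	defer content.Close() // caller is responsible for closing the body

	if _, err := io.Copy(os.Stdout, content); err != nil {
		panic(err)
	}
}
```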

View File

@@ -68,7 +68,7 @@ type FineTuneDeleteResponse struct {
func (c *Client) CreateFineTune(ctx context.Context, request FineTuneRequest) (response FineTune, err error) {
urlSuffix := "/fine-tunes"
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL(urlSuffix), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL(urlSuffix), request)
if err != nil {
return
}
@@ -79,7 +79,7 @@ func (c *Client) CreateFineTune(ctx context.Context, request FineTuneRequest) (r
// CancelFineTune cancel a fine-tune job.
func (c *Client) CancelFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL("/fine-tunes/"+fineTuneID+"/cancel"), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL("/fine-tunes/"+fineTuneID+"/cancel"), nil)
if err != nil {
return
}
@@ -89,7 +89,7 @@ func (c *Client) CancelFineTune(ctx context.Context, fineTuneID string) (respons
}
func (c *Client) ListFineTunes(ctx context.Context) (response FineTuneList, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL("/fine-tunes"), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL("/fine-tunes"), nil)
if err != nil {
return
}
@@ -100,7 +100,7 @@ func (c *Client) ListFineTunes(ctx context.Context) (response FineTuneList, err
func (c *Client) GetFineTune(ctx context.Context, fineTuneID string) (response FineTune, err error) {
urlSuffix := fmt.Sprintf("/fine-tunes/%s", fineTuneID)
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
if err != nil {
return
}
@@ -110,7 +110,7 @@ func (c *Client) GetFineTune(ctx context.Context, fineTuneID string) (response F
}
func (c *Client) DeleteFineTune(ctx context.Context, fineTuneID string) (response FineTuneDeleteResponse, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodDelete, c.fullURL("/fine-tunes/"+fineTuneID), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodDelete, c.fullURL("/fine-tunes/"+fineTuneID), nil)
if err != nil {
return
}
@@ -120,7 +120,7 @@ func (c *Client) DeleteFineTune(ctx context.Context, fineTuneID string) (respons
}
func (c *Client) ListFineTuneEvents(ctx context.Context, fineTuneID string) (response FineTuneEventList, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL("/fine-tunes/"+fineTuneID+"/events"), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL("/fine-tunes/"+fineTuneID+"/events"), nil)
if err != nil {
return
}

View File

@@ -44,7 +44,7 @@ type ImageResponseDataInner struct {
// CreateImage - API call to create an image. This is the main endpoint of the DALL-E API.
func (c *Client) CreateImage(ctx context.Context, request ImageRequest) (response ImageResponse, err error) {
urlSuffix := "/images/generations"
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL(urlSuffix), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL(urlSuffix), request)
if err != nil {
return
}

View File

@@ -0,0 +1,44 @@
package openai
import (
"bytes"
"fmt"
"io"
)
type ErrorAccumulator interface {
Write(p []byte) error
Bytes() []byte
}
type errorBuffer interface {
io.Writer
Len() int
Bytes() []byte
}
type DefaultErrorAccumulator struct {
Buffer errorBuffer
}
func NewErrorAccumulator() ErrorAccumulator {
return &DefaultErrorAccumulator{
Buffer: &bytes.Buffer{},
}
}
func (e *DefaultErrorAccumulator) Write(p []byte) error {
_, err := e.Buffer.Write(p)
if err != nil {
return fmt.Errorf("error accumulator write error, %w", err)
}
return nil
}
func (e *DefaultErrorAccumulator) Bytes() (errBytes []byte) {
if e.Buffer.Len() == 0 {
return
}
errBytes = e.Buffer.Bytes()
return
}

View File

@@ -1,13 +1,16 @@
package openai
import (
"fmt"
"io"
"mime/multipart"
"os"
"path"
)
type FormBuilder interface {
CreateFormFile(fieldname string, file *os.File) error
CreateFormFileReader(fieldname string, r io.Reader, filename string) error
WriteField(fieldname, value string) error
Close() error
FormDataContentType() string
@@ -24,15 +27,28 @@ func NewFormBuilder(body io.Writer) *DefaultFormBuilder {
}
func (fb *DefaultFormBuilder) CreateFormFile(fieldname string, file *os.File) error {
fieldWriter, err := fb.writer.CreateFormFile(fieldname, file.Name())
return fb.createFormFile(fieldname, file, file.Name())
}
func (fb *DefaultFormBuilder) CreateFormFileReader(fieldname string, r io.Reader, filename string) error {
return fb.createFormFile(fieldname, r, path.Base(filename))
}
func (fb *DefaultFormBuilder) createFormFile(fieldname string, r io.Reader, filename string) error {
if filename == "" {
return fmt.Errorf("filename cannot be empty")
}
fieldWriter, err := fb.writer.CreateFormFile(fieldname, filename)
if err != nil {
return err
}
_, err = io.Copy(fieldWriter, file)
_, err = io.Copy(fieldWriter, r)
if err != nil {
return err
}
return nil
}

View File

@@ -4,25 +4,23 @@ import (
"bytes"
"context"
"net/http"
utils "github.com/sashabaranov/go-openai/internal"
)
type requestBuilder interface {
build(ctx context.Context, method, url string, request any) (*http.Request, error)
type RequestBuilder interface {
Build(ctx context.Context, method, url string, request any) (*http.Request, error)
}
type httpRequestBuilder struct {
marshaller utils.Marshaller
type HTTPRequestBuilder struct {
marshaller Marshaller
}
func newRequestBuilder() *httpRequestBuilder {
return &httpRequestBuilder{
marshaller: &utils.JSONMarshaller{},
func NewRequestBuilder() *HTTPRequestBuilder {
return &HTTPRequestBuilder{
marshaller: &JSONMarshaller{},
}
}
func (b *httpRequestBuilder) build(ctx context.Context, method, url string, request any) (*http.Request, error) {
func (b *HTTPRequestBuilder) Build(ctx context.Context, method, url string, request any) (*http.Request, error) {
if request == nil {
return http.NewRequestWithContext(ctx, method, url, nil)
}

View File

@@ -2,6 +2,7 @@ package openai
import (
"context"
"fmt"
"net/http"
)
@@ -40,7 +41,7 @@ type ModelsList struct {
// ListModels Lists the currently available models,
// and provides basic information about each model such as the model id and parent.
func (c *Client) ListModels(ctx context.Context) (models ModelsList, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodGet, c.fullURL("/models"), nil)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL("/models"), nil)
if err != nil {
return
}
@@ -48,3 +49,16 @@ func (c *Client) ListModels(ctx context.Context) (models ModelsList, err error)
err = c.sendRequest(req, &models)
return
}
// GetModel Retrieves a model instance, providing basic information about
// the model such as the owner and permissioning.
func (c *Client) GetModel(ctx context.Context, modelID string) (model Model, err error) {
urlSuffix := fmt.Sprintf("/models/%s", modelID)
req, err := c.requestBuilder.Build(ctx, http.MethodGet, c.fullURL(urlSuffix), nil)
if err != nil {
return
}
err = c.sendRequest(req, &model)
return
}

View File

@@ -63,7 +63,7 @@ type ModerationResponse struct {
// Moderations — perform a moderation api call over a string.
// Input can be an array or slice but a string will reduce the complexity.
func (c *Client) Moderations(ctx context.Context, request ModerationRequest) (response ModerationResponse, err error) {
req, err := c.requestBuilder.build(ctx, http.MethodPost, c.fullURL("/moderations", request.Model), request)
req, err := c.requestBuilder.Build(ctx, http.MethodPost, c.fullURL("/moderations", request.Model), request)
if err != nil {
return
}

Some files were not shown because too many files have changed in this diff