
Plugins and profiles (#2764)

* feat: more plugins

* chore(ci): split out benchmarks

Attempt to resolve too many open files in ci

* chore(ci): split out benchmarks

* fix(ci): Attempt to resolve too many open files in ci

* fix: set DefaultX for cli flag and service option

* fix: restore http broker

* fix: default http broker

* feat: full nats profile

* chore: still ugly, not ready

* fix: better initialization for profiles

* fix(tests): comment out flaky listen tests

* fix: disable benchmarks on gha

* chore: cleanup, comments

* chore: add nats config source
Author: Brian Ketelsen
Date: 2025-05-20 13:24:06 -04:00 (committed by GitHub)
Parent: e12504ce3a
Commit: ddc34801ee
58 changed files with 6792 additions and 218 deletions

store/postgres/README.md (new file)
@@ -0,0 +1,13 @@
# Postgres plugin
This module provides a Postgres implementation of the micro store interface.
## Implementation notes
### Concepts
We maintain a single connection to the Postgres server. Because of the way connections are handled, all micro "databases" and "tables" are stored under the single Postgres database named in the connection string (https://www.postgresql.org/docs/8.1/ddl-schemas.html). The mapping of micro to Postgres concepts is:
- micro database => Postgres schema
- micro table => Postgres table
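
For illustration, a minimal usage sketch of this mapping; the DSN, schema and table names are placeholders, and the import path `go-micro.dev/v5/store/postgres` is assumed from the repository layout:

```go
package main

import (
	"log"

	"go-micro.dev/v5/store"
	"go-micro.dev/v5/store/postgres" // assumed import path for this plugin
)

func main() {
	// A micro "database" becomes a Postgres schema and a micro "table" becomes a
	// Postgres table, all inside the single database named in the node URL.
	s := postgres.NewStore(
		store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), // placeholder DSN
		store.Database("orders"),  // -> Postgres schema "orders"
		store.Table("customers"),  // -> Postgres table "orders"."customers"
	)
	defer s.Close()

	if err := s.Write(&store.Record{Key: "cust/1", Value: []byte(`{"name":"Ada"}`)}); err != nil {
		log.Fatal(err)
	}
}
```
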
### Expiry
Expiry is managed by an expiry column in the table. A record's expiry timestamp is stored in that column; when a record is read the expiry field is checked first, and the record is only returned if it is still valid, otherwise it is deleted. A maintenance loop also runs periodically to delete any rows that have expired.
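
A hedged sketch of the expiry behaviour described above; the key, value and TTL are made up, and `s` may be any store built as in the previous sketch:

```go
package example

import (
	"log"
	"time"

	"go-micro.dev/v5/store"
)

// writeWithTTL writes a record whose expiry column is set to now()+1m. Once
// that timestamp passes, reads report store.ErrNotFound and the hourly
// maintenance loop eventually deletes the row.
func writeWithTTL(s store.Store) {
	rec := &store.Record{
		Key:    "session/abc", // placeholder key
		Value:  []byte("token"),
		Expiry: time.Minute, // persisted as a timestamp in the expiry column
	}
	if err := s.Write(rec); err != nil {
		log.Fatal(err)
	}

	// Later on, an expired record is treated as missing.
	if _, err := s.Read("session/abc"); err == store.ErrNotFound {
		log.Println("record has expired")
	}
}
```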

@@ -0,0 +1,61 @@
// Copyright 2020 Asim Aslam
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original source: github.com/micro/go-plugins/v3/store/cockroach/metadata.go
package postgres
import (
"database/sql/driver"
"encoding/json"
"errors"
)
// https://github.com/upper/db/blob/master/postgresql/custom_types.go#L43
type Metadata map[string]interface{}
// Scan satisfies the sql.Scanner interface.
func (m *Metadata) Scan(src interface{}) error {
source, ok := src.([]byte)
if !ok {
return errors.New("type assertion .([]byte) failed")
}
var i interface{}
err := json.Unmarshal(source, &i)
if err != nil {
return err
}
*m, ok = i.(map[string]interface{})
if !ok {
return errors.New("type assertion .(map[string]interface{}) failed")
}
return nil
}
// Value satisfies the driver.Valuer interface.
func (m Metadata) Value() (driver.Value, error) {
j, err := json.Marshal(m)
return j, err
}
func toMetadata(m *Metadata) map[string]interface{} {
md := make(map[string]interface{})
for k, v := range *m {
md[k] = v
}
return md
}
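
For illustration, a hypothetical example test showing how this type round-trips the JSONB metadata column through database/sql's Scanner and Valuer hooks:

```go
package postgres

import "fmt"

// ExampleMetadata is a sketch only: Value produces the JSON bytes written to
// the metadata column, and Scan decodes those bytes when a row is read back.
func ExampleMetadata() {
	m := Metadata{"owner": "team-a"}

	v, _ := m.Value() // what database/sql hands to the driver on write
	fmt.Printf("%s\n", v)

	var out Metadata
	_ = out.Scan(v.([]byte)) // what database/sql calls when reading the column
	fmt.Println(out["owner"])

	// Output:
	// {"owner":"team-a"}
	// team-a
}
```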

@@ -0,0 +1,14 @@
# Postgres pgx plugin
This module provides a Postgres implementation of the micro store interface.
It uses the modern https://github.com/jackc/pgx driver to access Postgres.
## Implementation notes
### Concepts
Every database has its own connection pool, so micro "databases" and "tables" can be stored across one or several Postgres databases, as specified in the connection string (https://www.postgresql.org/docs/8.1/ddl-schemas.html). The mapping of micro to Postgres concepts is:
- micro database => Postgres schema
- micro table => Postgres table
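
For illustration, a hedged sketch of addressing several micro databases through one store; it assumes the store package's per-call WriteTo option, the import path `go-micro.dev/v5/store/postgres/pgx`, and a placeholder DSN:

```go
package main

import (
	"log"

	"go-micro.dev/v5/store"
	"go-micro.dev/v5/store/postgres/pgx" // assumed import path for this module
)

func main() {
	s := pgx.NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable")) // placeholder DSN
	defer s.Close()

	// Each distinct micro database is initialised lazily and gets its own
	// pgxpool.Pool inside the store; both still map onto Postgres schemas.
	if err := s.Write(&store.Record{Key: "a", Value: []byte("1")}, store.WriteTo("orders", "items")); err != nil {
		log.Fatal(err)
	}
	if err := s.Write(&store.Record{Key: "b", Value: []byte("2")}, store.WriteTo("billing", "invoices")); err != nil {
		log.Fatal(err)
	}
}
```
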
### Expiry
Expiry is managed by an expiry column in the table. A record's expiry timestamp is stored in that column; when a record is read the expiry field is checked first, and the record is only returned if it is still valid, otherwise it is deleted. A maintenance loop also runs periodically to delete any rows that have expired.

store/postgres/pgx/db.go (new file)
@@ -0,0 +1,8 @@
package pgx
import "github.com/jackc/pgx/v4/pgxpool"
// DB holds the connection pool for one Postgres database together with the
// prepared query set for each table that has been initialised in it.
type DB struct {
conn *pgxpool.Pool
tables map[string]Queries
}

@@ -0,0 +1,44 @@
package pgx
import (
"database/sql/driver"
"encoding/json"
"errors"
)
type Metadata map[string]interface{}
// Scan satisfies the sql.Scanner interface.
func (m *Metadata) Scan(src interface{}) error {
source, ok := src.([]byte)
if !ok {
return errors.New("type assertion .([]byte) failed")
}
var i interface{}
err := json.Unmarshal(source, &i)
if err != nil {
return err
}
*m, ok = i.(map[string]interface{})
if !ok {
return errors.New("type assertion .(map[string]interface{}) failed")
}
return nil
}
// Value satisfies the driver.Valuer interface.
func (m *Metadata) Value() (driver.Value, error) {
j, err := json.Marshal(m)
return j, err
}
func toMetadata(m *Metadata) map[string]interface{} {
md := make(map[string]interface{})
for k, v := range *m {
md[k] = v
}
return md
}

store/postgres/pgx/pgx.go (new file)
@@ -0,0 +1,427 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pgx implements the postgres store with pgx driver
package pgx
import (
"database/sql"
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/pkg/errors"
"go-micro.dev/v5/logger"
"go-micro.dev/v5/store"
)
const defaultDatabase = "micro"
const defaultTable = "micro"
type sqlStore struct {
options store.Options
re *regexp.Regexp
sync.Mutex
// known databases
databases map[string]DB
}
func (s *sqlStore) getDB(database, table string) (string, string) {
if len(database) == 0 {
if len(s.options.Database) > 0 {
database = s.options.Database
} else {
database = defaultDatabase
}
}
if len(table) == 0 {
if len(s.options.Table) > 0 {
table = s.options.Table
} else {
table = defaultTable
}
}
// store.namespace must only contain letters, numbers and underscores
database = s.re.ReplaceAllString(database, "_")
table = s.re.ReplaceAllString(table, "_")
return database, table
}
func (s *sqlStore) db(database, table string) (*pgxpool.Pool, Queries, error) {
s.Lock()
defer s.Unlock()
database, table = s.getDB(database, table)
if _, ok := s.databases[database]; !ok {
err := s.initDB(database)
if err != nil {
return nil, Queries{}, err
}
}
dbObj := s.databases[database]
if _, ok := dbObj.tables[table]; !ok {
err := s.initTable(database, table)
if err != nil {
return nil, Queries{}, err
}
}
return dbObj.conn, dbObj.tables[table], nil
}
func (s *sqlStore) initTable(database, table string) error {
db := s.databases[database].conn
_, err := db.Exec(s.options.Context, fmt.Sprintf(createTable, database, table))
if err != nil {
return errors.Wrap(err, "cannot create table")
}
_, err = db.Exec(s.options.Context, fmt.Sprintf(createMDIndex, table, database, table))
if err != nil {
return errors.Wrap(err, "cannot create metadata index")
}
_, err = db.Exec(s.options.Context, fmt.Sprintf(createExpiryIndex, table, database, table))
if err != nil {
return errors.Wrap(err, "cannot create expiry index")
}
s.databases[database].tables[table] = NewQueries(database, table)
return nil
}
func (s *sqlStore) initDB(database string) error {
if len(s.options.Nodes) == 0 {
s.options.Nodes = []string{"postgresql://root@localhost:26257?sslmode=disable"}
}
source := s.options.Nodes[0]
// the node may be a URL (e.g. postgres://user@host:5432/db?sslmode=disable) or a
// key=value connection string (host=... port=... user=... sslmode=disable);
// if it parses as neither and contains no spaces, treat it as a bare host name
_, err := url.Parse(source)
if err != nil {
if !strings.Contains(source, " ") {
source = fmt.Sprintf("host=%s", source)
}
}
config, err := pgxpool.ParseConfig(source)
if err != nil {
return err
}
db, err := pgxpool.ConnectConfig(s.options.Context, config)
if err != nil {
return err
}
if err = db.Ping(s.options.Context); err != nil {
return err
}
_, err = db.Exec(s.options.Context, fmt.Sprintf(createSchema, database))
if err != nil {
return err
}
if len(database) == 0 {
if len(s.options.Database) > 0 {
database = s.options.Database
} else {
database = defaultDatabase
}
}
// save the values
s.databases[database] = DB{
conn: db,
tables: make(map[string]Queries),
}
return nil
}
func (s *sqlStore) Close() error {
for _, obj := range s.databases {
obj.conn.Close()
}
return nil
}
func (s *sqlStore) Init(opts ...store.Option) error {
for _, o := range opts {
o(&s.options)
}
_, _, err := s.db(s.options.Database, s.options.Table)
return err
}
// List all the known records
func (s *sqlStore) List(opts ...store.ListOption) ([]string, error) {
options := store.ListOptions{}
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return nil, err
}
pattern := "%"
if options.Prefix != "" {
pattern = options.Prefix + pattern
}
if options.Suffix != "" {
pattern = pattern + options.Suffix
}
var rows pgx.Rows
if options.Limit > 0 {
rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset)
} else {
rows, err = db.Query(s.options.Context, queries.ListAsc, pattern)
}
if err != nil {
if err == pgx.ErrNoRows {
return nil, nil
}
return nil, err
}
defer rows.Close()
keys := make([]string, 0, 10)
for rows.Next() {
var key string
err = rows.Scan(&key)
if err != nil {
return nil, err
}
keys = append(keys, key)
}
return keys, nil
}
// rowToRecord converts from pgx.Row to a store.Record
func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
if err == sql.ErrNoRows {
return record, store.ErrNotFound
}
return nil, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
return record, nil
}
// rowsToRecords converts from pgx.Rows to []*store.Record
func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {
var records []*store.Record
for rows.Next() {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
return records, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
records = append(records, record)
}
return records, nil
}
// Read a single key
func (s *sqlStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
options := store.ReadOptions{}
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return nil, err
}
// read one record
if !options.Prefix && !options.Suffix {
row := db.QueryRow(s.options.Context, queries.ReadOne, key)
record, err := s.rowToRecord(row)
if err != nil {
return nil, err
}
return []*store.Record{record}, nil
}
// read by pattern
pattern := "%"
if options.Prefix {
pattern = key + pattern
}
if options.Suffix {
pattern = pattern + key
}
var rows pgx.Rows
if options.Limit > 0 {
rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset)
} else {
rows, err = db.Query(s.options.Context, queries.ListAsc, pattern)
}
if err != nil {
if err == pgx.ErrNoRows {
return nil, nil
}
return nil, err
}
defer rows.Close()
return s.rowsToRecords(rows)
}
// Write records
func (s *sqlStore) Write(r *store.Record, opts ...store.WriteOption) error {
var options store.WriteOptions
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return err
}
metadata := make(Metadata)
for k, v := range r.Metadata {
metadata[k] = v
}
if r.Expiry != 0 {
_, err = db.Exec(s.options.Context, queries.Write, r.Key, r.Value, metadata, time.Now().Add(r.Expiry))
} else {
_, err = db.Exec(s.options.Context, queries.Write, r.Key, r.Value, metadata, nil)
}
if err != nil {
return errors.Wrap(err, "cannot upsert record "+r.Key)
}
return nil
}
// Delete records with keys
func (s *sqlStore) Delete(key string, opts ...store.DeleteOption) error {
var options store.DeleteOptions
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return err
}
_, err = db.Exec(s.options.Context, queries.Delete, key)
return err
}
func (s *sqlStore) Options() store.Options {
return s.options
}
func (s *sqlStore) String() string {
return "pgx"
}
// NewStore returns a new micro Store backed by sql
func NewStore(opts ...store.Option) store.Store {
options := store.Options{
Database: defaultDatabase,
Table: defaultTable,
}
for _, o := range opts {
o(&options)
}
// new store
s := new(sqlStore)
s.options = options
s.databases = make(map[string]DB)
s.re = regexp.MustCompile("[^a-zA-Z0-9]+")
go s.expiryLoop()
// return store
return s
}
func (s *sqlStore) expiryLoop() {
for {
err := s.expireRows()
if err != nil {
logger.Errorf("error cleaning up %s", err)
}
time.Sleep(1 * time.Hour)
}
}
func (s *sqlStore) expireRows() error {
for database, dbObj := range s.databases {
db := dbObj.conn
for table, queries := range dbObj.tables {
res, err := db.Exec(s.options.Context, queries.DeleteExpired)
if err != nil {
logger.Errorf("Error cleaning up %s", err)
return err
}
logger.Infof("Cleaning up %s %s: %d rows deleted", database, table, res.RowsAffected())
}
}
return nil
}

@@ -0,0 +1,139 @@
//go:build integration
// +build integration
package pgx
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"go-micro.dev/v5/store"
)
type testObj struct {
One string
Two int64
}
func TestPostgres(t *testing.T) {
t.Run("ReadWrite", func(t *testing.T) {
s := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"))
b, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s.Write(&store.Record{
Key: "foobar/baz",
Value: b,
Metadata: map[string]interface{}{
"meta1": "val1",
},
})
assert.NoError(t, err)
recs, err := s.Read("foobar/baz")
assert.NoError(t, err)
assert.Len(t, recs, 1)
assert.Equal(t, "foobar/baz", recs[0].Key)
assert.Len(t, recs[0].Metadata, 1)
assert.Equal(t, "val1", recs[0].Metadata["meta1"])
var tobj testObj
assert.NoError(t, json.Unmarshal(recs[0].Value, &tobj))
assert.Equal(t, "1", tobj.One)
assert.Equal(t, int64(2), tobj.Two)
})
t.Run("Prefix", func(t *testing.T) {
s := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"))
b, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s.Write(&store.Record{
Key: "foo/bar",
Value: b,
Metadata: map[string]interface{}{
"meta1": "val1",
},
})
assert.NoError(t, err)
err = s.Write(&store.Record{
Key: "foo/baz",
Value: b,
Metadata: map[string]interface{}{
"meta1": "val1",
},
})
assert.NoError(t, err)
recs, err := s.Read("foo/", store.ReadPrefix())
assert.NoError(t, err)
assert.Len(t, recs, 2)
assert.Equal(t, "foo/bar", recs[0].Key)
assert.Equal(t, "foo/baz", recs[1].Key)
})
t.Run("MultipleTables", func(t *testing.T) {
s1 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Table("t1"))
s2 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Table("t2"))
b1, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s1.Write(&store.Record{
Key: "foo/bar",
Value: b1,
})
assert.NoError(t, err)
b2, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err = s2.Write(&store.Record{
Key: "foo/baz",
Value: b2,
})
assert.NoError(t, err)
recs1, err := s1.List()
assert.NoError(t, err)
assert.Len(t, recs1, 1)
assert.Equal(t, "foo/bar", recs1[0])
recs2, err := s2.List()
assert.NoError(t, err)
assert.Len(t, recs2, 1)
assert.Equal(t, "foo/baz", recs2[0])
})
t.Run("MultipleDBs", func(t *testing.T) {
s1 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Database("d1"))
s2 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Database("d2"))
b1, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s1.Write(&store.Record{
Key: "foo/bar",
Value: b1,
})
assert.NoError(t, err)
b2, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err = s2.Write(&store.Record{
Key: "foo/baz",
Value: b2,
})
assert.NoError(t, err)
recs1, err := s1.List()
assert.NoError(t, err)
assert.Len(t, recs1, 1)
assert.Equal(t, "foo/bar", recs1[0])
recs2, err := s2.List()
assert.NoError(t, err)
assert.Len(t, recs2, 1)
assert.Equal(t, "foo/baz", recs2[0])
})
}

@@ -0,0 +1,38 @@
package pgx
import "fmt"
type Queries struct {
// read
ListAsc string
ListAscLimit string
ListDesc string
ListDescLimit string
ReadOne string
ReadManyAsc string
ReadManyAscLimit string
ReadManyDesc string
ReadManyDescLimit string
// change
Write string
Delete string
DeleteExpired string
}
func NewQueries(database, table string) Queries {
return Queries{
ListAsc: fmt.Sprintf(list, database, table) + asc,
ListAscLimit: fmt.Sprintf(list, database, table) + asc + limit,
ListDesc: fmt.Sprintf(list, database, table) + desc,
ListDescLimit: fmt.Sprintf(list, database, table) + desc + limit,
ReadOne: fmt.Sprintf(readOne, database, table),
ReadManyAsc: fmt.Sprintf(readMany, database, table) + asc,
ReadManyAscLimit: fmt.Sprintf(readMany, database, table) + asc + limit,
ReadManyDesc: fmt.Sprintf(readMany, database, table) + desc,
ReadManyDescLimit: fmt.Sprintf(readMany, database, table) + desc + limit,
Write: fmt.Sprintf(write, database, table),
Delete: fmt.Sprintf(deleteRecord, database, table),
DeleteExpired: fmt.Sprintf(deleteExpired, database, table),
}
}

@@ -0,0 +1,35 @@
package pgx
// init
const createSchema = "CREATE SCHEMA IF NOT EXISTS %s"
const createTable = `CREATE TABLE IF NOT EXISTS %s.%s
(
key text primary key,
value bytea,
metadata JSONB,
expiry timestamp with time zone
)`
const createMDIndex = `create index if not exists idx_md_%s ON %s.%s USING GIN (metadata)`
const createExpiryIndex = `create index if not exists idx_expiry_%s on %s.%s (expiry) where (expiry IS NOT NULL)`
// base queries
const (
list = "SELECT key FROM %s.%s WHERE key LIKE $1 and (expiry > now() or expiry isnull)"
readOne = "SELECT key, value, metadata, expiry FROM %s.%s WHERE key = $1 and (expiry > now() or expiry isnull)"
readMany = "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 and (expiry > now() or expiry isnull)"
write = `INSERT INTO %s.%s(key, value, metadata, expiry)
VALUES ($1, $2::bytea, $3, $4)
ON CONFLICT (key)
DO UPDATE
SET value = EXCLUDED.value, metadata = EXCLUDED.metadata, expiry = EXCLUDED.expiry`
deleteRecord = "DELETE FROM %s.%s WHERE key = $1"
deleteExpired = "DELETE FROM %s.%s WHERE expiry < now()"
)
// suffixes
const (
limit = " LIMIT $2 OFFSET $3"
asc = " ORDER BY key ASC"
desc = " ORDER BY key DESC"
)
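
To make the composition concrete, an illustrative sketch of the statements NewQueries assembles for the default "micro" schema and table, with the expected strings shown in comments:

```go
package pgx

import "fmt"

// printQueries is a sketch showing how the base statements and suffixes above
// are stitched together by NewQueries for schema "micro" and table "micro".
func printQueries() {
	q := NewQueries("micro", "micro")

	fmt.Println(q.Delete)
	// DELETE FROM micro.micro WHERE key = $1

	fmt.Println(q.ListAscLimit)
	// SELECT key FROM micro.micro WHERE key LIKE $1 and (expiry > now() or expiry isnull) ORDER BY key ASC LIMIT $2 OFFSET $3
}
```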

store/postgres/postgres.go (new file)
@@ -0,0 +1,663 @@
// Copyright 2020 Asim Aslam
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original source: github.com/micro/go-plugins/v3/store/cockroach/cockroach.go
// Package postgres implements the postgres store
package postgres
import (
"database/sql"
"database/sql/driver"
"fmt"
"net"
"net/url"
"regexp"
"strings"
"sync"
"syscall"
"time"
"github.com/lib/pq"
"github.com/pkg/errors"
"go-micro.dev/v5/logger"
"go-micro.dev/v5/store"
)
// DefaultDatabase is the namespace that the sql store
// will use if no namespace is provided.
var (
DefaultDatabase = "micro"
DefaultTable = "micro"
ErrNoConnection = errors.New("Database connection not initialised")
)
var (
re = regexp.MustCompile("[^a-zA-Z0-9]+")
// alternative ordering
orderAsc = "ORDER BY key ASC"
orderDesc = "ORDER BY key DESC"
// the sql statements we prepare and use
statements = map[string]string{
"list": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 ORDER BY key ASC LIMIT $2 OFFSET $3;",
"read": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key = $1;",
"readMany": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 ORDER BY key ASC;",
"readOffset": "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 ORDER BY key ASC LIMIT $2 OFFSET $3;",
"write": "INSERT INTO %s.%s(key, value, metadata, expiry) VALUES ($1, $2::bytea, $3, $4) ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value, metadata = EXCLUDED.metadata, expiry = EXCLUDED.expiry;",
"delete": "DELETE FROM %s.%s WHERE key = $1;",
"deleteExpired": "DELETE FROM %s.%s WHERE expiry < now();",
"showTables": "SELECT schemaname, tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema';",
}
)
type sqlStore struct {
options store.Options
dbConn *sql.DB
sync.RWMutex
// known databases
databases map[string]bool
}
func (s *sqlStore) getDB(database, table string) (string, string) {
if len(database) == 0 {
if len(s.options.Database) > 0 {
database = s.options.Database
} else {
database = DefaultDatabase
}
}
if len(table) == 0 {
if len(s.options.Table) > 0 {
table = s.options.Table
} else {
table = DefaultTable
}
}
// store.namespace must only contain letters, numbers and underscores
database = re.ReplaceAllString(database, "_")
table = re.ReplaceAllString(table, "_")
return database, table
}
// createDB ensures that the DB and table have been created. It's used for lazy initialisation
// and will record which tables have been created to reduce calls to the DB
func (s *sqlStore) createDB(database, table string) error {
database, table = s.getDB(database, table)
s.Lock()
defer s.Unlock()
if _, ok := s.databases[database+":"+table]; ok {
return nil
}
if err := s.initDB(database, table); err != nil {
return err
}
s.databases[database+":"+table] = true
return nil
}
// db returns a valid connection to the DB
func (s *sqlStore) db() (*sql.DB, error) {
if s.dbConn == nil {
return nil, ErrNoConnection
}
if err := s.dbConn.Ping(); err != nil {
if !isBadConnError(err) {
return nil, err
}
logger.Errorf("Error with DB connection, will reconfigure: %s", err)
if err := s.configure(); err != nil {
logger.Errorf("Error while reconfiguring client: %s", err)
return nil, err
}
}
return s.dbConn, nil
}
// isBadConnError returns true if the error is related to having a bad connection such that you need to reconnect
func isBadConnError(err error) bool {
if err == nil {
return false
}
if err == driver.ErrBadConn {
return true
}
// heavy handed crude check for "connection reset by peer"
if strings.Contains(err.Error(), syscall.ECONNRESET.Error()) {
return true
}
// otherwise iterate through the error types
switch t := err.(type) {
case syscall.Errno:
return t == syscall.ECONNRESET || t == syscall.ECONNABORTED || t == syscall.ECONNREFUSED
case *net.OpError:
return !t.Temporary()
case net.Error:
return !t.Temporary()
}
return false
}
func (s *sqlStore) initDB(database, table string) error {
db, err := s.db()
if err != nil {
return err
}
// Create the namespace's database
_, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s;", database))
if err != nil && !strings.Contains(err.Error(), "already exists") {
return err
}
var version string
if err = db.QueryRow("select version()").Scan(&version); err == nil {
if strings.Contains(version, "PostgreSQL") {
_, err = db.Exec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s;", database))
if err != nil {
return err
}
}
}
// Create a table for the namespace's prefix
_, err = db.Exec(fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s.%s
(
key text NOT NULL,
value bytea,
metadata JSONB,
expiry timestamp with time zone,
CONSTRAINT %s_pkey PRIMARY KEY (key)
);`, database, table, table))
if err != nil {
return errors.Wrap(err, "Couldn't create table")
}
// Create Index
_, err = db.Exec(fmt.Sprintf(`CREATE INDEX IF NOT EXISTS "%s" ON %s.%s USING btree ("key");`, "key_index_"+table, database, table))
if err != nil {
return err
}
// Create Metadata Index
_, err = db.Exec(fmt.Sprintf(`CREATE INDEX IF NOT EXISTS "%s" ON %s.%s USING GIN ("metadata");`, "metadata_index_"+table, database, table))
if err != nil {
return err
}
return nil
}
func (s *sqlStore) configure() error {
if len(s.options.Nodes) == 0 {
s.options.Nodes = []string{"postgresql://root@localhost:26257?sslmode=disable"}
}
source := s.options.Nodes[0]
// the node may be a URL (e.g. postgres://user@host:5432/db?sslmode=disable) or a
// key=value connection string (host=... port=... user=... sslmode=disable);
// if it parses as neither and contains no spaces, treat it as a bare host name
_, err := url.Parse(source)
if err != nil {
if !strings.Contains(source, " ") {
source = fmt.Sprintf("host=%s", source)
}
}
// create source from first node
db, err := sql.Open("postgres", source)
if err != nil {
return err
}
if err := db.Ping(); err != nil {
return err
}
if s.dbConn != nil {
s.dbConn.Close()
}
// save the values
s.dbConn = db
// get DB
database, table := s.getDB(s.options.Database, s.options.Table)
// initialise the database
return s.initDB(database, table)
}
func (s *sqlStore) prepare(database, table, query string) (*sql.Stmt, error) {
st, ok := statements[query]
if !ok {
return nil, errors.New("unsupported statement")
}
// get DB
database, table = s.getDB(database, table)
q := fmt.Sprintf(st, database, table)
db, err := s.db()
if err != nil {
return nil, err
}
stmt, err := db.Prepare(q)
if err != nil {
return nil, err
}
return stmt, nil
}
func (s *sqlStore) Close() error {
if s.dbConn != nil {
return s.dbConn.Close()
}
return nil
}
func (s *sqlStore) Init(opts ...store.Option) error {
for _, o := range opts {
o(&s.options)
}
// reconfigure
return s.configure()
}
// List all the known records
func (s *sqlStore) List(opts ...store.ListOption) ([]string, error) {
options := store.ListOptions{}
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return nil, err
}
limit := sql.NullInt32{}
offset := 0
pattern := "%"
if options.Prefix != "" || options.Suffix != "" {
if options.Prefix != "" {
pattern = options.Prefix + pattern
}
if options.Suffix != "" {
pattern = pattern + options.Suffix
}
}
if options.Offset > 0 {
offset = int(options.Offset)
}
if options.Limit > 0 {
limit = sql.NullInt32{Int32: int32(options.Limit), Valid: true}
}
st, err := s.prepare(options.Database, options.Table, "list")
if err != nil {
return nil, err
}
defer st.Close()
rows, err := st.Query(pattern, limit, offset)
if err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err
}
defer rows.Close()
var keys []string
records, err := s.rowsToRecords(rows)
if err != nil {
return nil, err
}
for _, k := range records {
keys = append(keys, k.Key)
}
rowErr := rows.Close()
if rowErr != nil {
// transaction rollback or something
return keys, rowErr
}
if err := rows.Err(); err != nil {
return keys, err
}
return keys, nil
}
// rowToRecord converts from sql.Row to a store.Record. If the record has expired it will issue a delete in a separate goroutine
func (s *sqlStore) rowToRecord(row *sql.Row) (*store.Record, error) {
var timehelper pq.NullTime
record := &store.Record{}
metadata := make(Metadata)
if err := row.Scan(&record.Key, &record.Value, &metadata, &timehelper); err != nil {
if err == sql.ErrNoRows {
return record, store.ErrNotFound
}
return nil, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if timehelper.Valid {
if timehelper.Time.Before(time.Now()) {
// record has expired
go s.Delete(record.Key)
return nil, store.ErrNotFound
}
record.Expiry = time.Until(timehelper.Time)
}
return record, nil
}
// rowsToRecords converts from sql.Rows to []*store.Record. If a record has expired it will issue a delete in a separate goroutine
func (s *sqlStore) rowsToRecords(rows *sql.Rows) ([]*store.Record, error) {
var records []*store.Record
var timehelper pq.NullTime
for rows.Next() {
record := &store.Record{}
metadata := make(Metadata)
if err := rows.Scan(&record.Key, &record.Value, &metadata, &timehelper); err != nil {
return records, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if timehelper.Valid {
if timehelper.Time.Before(time.Now()) {
// record has expired
go s.Delete(record.Key)
} else {
record.Expiry = time.Until(timehelper.Time)
records = append(records, record)
}
} else {
records = append(records, record)
}
}
return records, nil
}
// Read a single key
func (s *sqlStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
options := store.ReadOptions{}
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return nil, err
}
if options.Prefix || options.Suffix {
return s.read(key, options)
}
st, err := s.prepare(options.Database, options.Table, "read")
if err != nil {
return nil, err
}
defer st.Close()
row := st.QueryRow(key)
record, err := s.rowToRecord(row)
if err != nil {
return nil, err
}
var records []*store.Record
return append(records, record), nil
}
// Read Many records
func (s *sqlStore) read(key string, options store.ReadOptions) ([]*store.Record, error) {
pattern := "%"
if options.Prefix {
pattern = key + pattern
}
if options.Suffix {
pattern = pattern + key
}
var rows *sql.Rows
var st *sql.Stmt
var err error
if options.Limit != 0 {
st, err = s.prepare(options.Database, options.Table, "readOffset")
if err != nil {
return nil, err
}
defer st.Close()
rows, err = st.Query(pattern, options.Limit, options.Offset)
} else {
st, err = s.prepare(options.Database, options.Table, "readMany")
if err != nil {
return nil, err
}
defer st.Close()
rows, err = st.Query(pattern)
}
if err != nil {
if err == sql.ErrNoRows {
return []*store.Record{}, nil
}
return []*store.Record{}, errors.Wrap(err, "sqlStore.read failed")
}
defer rows.Close()
records, err := s.rowsToRecords(rows)
if err != nil {
return nil, err
}
rowErr := rows.Close()
if rowErr != nil {
// transaction rollback or something
return records, rowErr
}
if err := rows.Err(); err != nil {
return records, err
}
return records, nil
}
// Write records
func (s *sqlStore) Write(r *store.Record, opts ...store.WriteOption) error {
var options store.WriteOptions
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return err
}
st, err := s.prepare(options.Database, options.Table, "write")
if err != nil {
return err
}
defer st.Close()
metadata := make(Metadata)
for k, v := range r.Metadata {
metadata[k] = v
}
var expiry time.Time
if r.Expiry != 0 {
expiry = time.Now().Add(r.Expiry)
}
if expiry.IsZero() {
_, err = st.Exec(r.Key, r.Value, metadata, nil)
} else {
_, err = st.Exec(r.Key, r.Value, metadata, expiry)
}
if err != nil {
return errors.Wrap(err, "Couldn't insert record "+r.Key)
}
return nil
}
// Delete records with keys
func (s *sqlStore) Delete(key string, opts ...store.DeleteOption) error {
var options store.DeleteOptions
for _, o := range opts {
o(&options)
}
// create the db if not exists
if err := s.createDB(options.Database, options.Table); err != nil {
return err
}
st, err := s.prepare(options.Database, options.Table, "delete")
if err != nil {
return err
}
defer st.Close()
result, err := st.Exec(key)
if err != nil {
return err
}
_, err = result.RowsAffected()
if err != nil {
return err
}
return nil
}
func (s *sqlStore) Options() store.Options {
return s.options
}
func (s *sqlStore) String() string {
return "postgres"
}
// NewStore returns a new micro Store backed by sql
func NewStore(opts ...store.Option) store.Store {
options := store.Options{
Database: DefaultDatabase,
Table: DefaultTable,
}
for _, o := range opts {
o(&options)
}
// new store
s := new(sqlStore)
// set the options
s.options = options
// mark known databases
s.databases = make(map[string]bool)
// best-effort configure the store
if err := s.configure(); err != nil {
if logger.V(logger.ErrorLevel, logger.DefaultLogger) {
logger.Error("Error configuring store ", err)
}
}
go s.expiryLoop()
// return store
return s
}
func (s *sqlStore) expiryLoop() {
for {
s.expireRows()
time.Sleep(1 * time.Hour)
}
}
func (s *sqlStore) expireRows() error {
db, err := s.db()
if err != nil {
logger.Errorf("Error getting DB connection %s", err)
return err
}
stmt, err := db.Prepare(statements["showTables"])
if err != nil {
logger.Errorf("Error prepping show tables query %s", err)
return err
}
defer stmt.Close()
rows, err := stmt.Query()
if err != nil {
logger.Errorf("Error running show tables query %s", err)
return err
}
defer rows.Close()
for rows.Next() {
var schemaName, tableName string
if err := rows.Scan(&schemaName, &tableName); err != nil {
logger.Errorf("Error parsing result %s", err)
return err
}
db, err = s.db()
if err != nil {
logger.Errorf("Error getting DB connection %s", err)
return err
}
delStmt, err := db.Prepare(fmt.Sprintf(statements["deleteExpired"], schemaName, tableName))
if err != nil {
logger.Errorf("Error prepping delete expired query %s", err)
return err
}
defer delStmt.Close()
res, err := delStmt.Exec()
if err != nil {
logger.Errorf("Error cleaning up %s", err)
return err
}
r, _ := res.RowsAffected()
logger.Infof("Cleaning up %s %s: %d rows deleted", schemaName, tableName, r)
}
return nil
}

@@ -0,0 +1,148 @@
//go:build integration
// +build integration
package postgres
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"go-micro.dev/v5/store"
)
type testObj struct {
One string
Two int64
}
func TestPostgres(t *testing.T) {
t.Run("ReadWrite", func(t *testing.T) {
s := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"))
base := s.(*sqlStore)
base.dbConn.Exec("DROP SCHEMA IF EXISTS micro")
b, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s.Write(&store.Record{
Key: "foobar/baz",
Value: b,
Metadata: map[string]interface{}{
"meta1": "val1",
},
})
assert.NoError(t, err)
recs, err := s.Read("foobar/baz")
assert.NoError(t, err)
assert.Len(t, recs, 1)
assert.Equal(t, "foobar/baz", recs[0].Key)
assert.Len(t, recs[0].Metadata, 1)
assert.Equal(t, "val1", recs[0].Metadata["meta1"])
var tobj testObj
assert.NoError(t, json.Unmarshal(recs[0].Value, &tobj))
assert.Equal(t, "1", tobj.One)
assert.Equal(t, int64(2), tobj.Two)
})
t.Run("Prefix", func(t *testing.T) {
s := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"))
base := s.(*sqlStore)
base.dbConn.Exec("DROP SCHEMA IF EXISTS micro")
b, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s.Write(&store.Record{
Key: "foo/bar",
Value: b,
Metadata: map[string]interface{}{
"meta1": "val1",
},
})
assert.NoError(t, err)
err = s.Write(&store.Record{
Key: "foo/baz",
Value: b,
Metadata: map[string]interface{}{
"meta1": "val1",
},
})
assert.NoError(t, err)
recs, err := s.Read("foo/", store.ReadPrefix())
assert.NoError(t, err)
assert.Len(t, recs, 2)
assert.Equal(t, "foo/bar", recs[0].Key)
assert.Equal(t, "foo/baz", recs[1].Key)
})
t.Run("MultipleTables", func(t *testing.T) {
s1 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Table("t1"))
s2 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Table("t2"))
base := s1.(*sqlStore)
base.dbConn.Exec("DROP SCHEMA IF EXISTS t1")
base.dbConn.Exec("DROP SCHEMA IF EXISTS t2")
b1, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s1.Write(&store.Record{
Key: "foo/bar",
Value: b1,
})
assert.NoError(t, err)
b2, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err = s2.Write(&store.Record{
Key: "foo/baz",
Value: b2,
})
assert.NoError(t, err)
recs1, err := s1.List()
assert.NoError(t, err)
assert.Len(t, recs1, 1)
assert.Equal(t, "foo/bar", recs1[0])
recs2, err := s2.List()
assert.NoError(t, err)
assert.Len(t, recs2, 1)
assert.Equal(t, "foo/baz", recs2[0])
})
t.Run("MultipleDBs", func(t *testing.T) {
s1 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Database("d1"))
s2 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Database("d2"))
base := s1.(*sqlStore)
base.dbConn.Exec("DROP DATABASE IF EXISTS d1")
base.dbConn.Exec("DROP DATABASE IF EXISTS d2")
b1, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err := s1.Write(&store.Record{
Key: "foo/bar",
Value: b1,
})
assert.NoError(t, err)
b2, _ := json.Marshal(testObj{
One: "1",
Two: 2,
})
err = s2.Write(&store.Record{
Key: "foo/baz",
Value: b2,
})
assert.NoError(t, err)
recs1, err := s1.List()
assert.NoError(t, err)
assert.Len(t, recs1, 1)
assert.Equal(t, "foo/bar", recs1[0])
recs2, err := s2.List()
assert.NoError(t, err)
assert.Len(t, recs2, 1)
assert.Equal(t, "foo/baz", recs2[0])
})
}