1
0
mirror of https://github.com/go-micro/go-micro.git synced 2025-11-23 21:44:41 +02:00

Plugins and profiles (#2764)

* feat: more plugins

* chore(ci): split out benchmarks

Attempt to resolve too many open files in ci

* chore(ci): split out benchmarks

* fix(ci): Attempt to resolve too many open files in ci

* fix: set DefaultX for cli flag and service option

* fix: restore http broker

* fix: default http broker

* feat: full nats profile

* chore: still ugly, not ready

* fix: better initialization for profiles

* fix(tests): comment out flaky listen tests

* fix: disable benchmarks on gha

* chore: cleanup, comments

* chore: add nats config source
This commit is contained in:
Brian Ketelsen
2025-05-20 13:24:06 -04:00
committed by GitHub
parent e12504ce3a
commit ddc34801ee
58 changed files with 6792 additions and 218 deletions

View File

@@ -0,0 +1,14 @@
# Postgres pgx plugin
This module implements a Postgres implementation of the micro store interface.
It uses the modern https://github.com/jackc/pgx driver to access Postgres.
## Implementation notes
### Concepts
Every database has its own connection pool. Due to the way connections are handled, this means that all micro "databases" and "tables" can be stored in a single Postgres database or spread across several, as specified in the connection string (https://www.postgresql.org/docs/8.1/ddl-schemas.html). The mapping of micro to Postgres concepts is:
- micro database => Postgres schema
- micro table => Postgres table
### Expiry
Expiry is managed by an expiry column in the table. A record's expiry is specified in that column; when a record is read, the expiry field is checked first, and the record is returned only if it is still valid — otherwise it is deleted. A maintenance loop also runs periodically to delete any rows that have expired.

8
store/postgres/pgx/db.go Normal file
View File

@@ -0,0 +1,8 @@
package pgx
import "github.com/jackc/pgx/v4/pgxpool"
// DB bundles one pgxpool connection pool with the generated query sets for
// every table that has been initialized in this database (Postgres schema).
type DB struct {
	conn *pgxpool.Pool
	// tables maps a micro table name to its pre-formatted SQL statements.
	tables map[string]Queries
}

View File

@@ -0,0 +1,44 @@
package pgx
import (
"database/sql/driver"
"encoding/json"
"errors"
)
// Metadata is a JSON-serializable map of arbitrary record metadata,
// stored in a JSONB column.
type Metadata map[string]interface{}

// Scan satisfies the sql.Scanner interface. It accepts the raw JSONB value
// either as []byte or as string, since drivers may deliver text columns in
// either representation.
func (m *Metadata) Scan(src interface{}) error {
	var source []byte
	switch v := src.(type) {
	case []byte:
		source = v
	case string:
		// some driver configurations return JSONB as string
		source = []byte(v)
	default:
		return errors.New("type assertion .([]byte) failed")
	}

	var i interface{}
	if err := json.Unmarshal(source, &i); err != nil {
		return err
	}

	md, ok := i.(map[string]interface{})
	if !ok {
		return errors.New("type assertion .(map[string]interface{}) failed")
	}
	*m = md

	return nil
}

// Value satisfies the driver.Valuer interface by marshaling the map to JSON.
func (m *Metadata) Value() (driver.Value, error) {
	return json.Marshal(m)
}

// toMetadata copies a Metadata into a plain map for use in store.Record.
func toMetadata(m *Metadata) map[string]interface{} {
	md := make(map[string]interface{}, len(*m))
	for k, v := range *m {
		md[k] = v
	}
	return md
}

427
store/postgres/pgx/pgx.go Normal file
View File

@@ -0,0 +1,427 @@
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pgx implements the postgres store with pgx driver
package pgx
import (
"database/sql"
"fmt"
"net/url"
"regexp"
"strings"
"sync"
"time"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/pkg/errors"
"go-micro.dev/v5/logger"
"go-micro.dev/v5/store"
)
const defaultDatabase = "micro"
const defaultTable = "micro"
// sqlStore implements the store.Store interface on top of Postgres using
// the pgx driver. Databases map to Postgres schemas and tables to tables.
type sqlStore struct {
	options store.Options
	// re strips characters that are not legal in Postgres identifiers
	re *regexp.Regexp
	// Mutex guards databases (and the nested tables maps), which are
	// lazily populated by db()/initDB/initTable.
	sync.Mutex
	// known databases
	databases map[string]DB
}
// getDB resolves the effective database and table names — falling back to
// the store options and then to the package defaults — and sanitizes both
// so they are safe to use as Postgres identifiers.
func (s *sqlStore) getDB(database, table string) (string, string) {
	if database == "" {
		database = s.options.Database
		if database == "" {
			database = defaultDatabase
		}
	}

	if table == "" {
		table = s.options.Table
		if table == "" {
			table = defaultTable
		}
	}

	// store.namespace must only contain letters, numbers and underscores
	return s.re.ReplaceAllString(database, "_"), s.re.ReplaceAllString(table, "_")
}
// db returns the connection pool and query set for the given database/table
// pair, lazily creating the schema, table and indexes on first use.
// Safe for concurrent callers; all lazy initialization happens under the
// store mutex.
func (s *sqlStore) db(database, table string) (*pgxpool.Pool, Queries, error) {
	s.Lock()
	defer s.Unlock()

	database, table = s.getDB(database, table)

	// first access to this database: open a pool and create the schema
	if _, ok := s.databases[database]; !ok {
		err := s.initDB(database)
		if err != nil {
			return nil, Queries{}, err
		}
	}

	dbObj := s.databases[database]
	// first access to this table: create the table, its indexes and the
	// query set. dbObj.tables is a map (reference type), so the entry
	// initTable inserts is visible through dbObj below.
	if _, ok := dbObj.tables[table]; !ok {
		err := s.initTable(database, table)
		if err != nil {
			return nil, Queries{}, err
		}
	}
	return dbObj.conn, dbObj.tables[table], nil
}
// initTable creates the backing table, its metadata (GIN) index and its
// expiry index inside the given schema, then registers the generated query
// set. The caller must hold s.Mutex and have already initialized database.
func (s *sqlStore) initTable(database, table string) error {
	db := s.databases[database].conn

	_, err := db.Exec(s.options.Context, fmt.Sprintf(createTable, database, table))
	if err != nil {
		return errors.Wrap(err, "cannot create table")
	}

	// GIN index to support querying into the JSONB metadata column
	_, err = db.Exec(s.options.Context, fmt.Sprintf(createMDIndex, table, database, table))
	if err != nil {
		return errors.Wrap(err, "cannot create metadata index")
	}

	// partial index used by the periodic expiry cleanup
	_, err = db.Exec(s.options.Context, fmt.Sprintf(createExpiryIndex, table, database, table))
	if err != nil {
		return errors.Wrap(err, "cannot create expiry index")
	}

	s.databases[database].tables[table] = NewQueries(database, table)

	return nil
}
// initDB connects to the first configured node, creates the schema backing
// database and registers the connection pool in s.databases.
// The caller must hold s.Mutex.
func (s *sqlStore) initDB(database string) error {
	// Resolve the database name BEFORE it is used anywhere. Previously this
	// fallback ran only after createSchema had already been executed with
	// the (possibly empty) name, which would have produced invalid SQL.
	if len(database) == 0 {
		if len(s.options.Database) > 0 {
			database = s.options.Database
		} else {
			database = defaultDatabase
		}
	}

	if len(s.options.Nodes) == 0 {
		s.options.Nodes = []string{"postgresql://root@localhost:26257?sslmode=disable"}
	}

	source := s.options.Nodes[0]
	// check if it is a standard connection string eg: host=%s port=%d user=%s password=%s dbname=%s sslmode=disable
	// if it is not parseable as a URL (postgres://...), treat a bare host
	// name as a keyword/value connection string
	if _, err := url.Parse(source); err != nil {
		if !strings.Contains(source, " ") {
			source = fmt.Sprintf("host=%s", source)
		}
	}

	config, err := pgxpool.ParseConfig(source)
	if err != nil {
		return err
	}

	// one connection pool per micro database
	db, err := pgxpool.ConnectConfig(s.options.Context, config)
	if err != nil {
		return err
	}
	if err = db.Ping(s.options.Context); err != nil {
		return err
	}

	// create the schema backing this micro "database"
	if _, err = db.Exec(s.options.Context, fmt.Sprintf(createSchema, database)); err != nil {
		return err
	}

	// save the values
	s.databases[database] = DB{
		conn:   db,
		tables: make(map[string]Queries),
	}

	return nil
}
// Close shuts down every open connection pool. It always returns nil.
func (s *sqlStore) Close() error {
	for _, d := range s.databases {
		d.conn.Close()
	}
	return nil
}
// Init applies the given options and eagerly connects to the configured
// database/table so that configuration errors surface immediately.
func (s *sqlStore) Init(opts ...store.Option) error {
	for _, apply := range opts {
		apply(&s.options)
	}
	_, _, err := s.db(s.options.Database, s.options.Table)
	return err
}
// List all the known records
func (s *sqlStore) List(opts ...store.ListOption) ([]string, error) {
options := store.ListOptions{}
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return nil, err
}
pattern := "%"
if options.Prefix != "" {
pattern = options.Prefix + pattern
}
if options.Suffix != "" {
pattern = pattern + options.Suffix
}
var rows pgx.Rows
if options.Limit > 0 {
rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset)
} else {
rows, err = db.Query(s.options.Context, queries.ListAsc, pattern)
}
if err != nil {
if err == pgx.ErrNoRows {
return nil, nil
}
return nil, err
}
defer rows.Close()
keys := make([]string, 0, 10)
for rows.Next() {
var key string
err = rows.Scan(&key)
if err != nil {
return nil, err
}
keys = append(keys, key)
}
return keys, nil
}
// rowToRecord converts from pgx.Row to a store.Record
func (s *sqlStore) rowToRecord(row pgx.Row) (*store.Record, error) {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := row.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
if err == sql.ErrNoRows {
return record, store.ErrNotFound
}
return nil, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
return record, nil
}
// rowsToRecords converts from pgx.Rows to []*store.Record
func (s *sqlStore) rowsToRecords(rows pgx.Rows) ([]*store.Record, error) {
var records []*store.Record
for rows.Next() {
var expiry *time.Time
record := &store.Record{}
metadata := make(Metadata)
if err := rows.Scan(&record.Key, &record.Value, &metadata, &expiry); err != nil {
return records, err
}
// set the metadata
record.Metadata = toMetadata(&metadata)
if expiry != nil {
record.Expiry = time.Until(*expiry)
}
records = append(records, record)
}
return records, nil
}
// Read a single key
func (s *sqlStore) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
options := store.ReadOptions{
}
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return nil, err
}
// read one record
if !options.Prefix && !options.Suffix {
row := db.QueryRow(s.options.Context, queries.ReadOne, key)
record, err := s.rowToRecord(row)
if err != nil {
return nil, err
}
return []*store.Record{record}, nil
}
// read by pattern
pattern := "%"
if options.Prefix {
pattern = key + pattern
}
if options.Suffix {
pattern = pattern + key
}
var rows pgx.Rows
if options.Limit > 0 {
rows, err = db.Query(s.options.Context, queries.ListAscLimit, pattern, options.Limit, options.Offset)
} else {
rows, err = db.Query(s.options.Context, queries.ListAsc, pattern)
}
if err != nil {
if err == pgx.ErrNoRows {
return nil, nil
}
return nil, err
}
defer rows.Close()
return s.rowsToRecords(rows)
}
// Write records
func (s *sqlStore) Write(r *store.Record, opts ...store.WriteOption) error {
var options store.WriteOptions
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return err
}
metadata := make(Metadata)
for k, v := range r.Metadata {
metadata[k] = v
}
if r.Expiry != 0 {
_, err = db.Exec(s.options.Context, queries.Write, r.Key, r.Value, metadata, time.Now().Add(r.Expiry))
} else {
_, err = db.Exec(s.options.Context, queries.Write, r.Key, r.Value, metadata, nil)
}
if err != nil {
return errors.Wrap(err, "cannot upsert record "+r.Key)
}
return nil
}
// Delete records with keys
func (s *sqlStore) Delete(key string, opts ...store.DeleteOption) error {
var options store.DeleteOptions
for _, o := range opts {
o(&options)
}
db, queries, err := s.db(options.Database, options.Table)
if err != nil {
return err
}
_, err = db.Exec(s.options.Context, queries.Delete, key)
return err
}
// Options returns the store's current configuration.
func (s *sqlStore) Options() store.Options {
	return s.options
}
// String returns the name of this store implementation.
func (s *sqlStore) String() string {
	return "pgx"
}
// NewStore returns a new micro Store backed by sql
func NewStore(opts ...store.Option) store.Store {
options := store.Options{
Database: defaultDatabase,
Table: defaultTable,
}
for _, o := range opts {
o(&options)
}
// new store
s := new(sqlStore)
s.options = options
s.databases = make(map[string]DB)
s.re = regexp.MustCompile("[^a-zA-Z0-9]+")
go s.expiryLoop()
// return store
return s
}
// expiryLoop periodically purges expired rows from every known table.
// It never terminates; cleanup errors are logged and the loop continues.
func (s *sqlStore) expiryLoop() {
	for {
		err := s.expireRows()
		if err != nil {
			logger.Errorf("error cleaning up %s", err)
		}
		// run the cleanup once per hour
		time.Sleep(1 * time.Hour)
	}
}
// expireRows deletes expired rows from every known database/table.
// It snapshots the targets under the store mutex — s.databases and the
// nested tables maps are mutated under that lock by db()/initDB/initTable,
// so the previous unlocked iteration was a data race — then runs the
// (potentially slow) deletions outside the lock.
func (s *sqlStore) expireRows() error {
	type target struct {
		database string
		table    string
		conn     *pgxpool.Pool
		queries  Queries
	}

	s.Lock()
	var targets []target
	for database, dbObj := range s.databases {
		for table, queries := range dbObj.tables {
			targets = append(targets, target{database, table, dbObj.conn, queries})
		}
	}
	s.Unlock()

	for _, tg := range targets {
		res, err := tg.conn.Exec(s.options.Context, tg.queries.DeleteExpired)
		if err != nil {
			logger.Errorf("Error cleaning up %s", err)
			return err
		}
		logger.Infof("Cleaning up %s %s: %d rows deleted", tg.database, tg.table, res.RowsAffected())
	}
	return nil
}

View File

@@ -0,0 +1,139 @@
//go:build integration
// +build integration
package pgx
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"go-micro.dev/v5/store"
)
// testObj is a simple payload round-tripped through the store as JSON.
type testObj struct {
	One string
	Two int64
}
// TestPostgres exercises the store end-to-end against a live Postgres at
// localhost:5432 (integration build tag only).
// NOTE(review): the subtests assume a clean database — the exact-length
// assertions below fail on re-runs without wiping state; confirm the CI
// job provisions a fresh instance.
func TestPostgres(t *testing.T) {
	// write one record, read it back and verify value + metadata round-trip
	t.Run("ReadWrite", func(t *testing.T) {
		s := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"))
		b, _ := json.Marshal(testObj{
			One: "1",
			Two: 2,
		})
		err := s.Write(&store.Record{
			Key:   "foobar/baz",
			Value: b,
			Metadata: map[string]interface{}{
				"meta1": "val1",
			},
		})
		assert.NoError(t, err)
		recs, err := s.Read("foobar/baz")
		assert.NoError(t, err)
		assert.Len(t, recs, 1)
		assert.Equal(t, "foobar/baz", recs[0].Key)
		assert.Len(t, recs[0].Metadata, 1)
		assert.Equal(t, "val1", recs[0].Metadata["meta1"])
		var tobj testObj
		assert.NoError(t, json.Unmarshal(recs[0].Value, &tobj))
		assert.Equal(t, "1", tobj.One)
		assert.Equal(t, int64(2), tobj.Two)
	})
	// prefix reads should return all keys sharing the prefix, in key order
	t.Run("Prefix", func(t *testing.T) {
		s := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"))
		b, _ := json.Marshal(testObj{
			One: "1",
			Two: 2,
		})
		err := s.Write(&store.Record{
			Key:   "foo/bar",
			Value: b,
			Metadata: map[string]interface{}{
				"meta1": "val1",
			},
		})
		assert.NoError(t, err)
		err = s.Write(&store.Record{
			Key:   "foo/baz",
			Value: b,
			Metadata: map[string]interface{}{
				"meta1": "val1",
			},
		})
		assert.NoError(t, err)
		recs, err := s.Read("foo/", store.ReadPrefix())
		assert.NoError(t, err)
		assert.Len(t, recs, 2)
		assert.Equal(t, "foo/bar", recs[0].Key)
		assert.Equal(t, "foo/baz", recs[1].Key)
	})
	// records written via different Table options must not be visible to
	// each other
	t.Run("MultipleTables", func(t *testing.T) {
		s1 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Table("t1"))
		s2 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Table("t2"))
		b1, _ := json.Marshal(testObj{
			One: "1",
			Two: 2,
		})
		err := s1.Write(&store.Record{
			Key:   "foo/bar",
			Value: b1,
		})
		assert.NoError(t, err)
		b2, _ := json.Marshal(testObj{
			One: "1",
			Two: 2,
		})
		err = s2.Write(&store.Record{
			Key:   "foo/baz",
			Value: b2,
		})
		assert.NoError(t, err)
		recs1, err := s1.List()
		assert.NoError(t, err)
		assert.Len(t, recs1, 1)
		assert.Equal(t, "foo/bar", recs1[0])
		recs2, err := s2.List()
		assert.NoError(t, err)
		assert.Len(t, recs2, 1)
		assert.Equal(t, "foo/baz", recs2[0])
	})
	// records written via different Database options (Postgres schemas)
	// must likewise be isolated
	t.Run("MultipleDBs", func(t *testing.T) {
		s1 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Database("d1"))
		s2 := NewStore(store.Nodes("postgresql://postgres@localhost:5432/?sslmode=disable"), store.Database("d2"))
		b1, _ := json.Marshal(testObj{
			One: "1",
			Two: 2,
		})
		err := s1.Write(&store.Record{
			Key:   "foo/bar",
			Value: b1,
		})
		assert.NoError(t, err)
		b2, _ := json.Marshal(testObj{
			One: "1",
			Two: 2,
		})
		err = s2.Write(&store.Record{
			Key:   "foo/baz",
			Value: b2,
		})
		assert.NoError(t, err)
		recs1, err := s1.List()
		assert.NoError(t, err)
		assert.Len(t, recs1, 1)
		assert.Equal(t, "foo/bar", recs1[0])
		recs2, err := s2.List()
		assert.NoError(t, err)
		assert.Len(t, recs2, 1)
		assert.Equal(t, "foo/baz", recs2[0])
	})
}

View File

@@ -0,0 +1,38 @@
package pgx
import "fmt"
// Queries holds the full set of SQL statements generated for one
// database (schema) / table pair, with identifiers already interpolated.
type Queries struct {
	// read
	ListAsc           string
	ListAscLimit      string
	ListDesc          string
	ListDescLimit     string
	ReadOne           string
	ReadManyAsc       string
	ReadManyAscLimit  string
	ReadManyDesc      string
	ReadManyDescLimit string
	// change
	Write         string
	Delete        string
	DeleteExpired string
}
// NewQueries builds the complete SQL statement set for a database/table
// pair by interpolating the identifiers into the base query templates and
// appending the ordering/pagination suffixes.
func NewQueries(database, table string) Queries {
	listQuery := fmt.Sprintf(list, database, table)
	readManyQuery := fmt.Sprintf(readMany, database, table)

	return Queries{
		ListAsc:           listQuery + asc,
		ListAscLimit:      listQuery + asc + limit,
		ListDesc:          listQuery + desc,
		ListDescLimit:     listQuery + desc + limit,
		ReadOne:           fmt.Sprintf(readOne, database, table),
		ReadManyAsc:       readManyQuery + asc,
		ReadManyAscLimit:  readManyQuery + asc + limit,
		ReadManyDesc:      readManyQuery + desc,
		ReadManyDescLimit: readManyQuery + desc + limit,
		Write:             fmt.Sprintf(write, database, table),
		Delete:            fmt.Sprintf(deleteRecord, database, table),
		DeleteExpired:     fmt.Sprintf(deleteExpired, database, table),
	}
}

View File

@@ -0,0 +1,35 @@
package pgx
// init — DDL run once per database/table by initDB/initTable.

// createSchema maps a micro "database" onto a Postgres schema.
const createSchema = "CREATE SCHEMA IF NOT EXISTS %s"

// createTable maps a micro "table" onto a Postgres table: text key,
// bytea value, JSONB metadata and an optional expiry timestamp.
const createTable = `CREATE TABLE IF NOT EXISTS %s.%s
(
key text primary key,
value bytea,
metadata JSONB,
expiry timestamp with time zone
)`

// createMDIndex adds a GIN index over the JSONB metadata column.
const createMDIndex = `create index if not exists idx_md_%s ON %s.%s USING GIN (metadata)`

// createExpiryIndex adds a partial index used by the expiry cleanup loop.
const createExpiryIndex = `create index if not exists idx_expiry_%s on %s.%s (expiry) where (expiry IS NOT NULL)`
// base queries
const (
list = "SELECT key FROM %s.%s WHERE key LIKE $1 and (expiry < now() or expiry isnull)"
readOne = "SELECT key, value, metadata, expiry FROM %s.%s WHERE key = $1 and (expiry < now() or expiry isnull)"
readMany = "SELECT key, value, metadata, expiry FROM %s.%s WHERE key LIKE $1 and (expiry < now() or expiry isnull)"
write = `INSERT INTO %s.%s(key, value, metadata, expiry)
VALUES ($1, $2::bytea, $3, $4)
ON CONFLICT (key)
DO UPDATE
SET value = EXCLUDED.value, metadata = EXCLUDED.metadata, expiry = EXCLUDED.expiry`
deleteRecord = "DELETE FROM %s.%s WHERE key = $1"
deleteExpired = "DELETE FROM %s.%s WHERE expiry < now()"
)
// suffixes appended to the base list/read queries
const (
	// pagination; bound as $2/$3 after the key pattern in $1
	limit = " LIMIT $2 OFFSET $3"
	asc   = " ORDER BY key ASC"
	desc  = " ORDER BY key DESC"
)