
Merge branch 'master' into Jsign/aws2

Lee Brown
2019-08-26 03:38:26 -08:00
10 changed files with 209 additions and 102 deletions


@@ -237,7 +237,7 @@ https://docs.docker.com/install/
 There is a `docker-compose` file that knows how to build and run all the services. Each service has its own a
 `dockerfile`.
-Before using `docker-compose`, you need to copy `sample.env_docker_compose` to `.env_docker_compose` that docker will use. When you run `docker-compose up` it will run all the services including the main.go file for each Go service. The
+Before using `docker-compose`, you need to copy `configs/sample.env_docker_compose` to `.env_docker_compose` that docker will use. When you run `docker-compose up` it will run all the services including the main.go file for each Go service. The
 following services will run:
 - web-api
 - web-app
@@ -250,7 +250,7 @@ Use the `docker-compose.yaml` to run all of the services, including the third-pa
 command, Docker will download the required images for the 3rd party services.
 ```bash
-$ cp sample.env_docker_compose .env_docker_compose
+$ cp configs/sample.env_docker_compose configs/.env_docker_compose
 $ docker-compose up
 ```
@@ -303,7 +303,7 @@ changes.
 Fresh is a command line tool that builds and (re)starts your web application everytime you save a Go or template file.
-The (Fresh configuration file](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/fresh-auto-reload.conf)
+The [Fresh configuration file](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/configs/fresh-auto-reload.conf)
 is located in the project root. By default the following folders are watched by Fresh:
 - handlers
 - static
@@ -418,7 +418,7 @@ Create a new user with programmatic access and directly attach it the policy `Sa
 4. Create a new docker-compose config file
 ```bash
-cp sample.env_docker_compose .env_docker_compose
+cp configs/sample.env_docker_compose configs/.env_docker_compose
 ```
 5. Update .env_docker_compose with the Access key ID and Secret access key
@@ -440,11 +440,11 @@ configs.
 To set AWS configs and credentials for other third-party dependencies, you need to create a copy of the sample
 environment docker-compose file without "sample" prepending the file name.
-Navigate to the root of the project. Copy `sample.env_docker_compose` to `.env_docker_compose`.
+Navigate to the root of the project. Copy `configs/sample.env_docker_compose` to `.env_docker_compose`.
 ```bash
 $ cd $GOPATH/src/geeks-accelerator/oss/saas-starter-kit
-$ cp sample.env_docker_compose .env_docker_compose
+$ cp configs/sample.env_docker_compose configs/.env_docker_compose
 ```
 The example the docker-compose file specifies these environmental variables. The $ means that the variable is commented


@@ -3,12 +3,13 @@ package config
 import (
 	"context"
 	"fmt"
-	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
+	"io/ioutil"
 	"log"
 	"os"
 	"path/filepath"
 	"strings"
 
+	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
 	"geeks-accelerator/oss/saas-starter-kit/internal/schema"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
@@ -28,6 +29,10 @@ const (
 	// GitLabProjectBaseUrl is the base url used to create links to a specific CI/CD job or pipeline by ID.
 	GitLabProjectBaseUrl = "https://gitlab.com/geeks-accelerator/oss/saas-starter-kit"
+
+	// EnableRdsServerless will use the Aurora database engine that scales the capacity based on database load. This is
+	// a good option for intermittent or unpredictable workloads.
+	EnableRdsServerless = true
 )
 
 // Env defines the target deployment environment.
@@ -92,7 +97,7 @@ func (cfgCtx *ConfigContext) Config(log *log.Logger) (*devdeploy.Config, error)
 	// Get the current working directory. This should be somewhere contained within the project.
 	workDir, err := os.Getwd()
 	if err != nil {
-		return cfg, errors.WithMessage(err, "Failed to get current working directory.")
+		return cfg, errors.Wrap(err, "Failed to get current working directory.")
 	}
 
 	// Set the project root directory and project name. This is current set by finding the go.mod file for the project
@@ -108,6 +113,21 @@ func (cfgCtx *ConfigContext) Config(log *log.Logger) (*devdeploy.Config, error)
 	// ProjectName will be used for prefixing AWS resources. This could be changed as needed or manually defined.
 	cfg.ProjectName = ProjectNamePrefix + modDetails.ProjectName
 
+	// In a verbatim fork of the repo, a CI/CD would fail due to a conflict creating AWS resources (such as S3) since
+	// their name is calculated with the go.mod path. Since the name-scope of AWS resources is region/global scope,
+	// it will fail to create appropriate resources for the account of the forked user.
+	if cfg.ProjectName == "saas-starter-kit" {
+		remoteUser := gitRemoteUser(modDetails.ProjectRoot)
+
+		// Its a true fork from the origin repo.
+		if remoteUser != "saas-starter-kit" && remoteUser != "geeks-accelerator" {
+			// Replace the prefix 'saas' with the parent directory name, hopefully the gitlab group/username.
+			cfg.ProjectName = filepath.Base(filepath.Dir(cfg.ProjectRoot)) + "-starter-kit"
+			log.Println("switching project name to ", cfg.ProjectName)
+		}
+	}
+
 	// Set default AWS ECR Repository Name.
 	cfg.AwsEcrRepository = &devdeploy.AwsEcrRepository{
 		RepositoryName: cfg.ProjectName,
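A quick note on the fork-rename logic added above: the replacement project name is taken from the parent directory of the project root, which in a GOPATH-style checkout is typically the GitLab group or username, so AWS resource names derived from `cfg.ProjectName` stop colliding with the upstream project. A minimal sketch of that derivation with a hypothetical clone path (`someuser` is not part of the commit):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical project root for a fork; only the parent directory name is used.
	projectRoot := "/go/src/gitlab.com/someuser/saas-starter-kit"

	// Same derivation as config.go above: parent directory name + "-starter-kit".
	projectName := filepath.Base(filepath.Dir(projectRoot)) + "-starter-kit"

	fmt.Println(projectName) // Output: someuser-starter-kit
}
```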
@@ -337,32 +357,86 @@ func (cfgCtx *ConfigContext) Config(log *log.Logger) (*devdeploy.Config, error)
 		},
 	}
 
-	// Define the RDS Database instance for transactional data. A random one will be generated for any created instance.
-	cfg.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
-		DBInstanceIdentifier: cfg.ProjectName + "-" + cfg.Env,
-		DBName: "shared",
-		Engine: "postgres",
-		MasterUsername: "god",
-		Port: 5432,
-		DBInstanceClass: "db.t2.small",
-		AllocatedStorage: 20,
-		PubliclyAccessible: false,
-		BackupRetentionPeriod: aws.Int64(7),
-		AutoMinorVersionUpgrade: true,
-		CopyTagsToSnapshot: aws.Bool(true),
-		Tags: []devdeploy.Tag{
-			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
-			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
-		},
-		AfterCreate: func(res *rds.DBInstance, dbInfo *devdeploy.DBConnInfo) error {
-			masterDb, err := sqlx.Open(dbInfo.Driver, dbInfo.URL())
-			if err != nil {
-				return errors.WithMessage(err, "Failed to connect to db for schema migration.")
-			}
-			defer masterDb.Close()
-
-			return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
-		},
-	}
+	// If serverless RDS is enabled, defined the RDS database cluster and link it to the database instance.
+	if EnableRdsServerless {
+		cfg.AwsRdsDBCluster = &devdeploy.AwsRdsDBCluster{
+			DBClusterIdentifier: cfg.ProjectName + "-" + cfg.Env,
+			Engine: "aurora-postgresql",
+			EngineMode: "serverless",
+			DatabaseName: "shared",
+			MasterUsername: "god",
+			Port: 5432,
+			BackupRetentionPeriod: aws.Int64(7),
+			CopyTagsToSnapshot: aws.Bool(true),
+			Tags: []devdeploy.Tag{
+				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
+				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
+			},
+			PreCreate: func(input *rds.CreateDBClusterInput) error {
+				input.ScalingConfiguration = &rds.ScalingConfiguration{
+					// A value that indicates whether to allow or disallow automatic pause for an
+					// Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused
+					// only when it's idle (it has no connections).
+					//
+					// If a DB cluster is paused for more than seven days, the DB cluster might
+					// be backed up with a snapshot. In this case, the DB cluster is restored when
+					// there is a request to connect to it.
+					AutoPause: aws.Bool(true),
+
+					// The maximum capacity for an Aurora DB cluster in serverless DB engine mode.
+					// Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.
+					// The maximum capacity must be greater than or equal to the minimum capacity.
+					MaxCapacity: aws.Int64(2),
+
+					// The minimum capacity for an Aurora DB cluster in serverless DB engine mode.
+					// Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.
+					// The minimum capacity must be less than or equal to the maximum capacity.
+					MinCapacity: aws.Int64(2),
+
+					// The time, in seconds, before an Aurora DB cluster in serverless mode is paused.
+					SecondsUntilAutoPause: aws.Int64(3600),
+
+					// The action to take when the timeout is reached, either ForceApplyCapacityChange
+					// or RollbackCapacityChange.
+					// ForceApplyCapacityChange sets the capacity to the specified value as soon
+					// as possible.
+					// RollbackCapacityChange, the default, ignores the capacity change if a scaling
+					// point is not found in the timeout period.
+					// If you specify ForceApplyCapacityChange, connections that prevent Aurora
+					// Serverless from finding a scaling point might be dropped.
+					// For more information, see Autoscaling for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling)
+					// in the Amazon Aurora User Guide.
+					TimeoutAction: aws.String("ForceApplyCapacityChange"),
+				}
+				return nil
+			},
+			AfterCreate: func(res *rds.DBCluster, dbInfo *devdeploy.DBConnInfo, masterDb *sqlx.DB) error {
+				return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
+			},
+		}
+	} else {
+		// Define the RDS database instance for transactional data. A random password will be generated for any created instance.
+		cfg.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
+			DBInstanceIdentifier: cfg.ProjectName + "-" + cfg.Env,
+			DBName: "shared",
+			Engine: "postgres",
+			MasterUsername: "god",
+			Port: 5432,
+			DBInstanceClass: "db.t2.small",
+			AllocatedStorage: 20,
+			PubliclyAccessible: false,
+			BackupRetentionPeriod: aws.Int64(7),
+			AutoMinorVersionUpgrade: true,
+			CopyTagsToSnapshot: aws.Bool(true),
+			Tags: []devdeploy.Tag{
+				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
+				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
+			},
+			AfterCreate: func(res *rds.DBInstance, dbInfo *devdeploy.DBConnInfo, masterDb *sqlx.DB) error {
+				return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
+			},
+		}
+	}
 
 	// AwsIamPolicy defines the name and policy that will be attached to the task role. The policy document grants
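For readers less familiar with the devdeploy hooks used above: `PreCreate` mutates the `rds.CreateDBClusterInput` that the deploy tooling presumably hands to the AWS SDK before calling `CreateDBCluster`, so the scaling settings end up on the cluster request itself. A minimal, standalone sketch of the equivalent direct SDK call follows; the identifiers and password are hypothetical, and only the scaling configuration mirrors the values in the config:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := rds.New(sess)

	// Hypothetical identifiers; the real values come from cfg.ProjectName, cfg.Env and a
	// generated master password.
	input := &rds.CreateDBClusterInput{
		DBClusterIdentifier: aws.String("example-starter-kit-dev"),
		Engine:              aws.String("aurora-postgresql"),
		EngineMode:          aws.String("serverless"),
		DatabaseName:        aws.String("shared"),
		MasterUsername:      aws.String("god"),
		MasterUserPassword:  aws.String("replace-with-generated-password"),
		Port:                aws.Int64(5432),
		ScalingConfiguration: &rds.ScalingConfiguration{
			AutoPause:             aws.Bool(true),
			MinCapacity:           aws.Int64(2),
			MaxCapacity:           aws.Int64(2),
			SecondsUntilAutoPause: aws.Int64(3600),
			TimeoutAction:         aws.String("ForceApplyCapacityChange"),
		},
	}

	if _, err := svc.CreateDBCluster(input); err != nil {
		log.Fatalf("create db cluster: %v", err)
	}
}
```

With `MinCapacity` equal to `MaxCapacity`, the cluster is pinned at 2 capacity units, while `AutoPause` still lets it pause entirely after an hour with no connections.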
@@ -498,3 +572,45 @@ func getCommitRef() string {
 
 	return commitRef
 }
+
+// gitRemoteUser returns the git username/organization for the git repo
+func gitRemoteUser(projectRoot string) string {
+	var remoteUrl string
+	if ev := os.Getenv("CI_PROJECT_PATH"); ev != "" {
+		if strings.Contains(ev, "/") {
+			remoteUrl = strings.Split(ev, "/")[1]
+		} else {
+			remoteUrl = ev
+		}
+	} else {
+		dat, err := ioutil.ReadFile(filepath.Join(projectRoot, ".git/config"))
+		if err != nil {
+			return ""
+		}
+
+		lines := strings.Split(string(dat), "\n")
+		for _, l := range lines {
+			l = strings.TrimSpace(l)
+			if strings.HasPrefix(l, "url =") {
+				remoteUrl = l
+				break
+			}
+		}
+
+		if remoteUrl == "" {
+			return ""
+		}
+
+		remoteUrl = strings.TrimSpace(strings.Split(remoteUrl, "=")[1])
+
+		if !strings.Contains(remoteUrl, ":") {
+			return ""
+		}
+
+		remoteUrl = strings.Split(remoteUrl, ":")[1]
+	}
+
+	remoteUser := strings.Split(remoteUrl, "/")[0]
+
+	return remoteUser
+}
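As a quick aside on the parsing in `gitRemoteUser` (not part of the commit): in CI it takes the second segment of `CI_PROJECT_PATH`; otherwise it reads the first `url =` line from `.git/config`, keeps the text after `=`, requires a `:`, and returns the segment between the `:` and the first `/`. A minimal sketch of the non-CI path with a hypothetical SSH-style remote:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical line from .git/config; only the user/organization segment is kept.
	raw := "url = git@gitlab.com:someuser/saas-starter-kit.git"

	// Same steps as gitRemoteUser: value after "=", then after ":", then before "/".
	remoteUrl := strings.TrimSpace(strings.Split(raw, "=")[1])
	remoteUrl = strings.Split(remoteUrl, ":")[1]
	remoteUser := strings.Split(remoteUrl, "/")[0]

	fmt.Println(remoteUser) // Output: someuser
}
```

Note that an HTTPS remote such as `https://gitlab.com/someuser/saas-starter-kit.git` would split on the `:` after the scheme and yield an empty string, so the `.git/config` fallback is effectively tuned for SSH-style remotes.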


@@ -30,7 +30,7 @@ func RunSchemaMigrationsForTargetEnv(log *log.Logger, awsCredentials devdeploy.A
 	masterDb, err := sqlx.Open(cfg.DBConnInfo.Driver, cfg.DBConnInfo.URL())
 	if err != nil {
-		return errors.WithMessage(err, "Failed to connect to db for schema migration.")
+		return errors.Wrap(err, "Failed to connect to db for schema migration.")
 	}
 	defer masterDb.Close()


@@ -95,7 +95,6 @@ func main() {
 		Host         string        `default:"" envconfig:"HOST"`
 		ReadTimeout  time.Duration `default:"5s" envconfig:"READ_TIMEOUT"`
 		WriteTimeout time.Duration `default:"5s" envconfig:"WRITE_TIMEOUT"`
-		DisableHTTP2 bool          `default:"false" envconfig:"DISABLE_HTTP2"`
 	}
 	Service struct {
 		Name string `default:"web-api" envconfig:"SERVICE_NAME"`
@@ -601,9 +600,6 @@ func main() {
 		}
 		api.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate}
 		api.TLSConfig.NextProtos = append(api.TLSConfig.NextProtos, acme.ALPNProto)
-		if !cfg.HTTPS.DisableHTTP2 {
-			api.TLSConfig.NextProtos = append(api.TLSConfig.NextProtos, "h2")
-		}
 
 		httpServers = append(httpServers, api)


@@ -5,11 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
-	"geeks-accelerator/oss/saas-starter-kit/internal/platform/notify"
-	"geeks-accelerator/oss/saas-starter-kit/internal/project"
-	"geeks-accelerator/oss/saas-starter-kit/internal/project_route"
-	"geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -21,14 +16,19 @@ import (
 	"geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
 	"geeks-accelerator/oss/saas-starter-kit/internal/account"
+	"geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
 	"geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
+	"geeks-accelerator/oss/saas-starter-kit/internal/platform/notify"
 	"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
 	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
 	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
 	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"
+	"geeks-accelerator/oss/saas-starter-kit/internal/project"
+	"geeks-accelerator/oss/saas-starter-kit/internal/project_route"
 	"geeks-accelerator/oss/saas-starter-kit/internal/signup"
 	"geeks-accelerator/oss/saas-starter-kit/internal/user"
 	"geeks-accelerator/oss/saas-starter-kit/internal/user_account"
+	"geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
 	"geeks-accelerator/oss/saas-starter-kit/internal/user_auth"
 	"github.com/google/go-cmp/cmp"
 	"github.com/iancoleman/strcase"


@@ -90,7 +90,6 @@ func main() {
 		Host         string        `default:"" envconfig:"HOST"`
 		ReadTimeout  time.Duration `default:"5s" envconfig:"READ_TIMEOUT"`
 		WriteTimeout time.Duration `default:"5s" envconfig:"WRITE_TIMEOUT"`
-		DisableHTTP2 bool          `default:"false" envconfig:"DISABLE_HTTP2"`
 	}
 	Service struct {
 		Name string `default:"web-app" envconfig:"SERVICE_NAME"`
@@ -1071,9 +1070,6 @@ func main() {
 		}
 		api.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate}
 		api.TLSConfig.NextProtos = append(api.TLSConfig.NextProtos, acme.ALPNProto)
-		if !cfg.HTTPS.DisableHTTP2 {
-			api.TLSConfig.NextProtos = append(api.TLSConfig.NextProtos, "h2")
-		}
 
 		httpServers = append(httpServers, api)

go.mod

@@ -8,7 +8,7 @@ require (
 	github.com/dimfeld/httptreemux v5.0.1+incompatible
 	github.com/dustin/go-humanize v1.0.0
 	github.com/geeks-accelerator/files v0.0.0-20190704085106-630677cd5c14
-	github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db
+	github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021348-d047c980bb66
 	github.com/geeks-accelerator/swag v1.6.3
 	github.com/go-openapi/spec v0.19.2 // indirect
 	github.com/go-openapi/swag v0.19.4 // indirect
@@ -41,7 +41,7 @@ require (
 	github.com/tinylib/msgp v1.1.0 // indirect
 	github.com/urfave/cli v1.21.0
 	github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2
-	gitlab.com/geeks-accelerator/oss/devops v1.0.7
+	gitlab.com/geeks-accelerator/oss/devops v1.0.11
 	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
 	golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
 	golang.org/x/tools v0.0.0-20190807223507-b346f7fd45de // indirect

go.sum

@@ -44,6 +44,8 @@ github.com/geeks-accelerator/files v0.0.0-20190704085106-630677cd5c14 h1:Rrxsq3g
 github.com/geeks-accelerator/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:HMLrFyDC+sI+871eKlqqIBcaDim/NI8//Mbe+UwhY78=
 github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db h1:mjErP7mTFHQ3cw/ibAkW3CvQ8gM4k19EkfzRzRINDAE=
 github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
+github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021348-d047c980bb66 h1:h9pb46oQroOhXmq5cCUU++Eagy240H1/aRwWNIYivrs=
+github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021348-d047c980bb66/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
 github.com/geeks-accelerator/swag v1.6.3 h1:WottuX4MHoy5ZJFXfL+p1IrChpUb/e4g5vpM6tcwOIE=
 github.com/geeks-accelerator/swag v1.6.3/go.mod h1:YWy7dtuct7Uk3vmKr7s+v/F0SNkGYEeV7Y1CykFhmWU=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -213,10 +215,8 @@ github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVU
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ=
 github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY=
-gitlab.com/geeks-accelerator/oss/devops v1.0.3 h1:SE2ZD4Csvmm3t/50RoJkVLjDcwXKHayQYawSkpOSqIw=
-gitlab.com/geeks-accelerator/oss/devops v1.0.3/go.mod h1:rvI71qNJyNiO99ZgGnv/PmJCVrjJjupsXBmfYFXdjGM=
-gitlab.com/geeks-accelerator/oss/devops v1.0.7 h1:ZlQufuVnRN3DwJ0I5c5KA5edhQs7OstXc0uUZ9V0ixI=
-gitlab.com/geeks-accelerator/oss/devops v1.0.7/go.mod h1:JEl0T87/zftowrIzY1D+rhDMhG0AxnghuZB+VzEWuqM=
+gitlab.com/geeks-accelerator/oss/devops v1.0.11 h1:ojSvv4bSOZSyGjFMvpbJyREVfdN1A9O3CrOyTkNtb9c=
+gitlab.com/geeks-accelerator/oss/devops v1.0.11/go.mod h1:xr+rhNSDXrEh0A6bkBPnfMiRIou3OiPZK0oD5h9GAAM=
 golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=


@@ -169,7 +169,7 @@ func New() *Test {
 	ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
 
 	// Execute the migrations
-	if err = schema.Migrate(ctx, masterDB, log, true); err != nil {
+	if err = schema.Migrate(ctx, v.Env, masterDB, log, true); err != nil {
 		log.Fatalf("main : Migrate : %v", err)
 	}
 	log.Printf("main : Migrate : Completed")


@@ -43,21 +43,21 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 					CONSTRAINT email UNIQUE (email)
 				) ;`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 				return nil
 			},
 			Rollback: func(tx *sql.Tx) error {
 				q1 := `DROP TABLE IF EXISTS users`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 				return nil
 			},
 		},
 		// Create new table accounts.
 		{
-			ID: "20190522-01c",
+			ID: "20190522-01h",
 			Migrate: func(tx *sql.Tx) error {
 				if err := createTypeIfNotExists(tx, "account_status_t", "enum('active','pending','disabled')"); err != nil {
 					return err
@@ -83,32 +83,32 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				CONSTRAINT name UNIQUE (name)
 				)`
 				if _, err := tx.Exec(q2); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q2)
+					return errors.Wrapf(err, "Query failed %s", q2)
 				}
 				return nil
 			},
 			Rollback: func(tx *sql.Tx) error {
 				q1 := `DROP TYPE IF EXISTS account_status_t`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 				q2 := `DROP TABLE IF EXISTS accounts`
 				if _, err := tx.Exec(q2); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q2)
+					return errors.Wrapf(err, "Query failed %s", q2)
 				}
 				return nil
 			},
 		},
 		// Create new table user_accounts.
 		{
-			ID: "20190522-01e",
+			ID: "20190522-02e",
 			Migrate: func(tx *sql.Tx) error {
 				if err := createTypeIfNotExists(tx, "user_account_role_t", "enum('admin', 'user')"); err != nil {
 					return err
 				}
-				if err := createTypeIfNotExists(tx, "user_account_status_t", "enum('active', 'invited','disabled'"); err != nil {
+				if err := createTypeIfNotExists(tx, "user_account_status_t", "enum('active', 'invited','disabled')"); err != nil {
 					return err
 				}
@@ -125,7 +125,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				CONSTRAINT user_account UNIQUE (user_id,account_id)
 				)`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 
 				return nil
@@ -133,17 +133,17 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 			Rollback: func(tx *sql.Tx) error {
 				q1 := `DROP TYPE IF EXISTS user_account_role_t`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 
 				q2 := `DROP TYPE IF EXISTS user_account_status_t`
 				if _, err := tx.Exec(q2); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q2)
+					return errors.Wrapf(err, "Query failed %s", q2)
 				}
 
 				q3 := `DROP TABLE IF EXISTS users_accounts`
 				if _, err := tx.Exec(q3); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q3)
+					return errors.Wrapf(err, "Query failed %s", q3)
 				}
 
 				return nil
@@ -168,19 +168,19 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				PRIMARY KEY (id)
 				)`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 
 				return nil
 			},
 			Rollback: func(tx *sql.Tx) error {
 				q1 := `DROP TYPE IF EXISTS project_status_t`
 				if _, err := tx.Exec(q1); err != nil && !errorIsAlreadyExists(err) {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 				q2 := `DROP TABLE IF EXISTS projects`
 				if _, err := tx.Exec(q2); err != nil && !errorIsAlreadyExists(err) {
-					return errors.WithMessagef(err, "Query failed %s", q2)
+					return errors.Wrapf(err, "Query failed %s", q2)
 				}
 				return nil
 			},
@@ -192,13 +192,13 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				q1 := `ALTER TABLE users
 					RENAME COLUMN name to first_name;`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 
 				q2 := `ALTER TABLE users
 					ADD last_name varchar(200) NOT NULL DEFAULT '';`
 				if _, err := tx.Exec(q2); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q2)
+					return errors.Wrapf(err, "Query failed %s", q2)
 				}
 
 				return nil
@@ -206,7 +206,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 			Rollback: func(tx *sql.Tx) error {
 				q1 := `DROP TABLE IF EXISTS users`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 				return nil
 			},
@@ -234,9 +234,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				}
 
 				for _, q := range schemas {
-					_, err := db.Exec(q)
+					_, err := tx.Exec(q)
 					if err != nil {
-						return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
+						return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
 					}
 				}
@@ -273,9 +273,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 					"VALUES %s", strings.Join(valueStrings, ","))
 				insertStmt = db.Rebind(insertStmt)
-				_, err := db.Exec(insertStmt, valueArgs...)
+				_, err := tx.Exec(insertStmt, valueArgs...)
 				if err != nil {
-					return errors.WithMessagef(err, "Failed to execute sql query '%s'", insertStmt)
+					return errors.Wrapf(err, "Failed to execute sql query '%s'", insertStmt)
 				}
 
 				return nil
@@ -331,9 +331,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				}
 
 				for _, q := range queries {
-					_, err := db.Exec(q)
+					_, err := tx.Exec(q)
 					if err != nil {
-						return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
+						return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
 					}
 				}
@@ -364,9 +364,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				}
 
 				for _, q := range schemas {
-					_, err := db.Exec(q)
+					_, err := tx.Exec(q)
 					if err != nil {
-						return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
+						return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
 					}
 				}
@@ -399,16 +399,16 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				}
 
 				for _, q := range prep {
-					_, err := db.Exec(q)
+					_, err := tx.Exec(q)
 					if err != nil {
-						return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
+						return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
 					}
 				}
 
 				u := "http://download.geonames.org/export/dump/countryInfo.txt"
 				resp, err := pester.Get(u)
 				if err != nil {
-					return errors.WithMessagef(err, "Failed to read country info from '%s'", u)
+					return errors.Wrapf(err, "Failed to read country info from '%s'", u)
 				}
 				defer resp.Body.Close()
@@ -491,9 +491,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 					q := "insert into countryinfo (" + strings.Join(columns, ",") + ") values(" + strings.Join(placeholders, ",") + ")"
 					q = db.Rebind(q)
-					stmt, err = db.Prepare(q)
+					stmt, err = tx.Prepare(q)
 					if err != nil {
-						return errors.WithMessagef(err, "Failed to prepare sql query '%s'", q)
+						return errors.Wrapf(err, "Failed to prepare sql query '%s'", q)
 					}
 				}
@@ -531,9 +531,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 					}
 
 					for _, q := range queries {
-						_, err := db.Exec(q)
+						_, err := tx.Exec(q)
 						if err != nil {
-							return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
+							return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
 						}
 					}
 				}
@@ -558,9 +558,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				}
 
 				for _, q := range queries {
-					_, err := db.Exec(q)
+					_, err := tx.Exec(q)
 					if err != nil {
-						return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
+						return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
 					}
 				}
@@ -570,15 +570,15 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				u := "http://download.geonames.org/export/dump/timeZones.txt"
 				resp, err := pester.Get(u)
 				if err != nil {
-					return errors.WithMessagef(err, "Failed to read timezones info from '%s'", u)
+					return errors.Wrapf(err, "Failed to read timezones info from '%s'", u)
 				}
 				defer resp.Body.Close()
 
 				q := "insert into country_timezones (country_code,timezone_id) values(?, ?)"
 				q = db.Rebind(q)
-				stmt, err := db.Prepare(q)
+				stmt, err := tx.Prepare(q)
 				if err != nil {
-					return errors.WithMessagef(err, "Failed to prepare sql query '%s'", q)
+					return errors.Wrapf(err, "Failed to prepare sql query '%s'", q)
 				}
 
 				scanner := bufio.NewScanner(resp.Body)
@@ -633,7 +633,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 				CONSTRAINT account_preferences_pkey UNIQUE (account_id,name)
 				)`
 				if _, err := tx.Exec(q); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q)
+					return errors.Wrapf(err, "Query failed %s", q)
 				}
 
 				return nil
@@ -648,12 +648,12 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 			Migrate: func(tx *sql.Tx) error {
 				q1 := `ALTER TABLE users ALTER COLUMN timezone DROP DEFAULT`
 				if _, err := tx.Exec(q1); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q1)
+					return errors.Wrapf(err, "Query failed %s", q1)
 				}
 
 				q2 := `ALTER TABLE users ALTER COLUMN timezone DROP NOT NULL`
 				if _, err := tx.Exec(q2); err != nil {
-					return errors.WithMessagef(err, "Query failed %s", q2)
+					return errors.Wrapf(err, "Query failed %s", q2)
 				}
 
 				return nil
@@ -669,7 +669,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
 func dropTypeIfExists(tx *sql.Tx, name string) error {
 	q := "DROP TYPE IF EXISTS " + name
 	if _, err := tx.Exec(q); err != nil && !errorIsAlreadyExists(err) {
-		return errors.WithMessagef(err, "Query failed %s", q)
+		return errors.Wrapf(err, "Query failed %s", q)
 	}
 	return nil
 }
@@ -677,10 +677,10 @@ func dropTypeIfExists(tx *sql.Tx, name string) error {
 // createTypeIfNotExists checks to ensure a type doesn't exist before creating.
 func createTypeIfNotExists(tx *sql.Tx, name, val string) error {
-	q1 := "select exists (select 1 from pg_type where typname = '"+name+"')"
+	q1 := "select exists (select 1 from pg_type where typname = '" + name + "')"
 	rows, err := tx.Query(q1)
 	if err != nil {
-		return errors.WithMessagef(err, "Query failed %s", q1)
+		return errors.Wrapf(err, "Query failed %s", q1)
 	}
 	defer rows.Close()
@@ -700,10 +700,9 @@ func createTypeIfNotExists(tx *sql.Tx, name, val string) error {
 		return nil
 	}
 
-	q2 := `CREATE TYPE "+name+" AS `+val
+	q2 := "CREATE TYPE " + name + " AS " + val
 	if _, err := tx.Exec(q2); err != nil && !errorIsAlreadyExists(err) {
-		return errors.WithMessagef(err, "Query failed %s", q2)
+		return errors.Wrapf(err, "Query failed %s", q2)
 	}
 
 	return nil
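Two patterns recur throughout this file's hunks: statements now run on `tx` instead of `db`, so each migration's work stays inside its transaction, and `errors.WithMessagef` is replaced with `errors.Wrapf`, which in `github.com/pkg/errors` also attaches a stack trace at the wrap site. The final hunk additionally fixes a string-construction bug in `createTypeIfNotExists`; a minimal illustration of that fix (not part of the commit):

```go
package main

import "fmt"

func main() {
	name, val := "account_status_t", "enum('active','pending','disabled')"

	// The old backtick literal kept +name+ as literal text, so Postgres created a type
	// whose quoted identifier was literally "+name+".
	broken := `CREATE TYPE "+name+" AS ` + val
	fmt.Println(broken) // CREATE TYPE "+name+" AS enum('active','pending','disabled')

	// The corrected concatenation interpolates the actual type name.
	fixed := "CREATE TYPE " + name + " AS " + val
	fmt.Println(fixed) // CREATE TYPE account_status_t AS enum('active','pending','disabled')
}
```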