Merge branch 'prod' into 'master'

Migrated devops code to independent project. See merge request geeks-accelerator/oss/saas-starter-kit!13
.gitignore (vendored, 5 changes)

@@ -1,8 +1,7 @@
 .idea
-aws.lee
+aws.*
 .env_docker_compose
-local.env
+.local.env
 .aws-deploy.env
 .DS_Store
 tmp
-.devops.json
.gitlab-ci.yml (171 changes)

@@ -7,7 +7,7 @@ variables:
   DOCKER_TLS_CERTDIR: ""
 
 before_script:
-  - './tools/devops/scripts/build.sh'
+  - 'go install ./build/cicd'
 
 stages:
   - build:dev
@@ -32,136 +32,127 @@ cache:
 .build_tmpl: &build_tmpl
   <<: *job_tmpl
   script:
-    - './devops build -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV}'
+    - 'cicd --env=${TARGET_ENV} build ${TARGET_TYPE} --name=${TARGET_REF}'
 
 .deploy_tmpl: &deploy_tmpl
   <<: *job_tmpl
   script:
-    - './devops deploy -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV} -enable_https=${ENABLE_HTTPS} -enable_elb=${ENABLE_ELB} -primary_host=${PRIMARY_HOST} -host_names=${HOST_NAMES} -private_bucket=${S3_BUCKET_PRIVATE} -public_bucket=${S3_BUCKET_PUBLIC} -public_bucket_cloudfront=${S3_BUCKET_PUBLIC_CLOUDFRONT} -static_files_s3=${STATIC_FILES_S3} -static_files_img_resize=${STATIC_FILES_IMG_RESIZE}'
+    - 'cicd --env=${TARGET_ENV} deploy ${TARGET_TYPE} --name=${TARGET_REF}'
 
 .migrate_tmpl: &migrate_tmpl
   <<: *job_tmpl
   script:
-    - './devops migrate -project=${PROJECT_NAME} -env=${TARGET_ENV}'
+    - 'cicd --env=${TARGET_ENV} schema migrate'
 
-db:migrate:dev:
+db:migrate:prod:
   <<: *migrate_tmpl
-  stage: migrate:dev
+  stage: migrate:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - /^dev-.*$/
+    - prod
+    - /^prod-.*$/
   variables:
-    TARGET_ENV: 'dev'
+    TARGET_ENV: 'prod'
+    AWS_USE_ROLE: 'true'
 
-webapp:build:dev:
+webapp:build:prod:
   <<: *build_tmpl
-  stage: build:dev
+  stage: build:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-app
+    - prod
+    - prod-web-app
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-app'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-app'
+    AWS_USE_ROLE: 'true'
 
-webapp:deploy:dev:
+webapp:deploy:prod:
   <<: *deploy_tmpl
-  stage: deploy:dev
+  stage: deploy:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-app
+    - prod
+    - prod-web-app
   dependencies:
-    - 'webapp:build:dev'
-    - 'db:migrate:dev'
+    - 'webapp:build:prod'
+    - 'db:migrate:prod'
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-app'
-    ENABLE_HTTPS: 1
-    ENABLE_ELB: 0
-    PRIMARY_HOST: 'example.saasstartupkit.com'
-    HOST_NAMES: 'example.saasstartupkit.com,dev.example.saasstartupkit.com'
-    S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
-    S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
-    S3_BUCKET_PUBLIC_CLOUDFRONT: 'true'
-    STATIC_FILES_S3: 'true'
-    STATIC_FILES_IMG_RESIZE: 'true'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-app'
+    AWS_USE_ROLE: 'true'
     EMAIL_SENDER: 'lee+saas-starter-kit@geeksinthewoods.com'
     WEB_API_BASE_URL: https://api.example.saasstartupkit.com
     #when: manual
 
-webapi:build:dev:
+webapi:build:prod:
   <<: *build_tmpl
-  stage: build:dev
+  stage: build:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-api
+    - prod
+    - prod-web-api
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-api'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-api'
+    AWS_USE_ROLE: 'true'
 
-webapi:deploy:dev:
+webapi:deploy:prod:
   <<: *deploy_tmpl
-  stage: deploy:dev
+  stage: deploy:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-api
+    - prod
+    - prod-web-api
   dependencies:
-    - 'webapi:build:dev'
-    - 'db:migrate:dev'
+    - 'webapi:build:prod'
+    - 'db:migrate:prod'
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-api'
-    ENABLE_HTTPS: 1
-    ENABLE_ELB: 0
-    PRIMARY_HOST: 'api.example.saasstartupkit.com'
-    HOST_NAMES: 'api.dev.example.saasstartupkit.com'
-    S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
-    S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
-    S3_BUCKET_PUBLIC_CLOUDFRONT: 'false'
-    STATIC_FILES_S3: 'false'
-    STATIC_FILES_IMG_RESIZE: 'false'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-api'
+    AWS_USE_ROLE: 'true'
     EMAIL_SENDER: 'lee+saas-starter-kit@geeksinthewoods.com'
     WEB_APP_BASE_URL: https://example.saasstartupkit.com
     #when: manual
 
+ddlogscollector:build:prod:
+  <<: *build_tmpl
+  stage: build:prod
+  tags:
+    - prod
+  only:
+    - master
+    - prod
+    - prod-ddlogs
+  variables:
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'function'
+    TARGET_REF: 'ddlogscollector'
+    AWS_USE_ROLE: 'true'
+
+ddlogscollector:deploy:prod:
+  <<: *deploy_tmpl
+  stage: deploy:prod
+  tags:
+    - prod
+  only:
+    - master
+    - prod
+    - prod-ddlogs
+  dependencies:
+    - 'ddlogscollector:build:prod'
+    - 'db:migrate:prod'
+  variables:
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'function'
+    TARGET_REF: 'ddlogscollector'
+    AWS_USE_ROLE: 'true'
+  #when: manual
 
-#ddlogscollector:deploy:stage:
-#  <<: *deploy_stage_tmpl
-#  variables:
-#    TARGET_ENV: 'stage'
-#    ECS_CLUSTER: '${ECS_CLUSTER}'
-#    SERVICE: 'ddlogscollector'
-#    S3_BUCKET: 'keenispace-services-stage'
-#    S3_KEY: 'aws/lambda/ddlogscollector/src/ddlogscollector-stage.zip'
-#    ENABLE_VPC: 0
-#  only:
-#    - master
-#    - stage
-#ddlogscollector:deploy:prod:
-#  <<: *deploy_prod_tmpl
-#  variables:
-#    TARGET_ENV: 'prod'
-#    ECS_CLUSTER: '${ECS_CLUSTER}'
-#    SERVICE: 'ddlogscollector'
-#    S3_BUCKET: 'keenispace-services-prod'
-#    S3_KEY: 'aws/lambda/ddlogscollector/src/ddlogscollector-prod.zip'
-#    ENABLE_VPC: 0
-#  only:
-#    - master
-#    - prod
-#  #dependencies:
-#  # - 'ddlogscollector:deploy:stage'
@@ -1,5 +1,5 @@
 # This is the official list of people who can contribute
-# (and typically have contributed) code to the gotraining repository.
+# (and typically have contributed) code to the saas-starter-kit repository.
 #
 # Names should be added to this file only after verifying that
 # the individual or the individual's organization has agreed to
README.md (69 changes)

@@ -1,7 +1,7 @@
 # SaaS Startup Kit
 
 [](https://gitlab.com/geeks-accelerator/oss/devops/pipelines)
 [](https://goreportcard.com/report/gitlab.com/geeks-accelerator/oss/devops)
 
 Copyright 2019, Geeks Accelerator
 twins@geeksaccelerator.com
 
 The [SaaS Startup Kit](https://saasstartupkit.com/) is a set of libraries in Go and boilerplate Golang code for building
 scalable software-as-a-service (SaaS) applications. The goal of this project is to provide a proven starting point for new
@@ -26,6 +26,38 @@ https://docs.google.com/presentation/d/1WGYqMZ-YUOaNxlZBfU4srpN8i86MU0ppWWSBb3pk
 [](https://saasstartupkit.com/)
 
 
+<!-- toc -->
+
+- [Motivation](#motivation)
+- [Description](#description)
+  * [Example project](#example-project)
+- [Local Installation](#local-installation)
+  * [Getting the project](#getting-the-project)
+  * [Go Modules](#go-modules)
+  * [Installing Docker](#installing-docker)
+- [Getting started](#getting-started)
+  * [Running the project](#running-the-project)
+  * [How we run the project](#how-we-run-the-project)
+  * [Stopping the project](#stopping-the-project)
+  * [Re-starting a specific Go service for development](#re-starting-a-specific-go-service-for-development)
+  * [Forking your own copy](#forking-your-own-copy)
+  * [Optional. Set AWS and Datadog Configs](#optional-set-aws-and-datadog-configs)
+- [Web API](#web-api)
+  * [API Documentation](#api-documentation)
+- [Web App](#web-app)
+- [Schema](#schema)
+  * [Accessing Postgres](#accessing-postgres)
+- [Deployment](#deployment)
+- [Development Notes](#development-notes)
+- [What's Next](#whats-next)
+- [Join us on Gopher Slack](#join-us-on-gopher-slack)
+- [License](#license)
+
+<!-- tocstop -->
+
 
 ## Motivation
 
 When getting started building SaaS, we believe that is important for both the frontend web experience and the backend
@@ -199,8 +231,8 @@ Docker is a critical component and required to run this project.
 
 https://docs.docker.com/install/
 
 
-## Running The Project
+## Getting started
 
 There is a `docker-compose` file that knows how to build and run all the services. Each service has its own a
 `dockerfile`.
@@ -490,6 +522,7 @@ of user management.
 
 For more details on this service, read [web-app readme](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/cmd/web-app/README.md)
 
+
 
 ## Schema
 [cmd/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/cmd/schema)
@@ -583,7 +616,7 @@ so each job is dependant on the previous or run jobs for each target environment
 
 A build tool called [devops](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/tools/devops) has
 been included apart of this project. _Devops_ handles creating AWS resources and deploying your services with minimal
-additional configuration. You can customizing any of the configuration in the code. While AWS is already a core part of
+additional configuration. You can customize any of the configuration in the code. While AWS is already a core part of
 the saas-starter-kit, keeping the deployment in GoLang limits the scope of additional technologies required to get your
 project successfully up and running. If you understand Golang, then you will be a master at devops with this tool.
@@ -622,13 +655,37 @@ sqlQueryStr = db.Rebind(sqlQueryStr)
 For additional details refer to [bindvars](https://jmoiron.github.io/sqlx/#bindvars)
 
 
 
 ## What's Next
 
 We are in the process of writing more documentation about this code. We welcome you to make enhancements to this
 documentation or just send us your feedback and suggestions :wink:
 
 
 
+## Contributions
+
+We :heart: contributions.
+
+Have you had a good experience with SaaS Startup Kit? Why not share some love and contribute code?
+
+Thank you to all those that have contributed to this project and are using it in their projects. You can find a
+CONTRIBUTORS file where we keep a list of contributors to the project. If you contribute a PR please consider adding
+your name there.
+
+
+
 ## Join us on Gopher Slack
 
 If you are having problems installing, troubles getting the project running or would like to contribute, join the
-channel #saas-starter-kit on [Gopher Slack](http://invite.slack.golangbridge.org/)
+channel #saas-starter-kit on [Gopher Slack](http://invite.slack.golangbridge.org/)
 
 
 
 ## License
 
 Please read the [LICENSE](./LICENSE) file here.
 
 Copyright 2019, Geeks Accelerator
 twins@geeksaccelerator.com
build/cicd/README.md (new file, 413 lines)

cicd
===

_cicd_ is a simple command line tool that facilitates build and deployment for your project. The goal is to help enable
developers to easily set up a continuous build pipeline using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) and code
driven deployment.

<!-- toc -->

- [Overview](#overview)
  * [Deployment Environments](#deployment-environments)
  * [Services](#services)
  * [Functions](#functions)
  * [Schema Migrations](#schema-migrations)
- [Getting Started](#getting-started)
- [Installing locally](#installing-locally)
- [Usage](#usage)
  * [Commands](#commands)
  * [Examples](#examples)
- [Join us on Gopher Slack](#join-us-on-gopher-slack)

<!-- tocstop -->


## Overview

The command line tool provides the functionality to configure, build and deploy your code. When new code is pushed to GitLab,
this tool will enable building, testing and deploying your code to [Amazon AWS](https://aws.amazon.com/).

Deploying your code to production always requires additional tooling and configuration. Instead of patching together a
system of existing tools and configuration files, this tool centralizes configuration for the application and any
additional deployment resources needed.

Configuration is defined with code. AWS resources are created/maintained using the [AWS SDK for Go](https://docs.aws.amazon.com/sdk-for-go/api/).

**This tool is used by GitLab CI/CD** and is configured by a file called `.gitlab-ci.yml` placed at the repository's root.

**All code is deployed to Amazon AWS**.

Check out the [full presentation](https://docs.google.com/presentation/d/1sRFQwipziZlxBtN7xuF-ol8vtUqD55l_4GE-4_ns-qM/edit?usp=sharing)
that covers how to set up your [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) pipeline that uses autoscaling GitLab
Runners on AWS.

Support is provided for both services and functions. The build process for both relies on docker and thus, neither is
required to be written in Go.

Configuration for build and deploy is provided by
[gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy)

For additional details regarding this tool, refer to
[gitlab.com/geeks-accelerator/oss/devops](https://gitlab.com/geeks-accelerator/oss/devops)


### Deployment Environments

All configuration for the deployment environments is defined in code that is located in the
[internal/config](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config)
package. This includes configuration for the following deployment resources:

* [AWS ECR Repository](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcrRepository)
named `saas-starter-kit`

* [AWS EC2 VPC](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEc2Vpc) defined as using the
default for the AWS Region.

* [AWS EC2 Security Group](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEc2SecurityGroup)
named `saas-starter-kit-[dev|stage|prod]`
  * The GitLab runner security group, named `gitlab-runner`, will be added to the security group as ingress.

* Private [AWS S3 Bucket](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsS3Bucket)
named `saas-starter-kit-private` used by the `web-app` and `web-api` for large object storage.
  * A lifecycle policy is applied to the key prefix `tmp/` that will expire objects after 1 day for temporary storage
    like exports.
  * Configured to [block all public access](https://aws.amazon.com/blogs/aws/amazon-s3-block-public-access-another-layer-of-protection-for-your-accounts-and-buckets/)

* Public [AWS S3 Bucket](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsS3Bucket)
named `saas-starter-kit-public` used to serve static files, primarily for the `web-app`.
  * CORS rules for GET and POST to support static files served directly from the S3 Bucket or via Cloudfront.
  * Defined key prefix of `public/` used by deployment for uploading static files.
  * AWS CloudFront configured for the `prod` environment for serving static files from the S3 Bucket as origin.

* Redis [AWS Elastic Cache Cluster](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsElasticCacheCluster)
named `saas-starter-kit-[dev|stage|prod]` for ephemeral storage.
  * Configured using Redis version 5.0.4.
  * Deployed as a single node cache cluster using the instance type `cache.t2.micro`: 1 vCPU with 512MB of memory.
  * `maxmemory-policy` parameter set to `allkeys-lru`, which evicts keys by trying to remove the least recently used
    (LRU) keys first in order to make space for new data. This prevents the cache cluster from ever
    running out of memory.

* Postgres [AWS RDS Database Instance](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsRdsDBInstance)
named `saas-starter-kit-[dev|stage|prod]`.
  * Configured with the default database `shared`, username `god` on port 5432.
  * The password is randomly generated during creation and stored in [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/).
  * Deployed as a single instance using the instance type `db.t2.small`: 1 vCPU with 2GiB of memory.
  * 20GiB of disk space has been allocated.

* [AWS IAM Policy](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsIamPolicy) named
`saasStarterKitService[Dev|Stage|Prod]` that will be used to grant permissions for AWS ECS tasks and AWS Lambda
functions to access the defined AWS resources listed above.

* Support for Datadog can be enabled by adding your Datadog API key to [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/)
using the key `saas-starter-kit/[dev|stage|prod]/datadog`


Multiple development environments can easily be configured for more control. This tool supports three target deployment
environments:
* dev
* stage
* prod

`.gitlab-ci.yml` only has prod enabled.


### Services

Services are generally applications that need to be long running or continuously available. The configured services
are:

* [web-app](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/cmd/web-app) - Publicly accessible
website and web application.
* [web-api](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/cmd/web-api) - Publicly accessible web
API and documentation.

The `Dockerfile` for both services is defined as a [multi-stage build](https://docs.docker.com/develop/develop-images/multistage-build/)
that includes building a base layer, running unit tests and compiling the go application as a static binary. The final
layer in the multi-stage build uses [alpine:3.9](https://hub.docker.com/_/alpine?tab=description) as its base image and copies
in the compiled binary, resulting in a docker container that is around 50MB excluding any additional static assets. It's
possible to swap out `alpine:3.9` with [busybox](https://willschenk.com/articles/2019/building_a_slimmer_go_docker_container/)
for an even smaller resulting docker image.

A service is built using the defined service Dockerfile. The resulting image is pushed to
[Amazon Elastic Container Registry](https://aws.amazon.com/ecr/).

Amazon Elastic Container Registry (ECR) is a fully-managed Docker container registry that makes it easy for
developers to store, manage, and deploy Docker container images. Amazon ECR is integrated with Amazon Elastic
Container Service (ECS), simplifying the development to production workflow.

A service is configured for deployment in [services.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/oss/devops/blob/master/build/cicd/internal/config/service.go).
Services are deployed to [AWS Fargate](https://aws.amazon.com/fargate/) based on the defined task definition.

AWS Fargate is a compute engine for Amazon ECS that allows you to run containers without having to manage servers or
clusters. With AWS Fargate, you no longer have to provision, configure, and scale clusters of virtual machines to
run containers.

If the docker file is a multi-stage build and contains a stage named `build_base_golang`, additional caching will
be implemented to reduce build times. The build command assumes that a stage named `build_base_golang` runs
`go mod download` to pull down all package dependencies. The build command computes a checksum for the project's
go.mod and then executes a docker build that targets the specific stage `build_base_golang`. The built container
image is tagged with the go.mod hash and pushed to the project's
[GitLab repository](https://docs.gitlab.com/ee/user/project/repository/).
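To make that caching step concrete, here is a minimal sketch of the idea, not the actual cicd source; the registry path is a made-up placeholder and the stage name is taken from the description above:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"os/exec"
)

func main() {
	// Hash go.mod so the cached base-image tag only changes when dependencies change.
	dat, err := ioutil.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(dat)
	tag := hex.EncodeToString(sum[:])[:12]

	// Hypothetical registry path for illustration only.
	image := fmt.Sprintf("registry.gitlab.com/example/project/build_base_golang:%s", tag)

	// Build only the named stage so its dependency layer can be reused as a cache
	// by later builds that see the same go.mod hash.
	cmd := exec.Command("docker", "build", "--target", "build_base_golang", "-t", image, ".")
	if out, err := cmd.CombinedOutput(); err != nil {
		panic(fmt.Sprintf("docker build failed: %v\n%s", err, out))
	}
	fmt.Println("built", image)
}
```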
### Functions

Functions are applications that can be executed in a short period of time. The configured function is:

* [Datadog Log Collection](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/deployments/ddlogscollector) -
Python script used to ship logs from AWS Cloudwatch to Datadog.

A function is built using the defined Dockerfile. The `Dockerfile` for a function should use a
[lambdaci image](https://hub.docker.com/r/lambci/lambda/) as the base image.

Lambdaci images provide a sandboxed local environment that replicates the live AWS Lambda environment almost
identically – including installed software and libraries, file structure and permissions, environment variables,
context objects and behaviors – even the user and running process are the same.

The build command then uses `docker cp` to extract all files from the resulting container image that are located in
`/var/task`. These files are zipped and uploaded to the private AWS S3 bucket for deployment.
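As a rough sketch of that packaging step (assuming the files have already been copied out of the container with `docker cp`; this is an illustration, not the cicd source), zipping a directory in Go looks like:

```go
package main

import (
	"archive/zip"
	"io"
	"os"
	"path/filepath"
)

// zipDir writes every regular file under srcDir into a zip archive at dstPath,
// storing paths relative to srcDir, mirroring how the extracted /var/task
// contents would be packaged before the S3 upload.
func zipDir(srcDir, dstPath string) error {
	out, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer out.Close()

	zw := zip.NewWriter(out)
	defer zw.Close()

	return filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		rel, err := filepath.Rel(srcDir, path)
		if err != nil {
			return err
		}
		w, err := zw.Create(rel)
		if err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(w, f)
		return err
	})
}

func main() {
	// Hypothetical paths for illustration.
	if err := zipDir("./task-files", "ddlogscollector.zip"); err != nil {
		panic(err)
	}
}
```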
A function is configured for deployment in [functions.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/oss/devops/blob/master/build/cicd/internal/config/function.go).
Functions are deployed to [AWS Lambda](https://aws.amazon.com/lambda/).

AWS Lambda lets you run code without provisioning or managing servers. You pay only for the compute time you consume
- there is no charge when your code is not running.


### Schema Migrations

_cicd_ includes a minimalistic database migration script that implements
[github.com/geeks-accelerator/sqlxmigrate](https://godoc.org/github.com/geeks-accelerator/sqlxmigrate). It provides
schema versioning and migration rollback. The schema for the entire project is defined globally and is located at
[internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/issue8/datadog-lambda-func/internal/schema)

The example schema package provides two separate methods for handling schema migration:

* [Migrations](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/schema/migrations.go) -
List of direct SQL statements for each migration with a defined version ID. A database table is created to persist
executed migrations. On each run, the migration logic checks the migration table to determine whether a migration has
already been executed; thus, schema migrations are only ever executed once. Migrations are defined as functions to
enable complex migrations, so the results of one query can be manipulated before being piped to the next query. A
sketch of what such a migration looks like follows this list.

* [Init Schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/schema/init_schema.go) -
If you have a lot of migrations, it can be a pain to run them all, for example when you are deploying a new instance of
the app into a clean database. To prevent this, use the initSchema function that will run as if no migration was run
before (on a new clean database).

Another bonus with the globally defined schema is that it gives your testing package the ability to dynamically [spin
up database containers](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/platform/tests/main.go#L127)
on-demand and automatically include all the migrations. This allows the testing package to
[programmatically execute schema migrations](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/platform/tests/main.go#L172)
before running any unit tests.
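A minimal sketch of a function-based migration, assuming sqlxmigrate's gormigrate-style API (the connection string, version ID and table are placeholders; check the sqlxmigrate godoc for the exact signatures):

```go
package main

import (
	"database/sql"
	"log"

	"github.com/geeks-accelerator/sqlxmigrate"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	// Placeholder connection string for illustration.
	db, err := sqlx.Connect("postgres", "postgres://user:pass@127.0.0.1/shared?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	// Each migration carries a version ID; executed IDs are persisted in a
	// migrations table so a given migration only ever runs once.
	m := sqlxmigrate.New(db, sqlxmigrate.DefaultOptions, []*sqlxmigrate.Migration{
		{
			ID: "20190522-01",
			Migrate: func(tx *sql.Tx) error {
				_, err := tx.Exec(`CREATE TABLE IF NOT EXISTS example (id CHAR(36) NOT NULL)`)
				return err
			},
			Rollback: func(tx *sql.Tx) error {
				_, err := tx.Exec(`DROP TABLE IF EXISTS example`)
				return err
			},
		},
	})

	if err := m.Migrate(); err != nil {
		log.Fatalf("migration failed: %v", err)
	}
}
```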
## Getting Started

One of the philosophies behind the SaaS Startup Kit is that building and deploying SaaS product offerings should be easy,
allowing you to focus on what's most important: writing the business logic. The steps below outline what is needed to get a
full build pipeline that includes both continuous integration and continuous deployment.

1. Configure your AWS infrastructure in [config.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config/config.go)

2. Define your services in [service.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config/service.go)
that will be deployed to AWS Fargate. This includes settings for your [AWS ECS Cluster](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcsCluster),
the associated [AWS ECS Service](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcsService)
and [AWS ECS Task Definition](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcsTaskDefinition).

3. Define your functions in [function.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config/function.go)
that will be deployed to AWS Lambda. This includes settings for the runtime, amount of memory, and timeout.

4. Ensure your [schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema) is ready
for deployment. You should already be using the
[schema tool](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/tools/schema) for maintaining
database schemas for local development, so no additional effort should be required for this step.

5. Update the [.gitlab-ci.yml](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/.gitlab-ci.yml) in
the project root to include the services and functions you have configured here. `.gitlab-ci.yml` will be used by GitLab
to determine which services and functions should be built and deployed.

6. Set up a GitLab runner in your AWS account. This will allow the _cicd_ tool to execute database migrations, since the
database deployed by default is not publicly available. GitLab does provide shared runners, but these will not be able
to access your database.
[Follow the instructions here](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/README.md#gitlab-cicd) for
setting up a GitLab Runner.


## Installing locally

Make sure you have a working Go environment. Go version 1.2+ is supported. [See
the install instructions for Go](http://golang.org/doc/install.html).

To install _cicd_, simply run:
```
$ go get -v geeks-accelerator/oss/saas-starter-kit/build/cicd
```

Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can
be easily used:
```
export PATH=$PATH:$GOPATH/bin
```

_cicd_ requires AWS permissions to be executed locally. For the GitLab CI/CD build pipeline, AWS roles will be used. This
user is only necessary for running _cicd_ locally.

1. You will need an existing AWS account or to create a new AWS account.

2. Define a new [AWS IAM Policy](https://console.aws.amazon.com/iam/home?region=us-west-2#/policies$new?step=edit)
called `saas-starter-kit-deploy` with a defined JSON statement instead of using the visual
editor. The statement is rather large as each permission is granted individually. A copy of
the statement is stored in the devops repo at
[configs/aws-aim-deploy-policy.json](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/configs/aws-aim-deploy-policy.json)

3. Create a new [AWS User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users$new?step=details)
called `saas-starter-kit-deploy` with _Programmatic Access_ and _Attach existing policies directly_ with the policy
created from step 2, `saas-starter-kit-deploy`

4. Set your AWS credentials as [environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html).
These can also be passed into _cicd_ as command line options.
```bash
export AWS_ACCESS_KEY_ID=XXXXXXXXX
export AWS_SECRET_ACCESS_KEY=XXXXXXXXX
export AWS_REGION="us-west-2"
export AWS_USE_ROLE=false
```


## Usage

```bash
$ cicd [global options] command [command options] [arguments...]
```

### Global Options

* Target Environment - __required__

    `--env [dev|stage|prod]`

* AWS Access Key - optional, can be set via env variable `AWS_ACCESS_KEY_ID`

    `--aws-access-key value`

* AWS Secret Key - optional, can be set via env variable `AWS_SECRET_ACCESS_KEY`

    `--aws-secret-key value`

* AWS Region - optional, can be set via env variable `AWS_REGION`

    `--aws-region value`

* AWS Use Role - optional, can be set via env variable `AWS_USE_ROLE`; when enabled an IAM Role is used, otherwise AWS
Access/Secret Keys are required

* Show help

    `--help, -h`

* Print the version

    `--version, -v`

### Commands

* `build service` - Executes a build for a single service

    ```bash
    $ cicd -env [dev|stage|prod] build service -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target service, required
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the build details
    --no-cache                        skip caching for the docker build
    --no-push                         disable pushing release image to remote repository
    ```

* `build function` - Executes a build for a single function

    ```bash
    $ cicd -env [dev|stage|prod] build function -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target function, required
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the build details
    --no-cache                        skip caching for the docker build
    --no-push                         disable pushing release image to remote repository
    ```

* `deploy service` - Executes a deploy for a single service

    ```bash
    $ cicd -env [dev|stage|prod] deploy service -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target service, one of [aws-ecs-go-web-api]
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the deploy details
    ```

* `deploy function` - Executes a deploy for a single function

    ```bash
    $ cicd -env [dev|stage|prod] deploy function -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target function, required
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the deploy details
    ```

* `schema migrate` - Runs the database migration using credentials from AWS Secrets Manager.

    ```bash
    $ cicd -env [dev|stage|prod] schema migrate
    ```

* `help` - Shows a list of commands

    ```bash
    $ cicd help
    ```

    Or for one command:
    ```bash
    $ cicd build help
    ```

### Examples

Build the example service _web-app_
```bash
$ cicd --env=prod build service --name=web-app --release-tag=testv1 --dry-run=false
```

Deploy the example service _web-app_
```bash
$ cicd --env=prod deploy service --name=web-app --release-tag=testv1 --dry-run=false
```


## Join us on Gopher Slack

If you are having problems installing, troubles getting the project running or would like to contribute, join the
channel #saas-starter-kit on [Gopher Slack](http://invite.slack.golangbridge.org/)
601
build/cicd/internal/config/config.go
Normal file
601
build/cicd/internal/config/config.go
Normal file
@ -0,0 +1,601 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
|
||||
"geeks-accelerator/oss/saas-starter-kit/internal/schema"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/cloudfront"
|
||||
"github.com/aws/aws-sdk-go/service/rds"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/secretsmanager"
|
||||
"github.com/iancoleman/strcase"
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pkg/errors"
|
||||
"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
|
||||
)
|
||||
|
||||
const (
|
||||
// ProjectNamePrefix will be appending to the name of the project.
|
||||
ProjectNamePrefix = ""
|
||||
|
||||
// GitLabProjectBaseUrl is the base url used to create links to a specific CI/CD job or pipeline by ID.
|
||||
GitLabProjectBaseUrl = "https://gitlab.com/geeks-accelerator/oss/saas-starter-kit"
|
||||
|
||||
// EnableRdsServerless will use the Aurora database engine that scales the capacity based on database load. This is
|
||||
// a good option for intermittent or unpredictable workloads.
|
||||
EnableRdsServerless = true
|
||||
)
|
||||
|
||||
// Env defines the target deployment environment.
|
||||
type Env = string
|
||||
|
||||
var (
|
||||
EnvDev Env = webcontext.Env_Dev
|
||||
EnvStage Env = webcontext.Env_Stage
|
||||
EnvProd Env = webcontext.Env_Prod
|
||||
)
|
||||
|
||||
// List of env names used by main.go for help.
|
||||
var EnvNames = []Env{
|
||||
EnvDev,
|
||||
EnvStage,
|
||||
EnvProd,
|
||||
}
|
||||
|
||||
// ConfigContext defines the flags for build env.
|
||||
type ConfigContext struct {
|
||||
// Env is the target environment used for the deployment.
|
||||
Env string `validate:"oneof=dev stage prod"`
|
||||
|
||||
// AwsCredentials defines the credentials used for deployment.
|
||||
AwsCredentials devdeploy.AwsCredentials `validate:"required,dive,required"`
|
||||
}
|
||||
|
||||
// NewConfigContext returns the ConfigContext.
|
||||
func NewConfigContext(targetEnv Env, awsCredentials devdeploy.AwsCredentials) (*ConfigContext, error) {
|
||||
ctx := &ConfigContext{
|
||||
Env: targetEnv,
|
||||
AwsCredentials: awsCredentials,
|
||||
}
|
||||
|
||||
// If AWS Credentials are not set and use role is not enabled, try to load the credentials from env vars.
|
||||
if ctx.AwsCredentials.UseRole == false && ctx.AwsCredentials.AccessKeyID == "" {
|
||||
var err error
|
||||
ctx.AwsCredentials, err = devdeploy.GetAwsCredentialsFromEnv(ctx.Env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if ctx.AwsCredentials.Region == "" {
|
||||
awsCreds, err := devdeploy.GetAwsCredentialsFromEnv(ctx.Env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctx.AwsCredentials.Region = awsCreds.Region
|
||||
}
|
||||
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// Config defines the details to setup the target environment for the project to build services and functions.
|
||||
func (cfgCtx *ConfigContext) Config(log *log.Logger) (*devdeploy.Config, error) {
|
||||
|
||||
// Init a new build target environment for the project.
|
||||
cfg := &devdeploy.Config{
|
||||
Env: cfgCtx.Env,
|
||||
AwsCredentials: cfgCtx.AwsCredentials,
|
||||
}
|
||||
|
||||
// Get the current working directory. This should be somewhere contained within the project.
|
||||
workDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return cfg, errors.Wrap(err, "Failed to get current working directory.")
|
||||
}
|
||||
|
||||
// Set the project root directory and project name. This is current set by finding the go.mod file for the project
|
||||
// repo. Project name is the directory name.
|
||||
modDetails, err := devdeploy.LoadModuleDetails(workDir)
|
||||
if err != nil {
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
// ProjectRoot should be the root directory for the project.
|
||||
cfg.ProjectRoot = modDetails.ProjectRoot
|
||||
|
||||
// ProjectName will be used for prefixing AWS resources. This could be changed as needed or manually defined.
|
||||
cfg.ProjectName = ProjectNamePrefix + modDetails.ProjectName
|
||||
|
||||
// In a verbatim fork of the repo, a CI/CD would fail due to a conflict creating AWS resources (such as S3) since
|
||||
// their name is calculated with the go.mod path. Since the name-scope of AWS resources is region/global scope,
|
||||
// it will fail to create appropriate resources for the account of the forked user.
|
||||
if cfg.ProjectName == "saas-starter-kit" {
|
||||
// Its a true fork from the origin repo.
|
||||
if gitRemoteUser( modDetails.ProjectRoot) != "geeks-accelerator" {
|
||||
// Replace the prefix 'saas' with the parent directory name, hopefully the gitlab group/username.
|
||||
cfg.ProjectName = filepath.Base(filepath.Dir(cfg.ProjectRoot)) + "-starter-kit"
|
||||
}
|
||||
}
|
||||
|
||||
// Set default AWS ECR Repository Name.
|
||||
cfg.AwsEcrRepository = &devdeploy.AwsEcrRepository{
|
||||
RepositoryName: cfg.ProjectName,
|
||||
Tags: []devdeploy.Tag{
|
||||
{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
|
||||
{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
|
||||
},
|
||||
}
|
||||
|
||||
// Set the deployment to use the default VPC for the region.
|
||||
cfg.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{
|
||||
IsDefault: true,
|
||||
}
|
||||
|
||||
// Set the security group to use for the deployed services, database and cluster. This will used the VPC ID defined
|
||||
// for the deployment.
|
||||
cfg.AwsEc2SecurityGroup = &devdeploy.AwsEc2SecurityGroup{
|
||||
GroupName: cfg.ProjectName + "-" + cfg.Env,
|
||||
Description: fmt.Sprintf("Security group for %s services running on ECS", cfg.ProjectName),
|
||||
Tags: []devdeploy.Tag{
|
||||
{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
|
||||
{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
|
||||
},
|
||||
}
|
||||
|
||||
// Set the name of the EC2 Security Group used by the gitlab runner. This is used to ensure the security
|
||||
// group defined above has access to the RDS cluster/instance and can thus handle schema migrations.
|
||||
cfg.GitlabRunnerEc2SecurityGroupName = "gitlab-runner"
|
||||
|
||||
// Set the s3 buckets used by the deployed services.
|
||||
// S3 temp prefix used by services for short term storage. A lifecycle policy will be used for expiration.
|
||||
s3BucketTempPrefix := "tmp/"
|
||||
|
||||
// Defines a life cycle policy to expire keys for the temp directory.
|
||||
bucketLifecycleTempRule := &s3.LifecycleRule{
|
||||
ID: aws.String("Rule for : " + s3BucketTempPrefix),
|
||||
Status: aws.String("Enabled"),
|
||||
Filter: &s3.LifecycleRuleFilter{
|
||||
Prefix: aws.String(s3BucketTempPrefix),
|
||||
},
|
||||
Expiration: &s3.LifecycleExpiration{
|
||||
// Indicates the lifetime, in days, of the objects that are subject to the rule.
|
||||
// The value must be a non-zero positive integer.
|
||||
Days: aws.Int64(1),
|
||||
},
|
||||
// Specifies the days since the initiation of an incomplete multipart upload
|
||||
// that Amazon S3 will wait before permanently removing all parts of the upload.
|
||||
// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
|
||||
// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
|
||||
// in the Amazon Simple Storage Service Developer Guide.
|
||||
AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
|
||||
DaysAfterInitiation: aws.Int64(1),
|
||||
},
|
||||
}
|
||||
|
||||
// Define the public S3 bucket used to serve static files for all the services.
|
||||
cfg.AwsS3BucketPublic = &devdeploy.AwsS3Bucket{
|
||||
BucketName: cfg.ProjectName + "-public",
|
||||
IsPublic: true,
|
||||
TempPrefix: s3BucketTempPrefix,
|
||||
LocationConstraint: &cfg.AwsCredentials.Region,
|
||||
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
|
||||
CORSRules: []*s3.CORSRule{
|
||||
&s3.CORSRule{
|
||||
// Headers that are specified in the Access-Control-Request-Headers header.
|
||||
// These headers are allowed in a preflight OPTIONS request. In response to
|
||||
// any preflight OPTIONS request, Amazon S3 returns any requested headers that
|
||||
// are allowed.
|
||||
// AllowedHeaders: aws.StringSlice([]string{}),
|
||||
|
||||
// An HTTP method that you allow the origin to execute. Valid values are GET,
|
||||
// PUT, HEAD, POST, and DELETE.
|
||||
//
|
||||
// AllowedMethods is a required field
|
||||
AllowedMethods: aws.StringSlice([]string{"GET", "POST"}),
|
||||
|
||||
// One or more origins you want customers to be able to access the bucket from.
|
||||
//
|
||||
// AllowedOrigins is a required field
|
||||
AllowedOrigins: aws.StringSlice([]string{"*"}),
|
||||
|
||||
// One or more headers in the response that you want customers to be able to
|
||||
// access from their applications (for example, from a JavaScript XMLHttpRequest
|
||||
// object).
|
||||
// ExposeHeaders: aws.StringSlice([]string{}),
|
||||
|
||||
// The time in seconds that your browser is to cache the preflight response
|
||||
// for the specified resource.
|
||||
// MaxAgeSeconds: aws.Int64(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// The base s3 key prefix used to upload static files.
|
||||
cfg.AwsS3BucketPublicKeyPrefix = "/public"
|
||||
|
||||
// For production, enable Cloudfront CDN for all static files to avoid serving them from the slower S3 option.
|
||||
if cfg.Env == EnvProd {
|
||||
cfg.AwsS3BucketPublic.CloudFront = &devdeploy.AwsS3BucketCloudFront{
|
||||
// S3 key prefix to request your content from a directory in your Amazon S3 bucket.
|
||||
OriginPath: cfg.AwsS3BucketPublicKeyPrefix,
|
||||
|
||||
// A complex type that controls whether CloudFront caches the response to requests.
|
||||
CachedMethods: []string{"HEAD", "GET"},
|
||||
|
||||
// The distribution's configuration information.
|
||||
DistributionConfig: &cloudfront.DistributionConfig{
|
||||
Comment: aws.String(""),
|
||||
Enabled: aws.Bool(true),
|
||||
HttpVersion: aws.String("http2"),
|
||||
IsIPV6Enabled: aws.Bool(true),
|
||||
DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{
|
||||
Compress: aws.Bool(true),
|
||||
DefaultTTL: aws.Int64(1209600),
|
||||
MinTTL: aws.Int64(604800),
|
||||
MaxTTL: aws.Int64(31536000),
|
||||
ForwardedValues: &cloudfront.ForwardedValues{
|
||||
QueryString: aws.Bool(true),
|
||||
Cookies: &cloudfront.CookiePreference{
|
||||
Forward: aws.String("none"),
|
||||
},
|
||||
},
|
||||
TrustedSigners: &cloudfront.TrustedSigners{
|
||||
Enabled: aws.Bool(false),
|
||||
Quantity: aws.Int64(0),
|
||||
},
|
||||
ViewerProtocolPolicy: aws.String("allow-all"),
|
||||
},
|
||||
ViewerCertificate: &cloudfront.ViewerCertificate{
|
||||
CertificateSource: aws.String("cloudfront"),
|
||||
MinimumProtocolVersion: aws.String("TLSv1"),
|
||||
CloudFrontDefaultCertificate: aws.Bool(true),
|
||||
},
|
||||
PriceClass: aws.String("PriceClass_All"),
|
||||
CallerReference: aws.String("devops-deploy" + cfg.AwsS3BucketPublic.BucketName),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Define the private S3 bucket used for long term file storage including but not limited to: log exports,
|
||||
// AWS Lambda code, application caching.
|
||||
cfg.AwsS3BucketPrivate = &devdeploy.AwsS3Bucket{
|
||||
BucketName: cfg.ProjectName + "-private",
|
||||
IsPublic: false,
|
||||
TempPrefix: s3BucketTempPrefix,
|
||||
LocationConstraint: &cfg.AwsCredentials.Region,
|
||||
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
|
||||
PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
|
||||
// Specifies whether Amazon S3 should block public access control lists (ACLs)
|
||||
// for this bucket and objects in this bucket. Setting this element to TRUE
|
||||
// causes the following behavior:
|
||||
//
|
||||
// * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
|
||||
// public.
|
||||
//
|
||||
// * PUT Object calls fail if the request includes a public ACL.
|
||||
//
|
||||
// Enabling this setting doesn't affect existing policies or ACLs.
|
||||
BlockPublicAcls: aws.Bool(true),
|
||||
|
||||
// Specifies whether Amazon S3 should block public bucket policies for this
|
||||
// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
|
||||
// PUT Bucket policy if the specified bucket policy allows public access.
|
||||
//
|
||||
// Enabling this setting doesn't affect existing bucket policies.
|
||||
BlockPublicPolicy: aws.Bool(true),
|
||||
|
||||
// Specifies whether Amazon S3 should restrict public bucket policies for this
|
||||
// bucket. Setting this element to TRUE restricts access to this bucket to only
|
||||
// AWS services and authorized users within this account if the bucket has a
|
||||
// public policy.
|
||||
//
|
||||
// Enabling this setting doesn't affect previously stored bucket policies, except
|
||||
// that public and cross-account access within any public bucket policy, including
|
||||
// non-public delegation to specific accounts, is blocked.
|
||||
RestrictPublicBuckets: aws.Bool(true),
|
||||
|
||||
// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
|
||||
// objects in this bucket. Setting this element to TRUE causes Amazon S3 to
|
||||
// ignore all public ACLs on this bucket and objects in this bucket.
|
||||
//
|
||||
// Enabling this setting doesn't affect the persistence of any existing ACLs
|
||||
// and doesn't prevent new public ACLs from being set.
|
||||
IgnorePublicAcls: aws.Bool(true),
|
||||
},
|
||||
}
|
||||
|
||||
// Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
|
||||
cfg.AwsS3BucketPrivate.Policy = func() string {
|
||||
policyResource := strings.Trim(filepath.Join(cfg.AwsS3BucketPrivate.BucketName, cfg.AwsS3BucketPrivate.TempPrefix), "/")
|
||||
return fmt.Sprintf(`{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": "s3:GetBucketAcl",
|
||||
"Effect": "Allow",
|
||||
"Resource": "arn:aws:s3:::%s",
|
||||
"Principal": { "Service": "logs.%s.amazonaws.com" }
|
||||
},
|
||||
{
|
||||
"Action": "s3:PutObject" ,
|
||||
"Effect": "Allow",
|
||||
"Resource": "arn:aws:s3:::%s/*",
|
||||
"Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } },
|
||||
"Principal": { "Service": "logs.%s.amazonaws.com" }
|
||||
}
|
||||
]
|
||||
}`, cfg.AwsS3BucketPrivate.BucketName, cfg.AwsCredentials.Region, policyResource, cfg.AwsCredentials.Region)
|
||||
}()
|
||||
|
||||
// Define the Redis Cache cluster used for ephemeral storage.
|
||||
cfg.AwsElasticCacheCluster = &devdeploy.AwsElasticCacheCluster{
|
||||
CacheClusterId: cfg.ProjectName + "-" + cfg.Env,
|
||||
CacheNodeType: "cache.t2.micro",
|
||||
CacheSubnetGroupName: "default",
|
||||
Engine: "redis",
|
||||
EngineVersion: "5.0.4",
|
||||
NumCacheNodes: 1,
|
||||
Port: 6379,
|
||||
AutoMinorVersionUpgrade: aws.Bool(true),
|
||||
SnapshotRetentionLimit: aws.Int64(7),
|
||||
ParameterNameValues: []devdeploy.AwsElasticCacheParameter{
|
||||
devdeploy.AwsElasticCacheParameter{
|
||||
ParameterName: "maxmemory-policy",
|
||||
ParameterValue: "allkeys-lru",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// If serverless RDS is enabled, defined the RDS database cluster and link it to the database instance.
|
||||
if EnableRdsServerless {
|
||||
cfg.AwsRdsDBCluster = &devdeploy.AwsRdsDBCluster{
|
||||
DBClusterIdentifier: cfg.ProjectName + "-" + cfg.Env,
|
||||
Engine: "aurora-postgresql",
|
||||
EngineMode: "serverless",
|
||||
DatabaseName: "shared",
|
||||
MasterUsername: "god",
|
||||
Port: 5432,
|
||||
BackupRetentionPeriod: aws.Int64(7),
|
||||
CopyTagsToSnapshot: aws.Bool(true),
|
||||
Tags: []devdeploy.Tag{
|
||||
{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
|
||||
{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
|
||||
},
|
||||
PreCreate: func(input *rds.CreateDBClusterInput) error {
|
||||
input.ScalingConfiguration = &rds.ScalingConfiguration{
|
||||
// A value that indicates whether to allow or disallow automatic pause for an
|
||||
// Aurora DB cluster in serverless DB engine mode. A DB cluster can be paused
|
||||
// only when it's idle (it has no connections).
|
||||
//
|
||||
// If a DB cluster is paused for more than seven days, the DB cluster might
|
||||
// be backed up with a snapshot. In this case, the DB cluster is restored when
|
||||
// there is a request to connect to it.
|
||||
AutoPause: aws.Bool(true),
|
||||
|
||||
// The maximum capacity for an Aurora DB cluster in serverless DB engine mode.
|
||||
// Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.
|
||||
// The maximum capacity must be greater than or equal to the minimum capacity.
|
||||
MaxCapacity: aws.Int64(2),
|
||||
|
||||
// The minimum capacity for an Aurora DB cluster in serverless DB engine mode.
|
||||
// Valid capacity values are 1, 2, 4, 8, 16, 32, 64, 128, and 256.
|
||||
// The minimum capacity must be less than or equal to the maximum capacity.
|
||||
MinCapacity: aws.Int64(2),
|
||||
|
||||
// The time, in seconds, before an Aurora DB cluster in serverless mode is paused.
|
||||
SecondsUntilAutoPause: aws.Int64(3600),
|
||||
|
||||
// The action to take when the timeout is reached, either ForceApplyCapacityChange
|
||||
// or RollbackCapacityChange.
|
||||
// ForceApplyCapacityChange sets the capacity to the specified value as soon
|
||||
// as possible.
|
||||
// RollbackCapacityChange, the default, ignores the capacity change if a scaling
|
||||
// point is not found in the timeout period.
|
||||
// If you specify ForceApplyCapacityChange, connections that prevent Aurora
|
||||
// Serverless from finding a scaling point might be dropped.
|
||||
// For more information, see Autoscaling for Aurora Serverless (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling)
|
||||
// in the Amazon Aurora User Guide.
|
||||
TimeoutAction: aws.String("ForceApplyCapacityChange"),
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
AfterCreate: func(res *rds.DBCluster, dbInfo *devdeploy.DBConnInfo, masterDb *sqlx.DB) error {
|
||||
return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
|
||||
},
|
||||
}
|
||||
} else {
|
||||
// Define the RDS database instance for transactional data. A random password will be generated for any created instance.
|
||||
cfg.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
|
||||
DBInstanceIdentifier: cfg.ProjectName + "-" + cfg.Env,
|
||||
DBName: "shared",
|
||||
Engine: "postgres",
|
||||
MasterUsername: "god",
|
||||
Port: 5432,
|
||||
DBInstanceClass: "db.t2.small",
|
||||
AllocatedStorage: 20,
|
||||
PubliclyAccessible: false,
|
||||
BackupRetentionPeriod: aws.Int64(7),
|
||||
AutoMinorVersionUpgrade: true,
|
||||
CopyTagsToSnapshot: aws.Bool(true),
|
||||
Tags: []devdeploy.Tag{
|
||||
{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
|
||||
{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
|
||||
},
|
||||
AfterCreate: func(res *rds.DBInstance, dbInfo *devdeploy.DBConnInfo, masterDb *sqlx.DB) error {
|
||||
return schema.Migrate(context.Background(), cfg.Env, masterDb, log, false)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
	// AwsIamPolicy defines the name and policy that will be attached to the task role. The policy document grants
	// the permissions required for deployed services to access AWS services. If the policy already exists, the
	// statements will be used to add new required actions, but not for removal.
	cfg.AwsIamPolicy = &devdeploy.AwsIamPolicy{
		PolicyName:  fmt.Sprintf("%s%sServices", cfg.ProjectNameCamel(), strcase.ToCamel(cfg.Env)),
		Description: fmt.Sprintf("Defines access for %s services. ", cfg.ProjectName),
		PolicyDocument: devdeploy.AwsIamPolicyDocument{
			Version: "2012-10-17",
			Statement: []devdeploy.AwsIamStatementEntry{
				{
					Sid:    "DefaultServiceAccess",
					Effect: "Allow",
					Action: []string{
						"s3:HeadBucket",
						"s3:ListObjects",
						"s3:PutObject",
						"s3:PutObjectAcl",
						"cloudfront:ListDistributions",
						"ec2:DescribeNetworkInterfaces",
						"ec2:DeleteNetworkInterface",
						"ecs:ListTasks",
						"ecs:DescribeServices",
						"ecs:DescribeTasks",
						"ec2:DescribeNetworkInterfaces",
						"route53:ListHostedZones",
						"route53:ListResourceRecordSets",
						"route53:ChangeResourceRecordSets",
						"ecs:UpdateService",
						"ses:SendEmail",
						"ses:ListIdentities",
						"ses:GetAccountSendingEnabled",
						"secretsmanager:ListSecretVersionIds",
						"secretsmanager:GetSecretValue",
						"secretsmanager:CreateSecret",
						"secretsmanager:UpdateSecret",
						"secretsmanager:RestoreSecret",
						"secretsmanager:DeleteSecret",
					},
					Resource: "*",
				},
				{
					Sid:    "ServiceInvokeLambda",
					Effect: "Allow",
					Action: []string{
						"iam:GetRole",
						"lambda:InvokeFunction",
						"lambda:ListVersionsByFunction",
						"lambda:GetFunction",
						"lambda:InvokeAsync",
						"lambda:GetFunctionConfiguration",
						"iam:PassRole",
						"lambda:GetAlias",
						"lambda:GetPolicy",
					},
					Resource: []string{
						"arn:aws:iam:::role/*",
						"arn:aws:lambda:::function:*",
					},
				},
				{
					Sid:    "datadoglambda",
					Effect: "Allow",
					Action: []string{
						"cloudwatch:Get*",
						"cloudwatch:List*",
						"ec2:Describe*",
						"support:*",
						"tag:GetResources",
						"tag:GetTagKeys",
						"tag:GetTagValues",
					},
					Resource: "*",
				},
			},
		},
	}
	log.Printf("\t\tSet Task Policy Name to '%s'.", cfg.AwsIamPolicy.PolicyName)

	return cfg, nil
}

// getDatadogApiKey tries to find the datadog api key from an env variable or AWS Secrets Manager.
func getDatadogApiKey(cfg *devdeploy.Config) (string, error) {
	// Load the Datadog API key which can be stored either in an environment variable or in AWS Secrets Manager.
	// 1. Check env vars for [DEV|STAGE|PROD]_DD_API_KEY and DD_API_KEY.
	apiKey := devdeploy.GetTargetEnv(cfg.Env, "DD_API_KEY")

	// 2. Check AWS Secrets Manager for a datadog entry prefixed with the target environment.
	if apiKey == "" {
		prefixedSecretId := cfg.SecretID("datadog")
		var err error
		apiKey, err = devdeploy.GetAwsSecretValue(cfg.AwsCredentials, prefixedSecretId)
		if err != nil {
			if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
				return "", err
			}
		}
	}

	// 3. Check AWS Secrets Manager for a global Datadog entry.
	if apiKey == "" {
		secretId := "DATADOG"
		var err error
		apiKey, err = devdeploy.GetAwsSecretValue(cfg.AwsCredentials, secretId)
		if err != nil {
			if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
				return "", err
			}
		}
	}

	return apiKey, nil
}

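As an illustration of the lookup order, here is a small standalone sketch; the behavior of devdeploy.GetTargetEnv is assumed (env-prefixed variable first, bare name second), and all key values are hypothetical:

package main

import (
	"fmt"
	"os"
	"strings"
)

// getTargetEnv mirrors the assumed behavior of devdeploy.GetTargetEnv:
// prefer the env-prefixed variable, fall back to the bare name.
func getTargetEnv(targetEnv, name string) string {
	if v := os.Getenv(strings.ToUpper(targetEnv) + "_" + name); v != "" {
		return v
	}
	return os.Getenv(name)
}

func main() {
	os.Setenv("PROD_DD_API_KEY", "prod-key") // hypothetical value
	os.Setenv("DD_API_KEY", "default-key")   // hypothetical value

	fmt.Println(getTargetEnv("prod", "DD_API_KEY")) // prod-key
	fmt.Println(getTargetEnv("dev", "DD_API_KEY"))  // default-key
}
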
// getCommitRef returns a string that will be used by go build to set the build constant in main.go.
func getCommitRef() string {
	var commitRef string

	// Set the commit ref based on the GitLab CI/CD environment variables.
	if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
		commitRef = "tag-" + ev
	} else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
		commitRef = "branch-" + ev
	}

	if commitRef != "" {
		if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
			commitRef = commitRef + "@" + ev
		}
	}

	return commitRef
}

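For reference, a minimal standalone sketch of the same precedence (tag first, then branch, then append the short sha); the CI values below are hypothetical:

package main

import (
	"fmt"
	"os"
)

// commitRef mirrors getCommitRef above: prefer the tag, fall back to the
// branch, and append the short sha when present.
func commitRef() string {
	var ref string
	if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
		ref = "tag-" + ev
	} else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
		ref = "branch-" + ev
	}
	if ref != "" {
		if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
			ref = ref + "@" + ev
		}
	}
	return ref
}

func main() {
	os.Setenv("CI_COMMIT_REF_NAME", "master")   // hypothetical value
	os.Setenv("CI_COMMIT_SHORT_SHA", "abc1234") // hypothetical value
	fmt.Println(commitRef()) // prints: branch-master@abc1234
}
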
// gitRemoteUser returns the git username/organization for the git repo.
func gitRemoteUser(projectRoot string) string {
	dat, err := ioutil.ReadFile(filepath.Join(projectRoot, ".git/config"))
	if err != nil {
		return ""
	}

	var remoteUrl string
	lines := strings.Split(string(dat), "\n")
	for _, l := range lines {
		l = strings.TrimSpace(l)
		if strings.HasPrefix(l, "url =") {
			remoteUrl = l
			break
		}
	}

	if remoteUrl == "" {
		return ""
	}
	remoteUrl = strings.TrimSpace(strings.Split(remoteUrl, "=")[1])

	if !strings.Contains(remoteUrl, ":") {
		return ""
	}
	remoteUser := strings.Split(remoteUrl, ":")[1]
	remoteUser = strings.Split(remoteUser, "/")[0]

	return remoteUser
}
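To make the parsing concrete, a short sketch with a hypothetical SSH-style remote; note the ":" split means HTTPS remotes fall through and return an empty string:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A typical SSH remote line from .git/config (hypothetical value).
	l := "url = git@gitlab.com:geeks-accelerator/oss/saas-starter-kit.git"

	// Same steps as gitRemoteUser: take the value after "=", then the
	// segment between ":" and the first "/".
	remoteUrl := strings.TrimSpace(strings.Split(l, "=")[1])
	remoteUser := strings.Split(strings.Split(remoteUrl, ":")[1], "/")[0]

	fmt.Println(remoteUser) // prints: geeks-accelerator
}
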
385
build/cicd/internal/config/function.go
Normal file
@@ -0,0 +1,385 @@
package config

import (
	"encoding/json"
	"log"
	"path/filepath"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/pkg/errors"
	"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

// Function defines the name of a function.
type Function = string

var (
	Function_Ddlogscollector = "ddlogscollector"
	Function_YourNewFunction = "your-new-function"
)

// List of function names used by main.go for help.
var FunctionNames = []Function{
	// Python Datadog Logs Collector
	Function_Ddlogscollector,
	Function_YourNewFunction,
}

// FunctionContext defines the flags for deploying a function.
type FunctionContext struct {
	// Required flags.
	Name              string                       `validate:"required" example:"aws-lambda-go-func"`
	AwsLambdaFunction *devdeploy.AwsLambdaFunction `validate:"required"`
	AwsIamRole        *devdeploy.AwsIamRole        `validate:"required"`
	AwsIamPolicy      *devdeploy.AwsIamPolicy      `validate:"required"`

	// Optional flags.
	FunctionDir        string `validate:"omitempty"`
	BuildDir           string `validate:"omitempty"`
	DockerBuildContext string `validate:"omitempty" example:"."`
	Dockerfile         string `validate:"required" example:"./cmd/web-api/Dockerfile"`
	ReleaseTag         string `validate:"required"`
	EnableVPC          bool   `validate:"omitempty" example:"false"`
}

// NewFunctionContext returns the FunctionContext.
func NewFunctionContext(funcName string, cfg *devdeploy.Config) (*FunctionContext, error) {

	ctx := &FunctionContext{
		Name: funcName,

		FunctionDir: filepath.Join(cfg.ProjectRoot, "examples", funcName),

		DockerBuildContext: ".",

		// Set the release tag for the image to use, which includes env + function name + commit hash/tag.
		ReleaseTag: devdeploy.GitLabCiReleaseTag(cfg.Env, funcName),
	}

	switch funcName {
	case Function_YourNewFunction:
		// No additional settings for function.

	case Function_Ddlogscollector:

		// The Python Datadog Logs Collector is kept in its own deployments directory rather than under examples.
		ctx.FunctionDir = filepath.Join(cfg.ProjectRoot, "deployments/ddlogscollector")

		// Change the build directory to the function directory instead of project root.
		ctx.BuildDir = ctx.FunctionDir

		// AwsLambdaFunction defines the details needed to create a lambda function.
		ctx.AwsLambdaFunction = &devdeploy.AwsLambdaFunction{
			FunctionName: ctx.Name,
			Description:  "Ship logs from cloudwatch to datadog",

			Handler:    "lambda_function.lambda_handler",
			Runtime:    "python2.7",
			MemorySize: 512,

			Timeout: aws.Int64(300),
			Environment: map[string]string{
				"DD_API_KEY":  "",
				"LAMBDA_FUNC": ctx.Name,
			},
			Tags: []devdeploy.Tag{
				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
			},
		}

		ctx.AwsIamRole = &devdeploy.AwsIamRole{
			RoleName:                 "DatadogAWSIntegrationLambdaRole",
			Description:              "Allows Datadog to run Lambda functions to call AWS services on your behalf.",
			AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"lambda.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
			Tags: []devdeploy.Tag{
				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
			},
		}

		ctx.AwsIamPolicy = &devdeploy.AwsIamPolicy{
			PolicyName:  "DatadogAWSIntegrationPolicy",
			Description: "Provides Datadog Lambda function the ability to ship AWS service related logs back to Datadog.",
			PolicyDocument: devdeploy.AwsIamPolicyDocument{
				Version: "2012-10-17",
				Statement: []devdeploy.AwsIamStatementEntry{
					{
						Action: []string{
							"apigateway:GET",
							"autoscaling:Describe*",
							"budgets:ViewBudget",
							"cloudfront:GetDistributionConfig",
							"cloudfront:ListDistributions",
							"cloudtrail:DescribeTrails",
							"cloudtrail:GetTrailStatus",
							"cloudwatch:Describe*",
							"cloudwatch:Get*",
							"cloudwatch:List*",
							"codedeploy:List*",
							"codedeploy:BatchGet*",
							"directconnect:Describe*",
							"dynamodb:List*",
							"dynamodb:Describe*",
							"ec2:Describe*",
							"ecs:Describe*",
							"ecs:List*",
							"elasticache:Describe*",
							"elasticache:List*",
							"elasticfilesystem:DescribeFileSystems",
							"elasticfilesystem:DescribeTags",
							"elasticloadbalancing:Describe*",
							"elasticmapreduce:List*",
							"elasticmapreduce:Describe*",
							"es:ListTags",
							"es:ListDomainNames",
							"es:DescribeElasticsearchDomains",
							"health:DescribeEvents",
							"health:DescribeEventDetails",
							"health:DescribeAffectedEntities",
							"kinesis:List*",
							"kinesis:Describe*",
							"lambda:AddPermission",
							"lambda:GetPolicy",
							"lambda:List*",
							"lambda:RemovePermission",
							"logs:Get*",
							"logs:Describe*",
							"logs:FilterLogEvents",
							"logs:TestMetricFilter",
							"logs:PutSubscriptionFilter",
							"logs:DeleteSubscriptionFilter",
							"logs:DescribeSubscriptionFilters",
							"rds:Describe*",
							"rds:List*",
							"redshift:DescribeClusters",
							"redshift:DescribeLoggingStatus",
							"route53:List*",
							"s3:GetBucketLogging",
							"s3:GetBucketLocation",
							"s3:GetBucketNotification",
							"s3:GetBucketTagging",
							"s3:ListAllMyBuckets",
							"s3:PutBucketNotification",
							"ses:Get*",
							"sns:List*",
							"sns:Publish",
							"sqs:ListQueues",
							"support:*",
							"tag:GetResources",
							"tag:GetTagKeys",
							"tag:GetTagValues",
							"xray:BatchGetTraces",
							"xray:GetTraceSummaries",
							"lambda:List*",
							"logs:DescribeLogGroups",
							"logs:DescribeLogStreams",
							"logs:FilterLogEvents",
							"tag:GetResources",
							"cloudfront:GetDistributionConfig",
							"cloudfront:ListDistributions",
							"elasticloadbalancing:DescribeLoadBalancers",
							"elasticloadbalancing:DescribeLoadBalancerAttributes",
							"lambda:AddPermission",
							"lambda:GetPolicy",
							"lambda:RemovePermission",
							"redshift:DescribeClusters",
							"redshift:DescribeLoggingStatus",
							"s3:GetBucketLogging",
							"s3:GetBucketLocation",
							"s3:GetBucketNotification",
							"s3:ListAllMyBuckets",
							"s3:PutBucketNotification",
							"logs:PutSubscriptionFilter",
							"logs:DeleteSubscriptionFilter",
							"logs:DescribeSubscriptionFilters",
						},
						Effect:   "Allow",
						Resource: "*",
					},
				},
			},
		}

	default:
		return nil, errors.Wrapf(devdeploy.ErrInvalidFunction,
			"No function context defined for function '%s'",
			funcName)
	}

	// Append the datadog api key before execution. The nil check guards placeholder
	// functions that have not yet defined a lambda function config.
	if ctx.AwsLambdaFunction != nil {
		ctx.AwsLambdaFunction.UpdateEnvironment = func(vars map[string]string) error {
			datadogApiKey, err := getDatadogApiKey(cfg)
			if err != nil {
				return err
			}
			vars["DD_API_KEY"] = datadogApiKey
			return nil
		}
	}

	// Set the docker file if no custom one has been defined for the function.
	if ctx.Dockerfile == "" {
		ctx.Dockerfile = filepath.Join(ctx.BuildDir, "Dockerfile")
	}

	return ctx, nil
}

// Build handles defining all the information needed to build a function with docker.
func (ctx *FunctionContext) Build(log *log.Logger, noCache, noPush bool) (*devdeploy.BuildLambda, error) {

	log.Printf("Define build for function '%s'.", ctx.Name)
	log.Printf("\tUsing release tag %s.", ctx.ReleaseTag)

	srv := &devdeploy.BuildLambda{
		FuncName:           ctx.Name,
		ReleaseTag:         ctx.ReleaseTag,
		BuildDir:           ctx.BuildDir,
		Dockerfile:         ctx.Dockerfile,
		DockerBuildContext: ctx.DockerBuildContext,
		NoCache:            noCache,
		NoPush:             noPush,
	}

	return srv, nil
}

// Deploy handles defining all the information needed to deploy a function to AWS Lambda.
func (ctx *FunctionContext) Deploy(log *log.Logger) (*devdeploy.DeployLambda, error) {

	log.Printf("Define deploy for function '%s'.", ctx.Name)
	log.Printf("\tUsing release tag %s.", ctx.ReleaseTag)

	srv := &devdeploy.DeployLambda{
		FuncName:          ctx.Name,
		EnableVPC:         ctx.EnableVPC,
		AwsLambdaFunction: ctx.AwsLambdaFunction,
		AwsIamPolicy:      ctx.AwsIamPolicy,
		AwsIamRole:        ctx.AwsIamRole,
	}

	return srv, nil
}

// S3Location returns the s3 bucket and key used to upload the code to.
func (ctx *FunctionContext) S3Location(cfg *devdeploy.Config) (string, string) {
	s3Bucket := cfg.AwsS3BucketPrivate.BucketName
	s3Key := filepath.Join("src", "aws", "lambda", cfg.Env, ctx.Name, ctx.ReleaseTag+".zip")

	return s3Bucket, s3Key
}

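For example (hypothetical values), the uploaded zip ends up under a key that encodes the env, function name, and release tag:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Mirror S3Location's key layout with hypothetical inputs.
	env, name, tag := "prod", "ddlogscollector", "prod-ddlogscollector-abc1234"
	s3Key := filepath.Join("src", "aws", "lambda", env, name, tag+".zip")
	fmt.Println(s3Key) // src/aws/lambda/prod/ddlogscollector/prod-ddlogscollector-abc1234.zip
}
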
// BuildFunctionForTargetEnv executes the build commands for a target function.
func BuildFunctionForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, functionName, releaseTag string, dryRun, noCache, noPush bool) error {

	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
	if err != nil {
		return err
	}

	cfg, err := cfgCtx.Config(log)
	if err != nil {
		return err
	}

	funcCtx, err := NewFunctionContext(functionName, cfg)
	if err != nil {
		return err
	}

	// Override the release tag if set.
	if releaseTag != "" {
		funcCtx.ReleaseTag = releaseTag
	}

	details, err := funcCtx.Build(log, noCache, noPush)
	if err != nil {
		return err
	}

	// Set the s3 bucket and key for uploading the zip file.
	details.CodeS3Bucket, details.CodeS3Key = funcCtx.S3Location(cfg)

	// funcPath is used to copy the function specific code in the Dockerfile.
	funcPath, err := filepath.Rel(cfg.ProjectRoot, funcCtx.FunctionDir)
	if err != nil {
		return err
	}

	// commitRef is used to set the build constant in main.go.
	commitRef := getCommitRef()
	if commitRef == "" {
		commitRef = funcCtx.ReleaseTag
	}

	details.BuildArgs = map[string]string{
		"func_path":  funcPath,
		"commit_ref": commitRef,
	}

	if dryRun {
		cfgJSON, err := json.MarshalIndent(cfg, "", " ")
		if err != nil {
			log.Fatalf("BuildFunctionForTargetEnv : Marshalling config to JSON : %+v", err)
		}
		log.Printf("BuildFunctionForTargetEnv : config : %v\n", string(cfgJSON))

		detailsJSON, err := json.MarshalIndent(details, "", " ")
		if err != nil {
			log.Fatalf("BuildFunctionForTargetEnv : Marshalling details to JSON : %+v", err)
		}
		log.Printf("BuildFunctionForTargetEnv : details : %v\n", string(detailsJSON))

		return nil
	}

	return devdeploy.BuildLambdaForTargetEnv(log, cfg, details)
}

// DeployFunctionForTargetEnv executes the deploy commands for a target function.
func DeployFunctionForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, functionName, releaseTag string, dryRun bool) error {

	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
	if err != nil {
		return err
	}

	cfg, err := cfgCtx.Config(log)
	if err != nil {
		return err
	}

	funcCtx, err := NewFunctionContext(functionName, cfg)
	if err != nil {
		return err
	}

	// Override the release tag if set.
	if releaseTag != "" {
		funcCtx.ReleaseTag = releaseTag
	}

	details, err := funcCtx.Deploy(log)
	if err != nil {
		return err
	}

	// Set the s3 bucket and key for uploading the zip file.
	details.CodeS3Bucket, details.CodeS3Key = funcCtx.S3Location(cfg)

	if dryRun {
		cfgJSON, err := json.MarshalIndent(cfg, "", " ")
		if err != nil {
			log.Fatalf("DeployFunctionForTargetEnv : Marshalling config to JSON : %+v", err)
		}
		log.Printf("DeployFunctionForTargetEnv : config : %v\n", string(cfgJSON))

		detailsJSON, err := json.MarshalIndent(details, "", " ")
		if err != nil {
			log.Fatalf("DeployFunctionForTargetEnv : Marshalling details to JSON : %+v", err)
		}
		log.Printf("DeployFunctionForTargetEnv : details : %v\n", string(detailsJSON))

		return nil
	}

	return devdeploy.DeployLambdaToTargetEnv(log, cfg, details)
}
38
build/cicd/internal/config/schema.go
Normal file
@@ -0,0 +1,38 @@
package config

import (
	"context"
	"log"

	"geeks-accelerator/oss/saas-starter-kit/internal/schema"
	"github.com/jmoiron/sqlx"
	"github.com/pkg/errors"
	"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

// RunSchemaMigrationsForTargetEnv executes schema migrations for the target environment.
func RunSchemaMigrationsForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, isUnittest bool) error {

	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
	if err != nil {
		return err
	}

	cfg, err := cfgCtx.Config(log)
	if err != nil {
		return err
	}

	err = devdeploy.SetupDeploymentEnv(log, cfg)
	if err != nil {
		return err
	}

	masterDb, err := sqlx.Open(cfg.DBConnInfo.Driver, cfg.DBConnInfo.URL())
	if err != nil {
		return errors.Wrap(err, "Failed to connect to db for schema migration.")
	}
	defer masterDb.Close()

	// Pass isUnittest through so the migrations know when they are running for unit tests.
	return schema.Migrate(context.Background(), targetEnv, masterDb, log, isUnittest)
}
754
build/cicd/internal/config/service.go
Normal file
@@ -0,0 +1,754 @@
package config

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
	"github.com/iancoleman/strcase"
	"github.com/pkg/errors"
	"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
	"gopkg.in/go-playground/validator.v9"
)

// Service defines the name of a service.
type Service = string

var (
	ServiceWebApi = "web-api"
	ServiceWebApp = "web-app"
)

// List of service names used by main.go for help.
var ServiceNames = []Service{
	ServiceWebApi,
	ServiceWebApp,
}

// ServiceConfig defines the settings for a service.
type ServiceConfig struct {
	// Required flags.
	Name               string `validate:"required" example:"web-api"`
	ServiceHostPrimary string `validate:"required" example:"example-project.com"`
	DesiredCount       int    `validate:"required" example:"2"`
	ServiceDir         string `validate:"required"`
	Dockerfile         string `validate:"required" example:"./cmd/web-api/Dockerfile"`
	ReleaseTag         string `validate:"required"`

	// Optional flags.
	ServiceHostNames    []string `validate:"omitempty" example:"subdomain.example-project.com"`
	EnableHTTPS         bool     `validate:"omitempty" example:"false"`
	EnableElb           bool     `validate:"omitempty" example:"false"`
	StaticFilesS3Enable bool     `validate:"omitempty" example:"false"`
	BuildDir            string   `validate:"omitempty"`
	DockerBuildContext  string   `validate:"omitempty" example:"."`
}

// ServiceContext includes the config and task definition for building and deploying a service.
type ServiceContext struct {
	ServiceConfig

	// AwsEcsTaskDefinition defines the ECS task definition based on the service configs.
	AwsEcsTaskDefinition func(cfg *devdeploy.Config, srv *devdeploy.DeployService) (*ecs.RegisterTaskDefinitionInput, error)
}

// NewServiceConfig returns the ServiceConfig for a service that is configured for the target deployment env.
func NewServiceConfig(serviceName string, cfg *devdeploy.Config) (ServiceConfig, error) {

	// =========================================================================
	// New service context.
	srv := ServiceConfig{
		Name:               serviceName,
		DesiredCount:       1,
		DockerBuildContext: ".",
		ServiceDir:         filepath.Join(cfg.ProjectRoot, "cmd", serviceName),

		// Set the release tag for the image to use, which includes env + service name + commit hash/tag.
		ReleaseTag: devdeploy.GitLabCiReleaseTag(cfg.Env, serviceName),
	}

	// =========================================================================
	// Context settings based on target env.
	if cfg.Env == EnvStage || cfg.Env == EnvProd {
		srv.EnableHTTPS = true
		srv.StaticFilesS3Enable = true
	} else {
		srv.EnableHTTPS = false
		srv.StaticFilesS3Enable = false
	}

	// =========================================================================
	// Service dependent settings.
	switch serviceName {
	case ServiceWebApp:

		// Set the hostnames for the service.
		if cfg.Env == EnvProd {
			srv.ServiceHostPrimary = "example.saasstartupkit.com"

			// Any hostname listed here that doesn't match the primary hostname will be updated in Route 53 but the
			// service itself will redirect any requests back to the primary hostname.
			srv.ServiceHostNames = []string{
				fmt.Sprintf("%s.example.saasstartupkit.com", cfg.Env),
			}
		} else {
			srv.ServiceHostPrimary = fmt.Sprintf("%s.example.saasstartupkit.com", cfg.Env)
		}

	case ServiceWebApi:

		// Set the hostnames for the service.
		if cfg.Env == EnvProd {
			srv.ServiceHostPrimary = "api.example.saasstartupkit.com"
		} else {
			srv.ServiceHostPrimary = fmt.Sprintf("api.%s.example.saasstartupkit.com", cfg.Env)
		}

	default:
		return ServiceConfig{}, errors.Wrapf(devdeploy.ErrInvalidService,
			"No service config defined for service '%s'",
			serviceName)
	}

	// Set the docker file if no custom one has been defined for the service.
	if srv.Dockerfile == "" {
		srv.Dockerfile = filepath.Join(srv.ServiceDir, "Dockerfile")
	}

	// Ensure the service config is valid.
	errs := validator.New().Struct(srv)
	if errs != nil {
		return srv, errs
	}

	return srv, nil
}

// BaseUrl returns the base url for a specific service.
func (c ServiceConfig) BaseUrl() string {
	var schema string
	if c.EnableHTTPS {
		schema = "https"
	} else {
		schema = "http"
	}
	return fmt.Sprintf("%s://%s/", schema, c.ServiceHostPrimary)
}

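A quick sketch of the resulting urls; the hostnames are the examples used elsewhere in this file:

package main

import "fmt"

// baseUrl mirrors ServiceConfig.BaseUrl above.
func baseUrl(enableHTTPS bool, host string) string {
	schema := "http"
	if enableHTTPS {
		schema = "https"
	}
	return fmt.Sprintf("%s://%s/", schema, host)
}

func main() {
	fmt.Println(baseUrl(true, "example.saasstartupkit.com"))      // https://example.saasstartupkit.com/
	fmt.Println(baseUrl(false, "dev.example.saasstartupkit.com")) // http://dev.example.saasstartupkit.com/
}
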
// NewServiceContext returns the ServiceContext for a service that is configured for the target deployment env.
func NewServiceContext(serviceName Service, cfg *devdeploy.Config) (*ServiceContext, error) {

	// =========================================================================
	// Shared details that could be applied to all task definitions.

	// Load the web-app config so the web-api can reference its hostname.
	webAppCfg, err := NewServiceConfig(ServiceWebApp, cfg)
	if err != nil {
		return nil, err
	}

	// Load the web-api config so the web-app can reference its hostname.
	webApiCfg, err := NewServiceConfig(ServiceWebApi, cfg)
	if err != nil {
		return nil, err
	}

	// Define a base set of environment variables that can be assigned to individual container definitions.
	baseEnvVals := func(cfg *devdeploy.Config, srv *devdeploy.DeployService) []*ecs.KeyValuePair {

		var ciJobURL string
		if id := os.Getenv("CI_JOB_ID"); id != "" {
			ciJobURL = strings.TrimRight(GitLabProjectBaseUrl, "/") + "/-/jobs/" + id
		}

		var ciPipelineURL string
		if id := os.Getenv("CI_PIPELINE_ID"); id != "" {
			ciPipelineURL = strings.TrimRight(GitLabProjectBaseUrl, "/") + "/pipelines/" + id
		}

		return []*ecs.KeyValuePair{
			ecsKeyValuePair(devdeploy.ENV_KEY_ECS_CLUSTER, srv.AwsEcsCluster.ClusterName),
			ecsKeyValuePair(devdeploy.ENV_KEY_ECS_SERVICE, srv.AwsEcsService.ServiceName),
			ecsKeyValuePair("AWS_REGION", cfg.AwsCredentials.Region),
			ecsKeyValuePair("AWS_USE_ROLE", "true"),
			ecsKeyValuePair("AWSLOGS_GROUP", srv.AwsCloudWatchLogGroup.LogGroupName),
			ecsKeyValuePair("ECS_ENABLE_CONTAINER_METADATA", "true"),
			ecsKeyValuePair("CI_COMMIT_REF_NAME", os.Getenv("CI_COMMIT_REF_NAME")),
			ecsKeyValuePair("CI_COMMIT_SHORT_SHA", os.Getenv("CI_COMMIT_SHORT_SHA")),
			ecsKeyValuePair("CI_COMMIT_SHA", os.Getenv("CI_COMMIT_SHA")),
			ecsKeyValuePair("CI_COMMIT_TAG", os.Getenv("CI_COMMIT_TAG")),
			ecsKeyValuePair("CI_JOB_ID", os.Getenv("CI_JOB_ID")),
			ecsKeyValuePair("CI_PIPELINE_ID", os.Getenv("CI_PIPELINE_ID")),
			ecsKeyValuePair("CI_JOB_URL", ciJobURL),
			ecsKeyValuePair("CI_PIPELINE_URL", ciPipelineURL),
			ecsKeyValuePair("WEB_APP_BASE_URL", webAppCfg.BaseUrl()),
			ecsKeyValuePair("WEB_API_BASE_URL", webApiCfg.BaseUrl()),
			ecsKeyValuePair("EMAIL_SENDER", "lee+saas-starter-kit@geeksinthewoods.com"),
		}
	}

	// =========================================================================
	// Service dependent settings.

	var ctx *ServiceContext
	switch serviceName {

	// Define the ServiceContext for the web-app that will be used for build and deploy.
	case ServiceWebApp:

		ctx = &ServiceContext{
			ServiceConfig: webAppCfg,
		}

		// Define the service task definition with a function to enable use of config and deploy details.
		ctx.AwsEcsTaskDefinition = func(cfg *devdeploy.Config, srv *devdeploy.DeployService) (*ecs.RegisterTaskDefinitionInput, error) {

			// Define a container definition for the specific service.
			container1 := &ecs.ContainerDefinition{
				Name:      aws.String(ctx.Name),
				Image:     aws.String(srv.ReleaseImage),
				Essential: aws.Bool(true),
				LogConfiguration: &ecs.LogConfiguration{
					LogDriver: aws.String("awslogs"),
					Options: map[string]*string{
						"awslogs-group":         aws.String(srv.AwsCloudWatchLogGroup.LogGroupName),
						"awslogs-region":        aws.String(cfg.AwsCredentials.Region),
						"awslogs-stream-prefix": aws.String("ecs"),
					},
				},
				PortMappings: []*ecs.PortMapping{
					&ecs.PortMapping{
						HostPort:      aws.Int64(80),
						Protocol:      aws.String("tcp"),
						ContainerPort: aws.Int64(80),
					},
				},
				Cpu:               aws.Int64(128),
				MemoryReservation: aws.Int64(128),
				Environment:       baseEnvVals(cfg, srv),
				HealthCheck: &ecs.HealthCheck{
					Retries: aws.Int64(3),
					Command: aws.StringSlice([]string{
						"CMD-SHELL",
						"curl -f http://localhost/ping || exit 1",
					}),
					Timeout:     aws.Int64(5),
					Interval:    aws.Int64(60),
					StartPeriod: aws.Int64(60),
				},
				Ulimits: []*ecs.Ulimit{
					&ecs.Ulimit{
						Name:      aws.String("nofile"),
						SoftLimit: aws.Int64(987654),
						HardLimit: aws.Int64(999999),
					},
				},
			}

			// If the service has HTTPS enabled without the use of an AWS Elastic Load Balancer, then need to enable
			// traffic for port 443 for SSL traffic to get terminated on the deployed tasks.
			if ctx.EnableHTTPS && !ctx.EnableElb {
				container1.PortMappings = append(container1.PortMappings, &ecs.PortMapping{
					HostPort:      aws.Int64(443),
					Protocol:      aws.String("tcp"),
					ContainerPort: aws.Int64(443),
				})
			}

			// Append env vars for the service task.
			container1.Environment = append(container1.Environment,
				ecsKeyValuePair("SERVICE_NAME", srv.ServiceName),
				ecsKeyValuePair("PROJECT_NAME", cfg.ProjectName),

				// Use placeholders for these environment variables that will be replaced by devdeploy.DeployServiceToTargetEnv.
				ecsKeyValuePair("WEB_APP_HTTP_HOST", "{HTTP_HOST}"),
				ecsKeyValuePair("WEB_APP_HTTPS_HOST", "{HTTPS_HOST}"),
				ecsKeyValuePair("WEB_APP_SERVICE_ENABLE_HTTPS", "{HTTPS_ENABLED}"),
				ecsKeyValuePair("WEB_APP_SERVICE_BASE_URL", "{APP_BASE_URL}"),
				ecsKeyValuePair("WEB_APP_SERVICE_HOST_NAMES", "{HOST_NAMES}"),
				ecsKeyValuePair("WEB_APP_SERVICE_STATICFILES_S3_ENABLED", "{STATIC_FILES_S3_ENABLED}"),
				ecsKeyValuePair("WEB_APP_SERVICE_STATICFILES_S3_PREFIX", "{STATIC_FILES_S3_PREFIX}"),
				ecsKeyValuePair("WEB_APP_SERVICE_STATICFILES_CLOUDFRONT_ENABLED", "{STATIC_FILES_CLOUDFRONT_ENABLED}"),
				ecsKeyValuePair("WEB_APP_REDIS_HOST", "{CACHE_HOST}"),
				ecsKeyValuePair("WEB_APP_DB_HOST", "{DB_HOST}"),
				ecsKeyValuePair("WEB_APP_DB_USERNAME", "{DB_USER}"),
				ecsKeyValuePair("WEB_APP_DB_PASSWORD", "{DB_PASS}"),
				ecsKeyValuePair("WEB_APP_DB_DATABASE", "{DB_DATABASE}"),
				ecsKeyValuePair("WEB_APP_DB_DRIVER", "{DB_DRIVER}"),
				ecsKeyValuePair("WEB_APP_DB_DISABLE_TLS", "{DB_DISABLE_TLS}"),
				ecsKeyValuePair("WEB_APP_AWS_S3_BUCKET_PRIVATE", "{AWS_S3_BUCKET_PRIVATE}"),
				ecsKeyValuePair("WEB_APP_AWS_S3_BUCKET_PUBLIC", "{AWS_S3_BUCKET_PUBLIC}"),
				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_UPDATE_TASK_IPS, "{ROUTE53_UPDATE_TASK_IPS}"),
				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_ZONES, "{ROUTE53_ZONES}"),
			)

			// Define the full task definition for the service.
			def := &ecs.RegisterTaskDefinitionInput{
				Family:           aws.String(srv.ServiceName),
				ExecutionRoleArn: aws.String(srv.AwsEcsExecutionRole.Arn()),
				TaskRoleArn:      aws.String(srv.AwsEcsTaskRole.Arn()),
				NetworkMode:      aws.String("awsvpc"),
				ContainerDefinitions: []*ecs.ContainerDefinition{
					// Include the single container definition for the service. Additional definitions could be added
					// here like one for datadog.
					container1,
				},
				RequiresCompatibilities: aws.StringSlice([]string{"FARGATE"}),
			}

			return def, nil
		}

	// Define the ServiceContext for the web-api that will be used for build and deploy.
	case ServiceWebApi:

		ctx = &ServiceContext{
			ServiceConfig: webApiCfg,
		}

		// Define the service task definition with a function to enable use of config and deploy details.
		ctx.AwsEcsTaskDefinition = func(cfg *devdeploy.Config, srv *devdeploy.DeployService) (*ecs.RegisterTaskDefinitionInput, error) {

			// Define a container definition for the specific service.
			container1 := &ecs.ContainerDefinition{
				Name:      aws.String(ctx.Name),
				Image:     aws.String(srv.ReleaseImage),
				Essential: aws.Bool(true),
				LogConfiguration: &ecs.LogConfiguration{
					LogDriver: aws.String("awslogs"),
					Options: map[string]*string{
						"awslogs-group":         aws.String(srv.AwsCloudWatchLogGroup.LogGroupName),
						"awslogs-region":        aws.String(cfg.AwsCredentials.Region),
						"awslogs-stream-prefix": aws.String("ecs"),
					},
				},
				PortMappings: []*ecs.PortMapping{
					&ecs.PortMapping{
						HostPort:      aws.Int64(80),
						Protocol:      aws.String("tcp"),
						ContainerPort: aws.Int64(80),
					},
				},
				Cpu:               aws.Int64(128),
				MemoryReservation: aws.Int64(128),
				Environment:       baseEnvVals(cfg, srv),
				HealthCheck: &ecs.HealthCheck{
					Retries: aws.Int64(3),
					Command: aws.StringSlice([]string{
						"CMD-SHELL",
						"curl -f http://localhost/ping || exit 1",
					}),
					Timeout:     aws.Int64(5),
					Interval:    aws.Int64(60),
					StartPeriod: aws.Int64(60),
				},
				Ulimits: []*ecs.Ulimit{
					&ecs.Ulimit{
						Name:      aws.String("nofile"),
						SoftLimit: aws.Int64(987654),
						HardLimit: aws.Int64(999999),
					},
				},
			}

			// If the service has HTTPS enabled without the use of an AWS Elastic Load Balancer, then need to enable
			// traffic for port 443 for SSL traffic to get terminated on the deployed tasks.
			if ctx.EnableHTTPS && !ctx.EnableElb {
				container1.PortMappings = append(container1.PortMappings, &ecs.PortMapping{
					HostPort:      aws.Int64(443),
					Protocol:      aws.String("tcp"),
					ContainerPort: aws.Int64(443),
				})
			}

			// Append env vars for the service task.
			container1.Environment = append(container1.Environment,
				ecsKeyValuePair("SERVICE_NAME", srv.ServiceName),
				ecsKeyValuePair("PROJECT_NAME", cfg.ProjectName),

				// Use placeholders for these environment variables that will be replaced by devdeploy.DeployServiceToTargetEnv.
				ecsKeyValuePair("WEB_API_HTTP_HOST", "{HTTP_HOST}"),
				ecsKeyValuePair("WEB_API_HTTPS_HOST", "{HTTPS_HOST}"),
				ecsKeyValuePair("WEB_API_SERVICE_ENABLE_HTTPS", "{HTTPS_ENABLED}"),
				ecsKeyValuePair("WEB_API_SERVICE_BASE_URL", "{APP_BASE_URL}"),
				ecsKeyValuePair("WEB_API_SERVICE_HOST_NAMES", "{HOST_NAMES}"),
				ecsKeyValuePair("WEB_API_SERVICE_STATICFILES_S3_ENABLED", "{STATIC_FILES_S3_ENABLED}"),
				ecsKeyValuePair("WEB_API_SERVICE_STATICFILES_S3_PREFIX", "{STATIC_FILES_S3_PREFIX}"),
				ecsKeyValuePair("WEB_API_SERVICE_STATICFILES_CLOUDFRONT_ENABLED", "{STATIC_FILES_CLOUDFRONT_ENABLED}"),
				ecsKeyValuePair("WEB_API_REDIS_HOST", "{CACHE_HOST}"),
				ecsKeyValuePair("WEB_API_DB_HOST", "{DB_HOST}"),
				ecsKeyValuePair("WEB_API_DB_USERNAME", "{DB_USER}"),
				ecsKeyValuePair("WEB_API_DB_PASSWORD", "{DB_PASS}"),
				ecsKeyValuePair("WEB_API_DB_DATABASE", "{DB_DATABASE}"),
				ecsKeyValuePair("WEB_API_DB_DRIVER", "{DB_DRIVER}"),
				ecsKeyValuePair("WEB_API_DB_DISABLE_TLS", "{DB_DISABLE_TLS}"),
				ecsKeyValuePair("WEB_API_AWS_S3_BUCKET_PRIVATE", "{AWS_S3_BUCKET_PRIVATE}"),
				ecsKeyValuePair("WEB_API_AWS_S3_BUCKET_PUBLIC", "{AWS_S3_BUCKET_PUBLIC}"),
				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_UPDATE_TASK_IPS, "{ROUTE53_UPDATE_TASK_IPS}"),
				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_ZONES, "{ROUTE53_ZONES}"),
			)

			// Define the full task definition for the service.
			def := &ecs.RegisterTaskDefinitionInput{
				Family:           aws.String(srv.ServiceName),
				ExecutionRoleArn: aws.String(srv.AwsEcsExecutionRole.Arn()),
				TaskRoleArn:      aws.String(srv.AwsEcsTaskRole.Arn()),
				NetworkMode:      aws.String("awsvpc"),
				ContainerDefinitions: []*ecs.ContainerDefinition{
					// Include the single container definition for the service. Additional definitions could be added
					// here like one for datadog.
					container1,
				},
				RequiresCompatibilities: aws.StringSlice([]string{"FARGATE"}),
			}

			return def, nil
		}

	default:
		return nil, errors.Wrapf(devdeploy.ErrInvalidService,
			"No service context defined for service '%s'",
			serviceName)
	}

	return ctx, nil
}

// Build handles defining all the information needed to build a service with docker and push it to AWS ECR.
func (ctx *ServiceContext) Build(log *log.Logger, noCache, noPush bool) (*devdeploy.BuildService, error) {

	log.Printf("Define build for service '%s'.", ctx.Name)
	log.Printf("\tUsing release tag %s.", ctx.ReleaseTag)

	srv := &devdeploy.BuildService{
		ServiceName:        ctx.Name,
		ReleaseTag:         ctx.ReleaseTag,
		BuildDir:           ctx.BuildDir,
		Dockerfile:         ctx.Dockerfile,
		DockerBuildContext: ctx.DockerBuildContext,
		NoCache:            noCache,
		NoPush:             noPush,
	}

	return srv, nil
}

// Deploy handles defining all the information needed to deploy a service to AWS ECS.
func (ctx *ServiceContext) Deploy(log *log.Logger, cfg *devdeploy.Config) (*devdeploy.DeployService, error) {

	log.Printf("Define deploy for service '%s'.", ctx.Name)
	log.Printf("\tUsing release tag %s.", ctx.ReleaseTag)

	// Start to define all the information for the service from the service context.
	srv := &devdeploy.DeployService{
		ServiceName:        ctx.Name,
		ReleaseTag:         ctx.ReleaseTag,
		EnableHTTPS:        ctx.EnableHTTPS,
		ServiceHostPrimary: ctx.ServiceHostPrimary,
		ServiceHostNames:   ctx.ServiceHostNames,
	}

	// When only service host names are set, choose the first item as the primary host.
	if srv.ServiceHostPrimary == "" && len(srv.ServiceHostNames) > 0 {
		srv.ServiceHostPrimary = srv.ServiceHostNames[0]
		log.Printf("\t\tSet Service Primary Host to '%s'.", srv.ServiceHostPrimary)
	}

	// The S3 prefix used to upload static files served to the public.
	if ctx.StaticFilesS3Enable {
		srv.StaticFilesS3Prefix = filepath.Join(cfg.AwsS3BucketPublicKeyPrefix, srv.ReleaseTag, "static")
	}

	// Determine the Dockerfile for the service.
	if ctx.Dockerfile != "" {
		srv.Dockerfile = ctx.Dockerfile
		log.Printf("\t\tUsing docker file '%s'.", srv.Dockerfile)
	} else {
		var err error
		srv.Dockerfile, err = devdeploy.FindServiceDockerFile(cfg.ProjectRoot, srv.ServiceName)
		if err != nil {
			return nil, err
		}
		log.Printf("\t\tFound service docker file '%s'.", srv.Dockerfile)
	}

	// Set the service directory.
	if ctx.ServiceDir == "" {
		ctx.ServiceDir = filepath.Dir(srv.Dockerfile)
	}
	srv.StaticFilesDir = filepath.Join(ctx.ServiceDir, "static")

	// Define the ECS Cluster used to host the serverless fargate tasks.
	srv.AwsEcsCluster = &devdeploy.AwsEcsCluster{
		ClusterName: cfg.ProjectName + "-" + cfg.Env,
		Tags: []devdeploy.Tag{
			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
		},
	}

	// Define the ECS task execution role. This role executes ECS actions such as pulling the image and storing the
	// application logs in cloudwatch.
	srv.AwsEcsExecutionRole = &devdeploy.AwsIamRole{
		RoleName:                 fmt.Sprintf("ecsExecutionRole%s%s", cfg.ProjectNameCamel(), strcase.ToCamel(cfg.Env)),
		Description:              fmt.Sprintf("Provides access to other AWS service resources that are required to run Amazon ECS tasks for %s. ", cfg.ProjectName),
		AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
		Tags: []devdeploy.Tag{
			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
		},
		AttachRolePolicyArns: []string{"arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"},
	}
	log.Printf("\t\tSet ECS Execution Role Name to '%s'.", srv.AwsEcsExecutionRole.RoleName)

	// Define the ECS task role. This role is used by the task itself for calling other AWS services.
	srv.AwsEcsTaskRole = &devdeploy.AwsIamRole{
		RoleName:                 fmt.Sprintf("ecsTaskRole%s%s", cfg.ProjectNameCamel(), strcase.ToCamel(cfg.Env)),
		Description:              fmt.Sprintf("Allows ECS tasks for %s to call AWS services on your behalf.", cfg.ProjectName),
		AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
		Tags: []devdeploy.Tag{
			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
		},
	}
	log.Printf("\t\tSet ECS Task Role Name to '%s'.", srv.AwsEcsTaskRole.RoleName)

	// AwsCloudWatchLogGroup defines the name of the cloudwatch log group that will be used to store logs for the ECS tasks.
	srv.AwsCloudWatchLogGroup = &devdeploy.AwsCloudWatchLogGroup{
		LogGroupName: fmt.Sprintf("logs/env_%s/aws/ecs/cluster_%s/service_%s", cfg.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
		Tags: []devdeploy.Tag{
			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
		},
	}
	log.Printf("\t\tSet AWS Log Group Name to '%s'.", srv.AwsCloudWatchLogGroup.LogGroupName)

	// AwsSdPrivateDnsNamespace defines the service discovery group.
	srv.AwsSdPrivateDnsNamespace = &devdeploy.AwsSdPrivateDnsNamespace{
		Name:        srv.AwsEcsCluster.ClusterName,
		Description: fmt.Sprintf("Private DNS namespace used for services running on the ECS Cluster %s", srv.AwsEcsCluster.ClusterName),
		Service: &devdeploy.AwsSdService{
			Name:                        ctx.Name,
			Description:                 fmt.Sprintf("Service %s running on the ECS Cluster %s", ctx.Name, srv.AwsEcsCluster.ClusterName),
			DnsRecordTTL:                300,
			HealthCheckFailureThreshold: 3,
		},
	}
	log.Printf("\t\tSet AWS Service Discovery Namespace to '%s'.", srv.AwsSdPrivateDnsNamespace.Name)

	// If the service is requested to use an elastic load balancer then define it.
	if ctx.EnableElb {
		// AwsElbLoadBalancer defines if the service should use an elastic load balancer.
		srv.AwsElbLoadBalancer = &devdeploy.AwsElbLoadBalancer{
			Name:          fmt.Sprintf("%s-%s-%s", cfg.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
			IpAddressType: "ipv4",
			Scheme:        "internet-facing",
			Type:          "application",
			Tags: []devdeploy.Tag{
				{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
				{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
			},
		}
		log.Printf("\t\tSet ELB Name to '%s'.", srv.AwsElbLoadBalancer.Name)

		// Define the target group for service to receive HTTP traffic from the load balancer.
		srv.AwsElbLoadBalancer.TargetGroup = &devdeploy.AwsElbTargetGroup{
			Name:                       fmt.Sprintf("%s-http", srv.ServiceName),
			Port:                       80,
			Protocol:                   "HTTP",
			TargetType:                 "ip",
			HealthCheckEnabled:         true,
			HealthCheckIntervalSeconds: 30,
			HealthCheckPath:            "/ping",
			HealthCheckProtocol:        "HTTP",
			HealthCheckTimeoutSeconds:  5,
			HealthyThresholdCount:      3,
			UnhealthyThresholdCount:    3,
			Matcher:                    "200",
		}
		log.Printf("\t\t\tSet ELB Target Group Name for %s to '%s'.",
			srv.AwsElbLoadBalancer.TargetGroup.Protocol,
			srv.AwsElbLoadBalancer.TargetGroup.Name)

		// Set ECS configs based on specified env.
		if cfg.Env == "prod" {
			srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 300
		} else {
			// Force staging to deploy immediately without waiting for connections to drain.
			srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 0
		}
	}

	// AwsEcsService defines the details for the ecs service.
	srv.AwsEcsService = &devdeploy.AwsEcsService{
		ServiceName:                   ctx.Name,
		DesiredCount:                  int64(ctx.DesiredCount),
		EnableECSManagedTags:          false,
		HealthCheckGracePeriodSeconds: 60,
		LaunchType:                    "FARGATE",
	}

	// Ensure when deploying a new service there is always at least one task running.
	if srv.AwsEcsService.DesiredCount == 0 {
		srv.AwsEcsService.DesiredCount = 1
	}

	// Set ECS configs based on specified env.
	if cfg.Env == "prod" {
		srv.AwsEcsService.DeploymentMinimumHealthyPercent = 100
		srv.AwsEcsService.DeploymentMaximumPercent = 200
	} else {
		srv.AwsEcsService.DeploymentMinimumHealthyPercent = 100
		srv.AwsEcsService.DeploymentMaximumPercent = 200
	}

	// AwsEcsTaskDefinition defines the details for registering a new ECS task definition.
	taskDef, err := ctx.AwsEcsTaskDefinition(cfg, srv)
	if err != nil {
		return nil, err
	}

	srv.AwsEcsTaskDefinition = &devdeploy.AwsEcsTaskDefinition{
		RegisterInput: taskDef,
		UpdatePlaceholders: func(placeholders map[string]string) error {

			// Try to find the Datadog API key, this value is optional.
			// If the Datadog API key is not specified, then integration with Datadog for observability will not be active.
			{
				datadogApiKey, err := getDatadogApiKey(cfg)
				if err != nil {
					return err
				}

				if datadogApiKey != "" {
					log.Println("DATADOG API Key set.")
				} else {
					log.Printf("DATADOG API Key NOT set.")
				}

				placeholders["{DATADOG_APIKEY}"] = datadogApiKey

				// When the datadog API key is empty, don't force the container to be essential and have the whole task fail.
				if datadogApiKey != "" {
					placeholders["{DATADOG_ESSENTIAL}"] = "true"
				} else {
					placeholders["{DATADOG_ESSENTIAL}"] = "false"
				}
			}

			return nil
		},
	}

	log.Printf("\t\tDeploying task to '%s'.", ctx.ServiceHostPrimary)

	return srv, nil
}

// BuildServiceForTargetEnv executes the build commands for a target service.
func BuildServiceForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, serviceName, releaseTag string, dryRun, noCache, noPush bool) error {

	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
	if err != nil {
		return err
	}

	cfg, err := cfgCtx.Config(log)
	if err != nil {
		return err
	}

	srvCtx, err := NewServiceContext(serviceName, cfg)
	if err != nil {
		return err
	}

	// Override the release tag if set.
	if releaseTag != "" {
		srvCtx.ReleaseTag = releaseTag
	}

	details, err := srvCtx.Build(log, noCache, noPush)
	if err != nil {
		return err
	}

	// servicePath is used to copy the service specific code in the Dockerfile.
	servicePath, err := filepath.Rel(cfg.ProjectRoot, srvCtx.ServiceDir)
	if err != nil {
		return err
	}

	// commitRef is used to set the build constant in main.go.
	commitRef := getCommitRef()
	if commitRef == "" {
		commitRef = srvCtx.ReleaseTag
	}

	details.BuildArgs = map[string]string{
		"service_path": servicePath,
		"commit_ref":   commitRef,
	}

	if dryRun {
		cfgJSON, err := json.MarshalIndent(cfg, "", " ")
		if err != nil {
			log.Fatalf("BuildServiceForTargetEnv : Marshalling config to JSON : %+v", err)
		}
		log.Printf("BuildServiceForTargetEnv : config : %v\n", string(cfgJSON))

		detailsJSON, err := json.MarshalIndent(details, "", " ")
		if err != nil {
			log.Fatalf("BuildServiceForTargetEnv : Marshalling details to JSON : %+v", err)
		}
		log.Printf("BuildServiceForTargetEnv : details : %v\n", string(detailsJSON))

		return nil
	}

	return devdeploy.BuildServiceForTargetEnv(log, cfg, details)
}

// DeployServiceForTargetEnv executes the deploy commands for a target service.
func DeployServiceForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, serviceName, releaseTag string, dryRun bool) error {

	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
	if err != nil {
		return err
	}

	cfg, err := cfgCtx.Config(log)
	if err != nil {
		return err
	}

	srvCtx, err := NewServiceContext(serviceName, cfg)
	if err != nil {
		return err
	}

	// Override the release tag if set.
	if releaseTag != "" {
		srvCtx.ReleaseTag = releaseTag
	}

	details, err := srvCtx.Deploy(log, cfg)
	if err != nil {
		return err
	}

	return devdeploy.DeployServiceToTargetEnv(log, cfg, details)
}

// ecsKeyValuePair returns an *ecs.KeyValuePair.
func ecsKeyValuePair(name, value string) *ecs.KeyValuePair {
	return &ecs.KeyValuePair{
		Name:  aws.String(name),
		Value: aws.String(value),
	}
}
249
build/cicd/main.go
Normal file
@@ -0,0 +1,249 @@
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"geeks-accelerator/oss/saas-starter-kit/build/cicd/internal/config"
	"github.com/urfave/cli"
	"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

// service is the name of the program used for logging, tracing, etc.
var service = "CICD"

func main() {

	// =========================================================================
	// Logging
	log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile)
	log.SetPrefix(service + " : ")
	log := log.New(os.Stdout, log.Prefix(), log.Flags())

	// =========================================================================
	// New CLI application.
	app := cli.NewApp()
	app.Name = "cicd"
	app.Usage = "Provides build and deploy for GitLab to Amazon AWS"
	app.Version = "1.0"
	app.Author = "Lee Brown"
	app.Email = "lee@geeksinthewoods.com"

	// Define global CLI flags.
	var awsCredentials devdeploy.AwsCredentials
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name: "env",
			Usage: fmt.Sprintf("target environment, one of [%s]",
				strings.Join(config.EnvNames, ", ")),
			Required: true,
		},
		cli.StringFlag{
			Name:        "aws-access-key",
			Usage:       "AWS Access Key",
			EnvVar:      "AWS_ACCESS_KEY_ID",
			Destination: &awsCredentials.AccessKeyID,
		},
		cli.StringFlag{
			Name:        "aws-secret-key",
			Usage:       "AWS Secret Key",
			EnvVar:      "AWS_SECRET_ACCESS_KEY",
			Destination: &awsCredentials.SecretAccessKey,
		},
		cli.StringFlag{
			Name:        "aws-region",
			Usage:       "AWS Region",
			EnvVar:      "AWS_REGION",
			Destination: &awsCredentials.Region,
		},
		cli.BoolFlag{
			Name:        "aws-use-role",
			Usage:       "Use an IAM Role else AWS Access/Secret Keys are required",
			EnvVar:      "AWS_USE_ROLE",
			Destination: &awsCredentials.UseRole,
		},
	}

	app.Commands = []cli.Command{
		// Build command for services and functions.
		{
			Name:    "build",
			Aliases: []string{"b"},
			Usage:   "build a service or function",
			Subcommands: []cli.Command{
				{
					Name:  "service",
					Usage: "build a service",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name: "name, n",
							Usage: fmt.Sprintf("target service, one of [%s]",
								strings.Join(config.ServiceNames, ", ")),
							Required: true,
						},
						cli.StringFlag{
							Name:  "release-tag, tag",
							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
						},
						cli.BoolFlag{
							Name:  "dry-run",
							Usage: "print out the build details",
						},
						cli.BoolFlag{
							Name:  "no-cache",
							Usage: "skip caching for the docker build",
						},
						cli.BoolFlag{
							Name:  "no-push",
							Usage: "disable pushing release image to remote repository",
						},
					},
					Action: func(c *cli.Context) error {
						targetEnv := c.GlobalString("env")
						serviceName := c.String("name")
						releaseTag := c.String("release-tag")
						dryRun := c.Bool("dry-run")
						noCache := c.Bool("no-cache")
						noPush := c.Bool("no-push")

						return config.BuildServiceForTargetEnv(log, awsCredentials, targetEnv, serviceName, releaseTag, dryRun, noCache, noPush)
					},
				},
				{
					Name:  "function",
					Usage: "build a function",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name: "name, n",
							Usage: fmt.Sprintf("target function, one of [%s]",
								strings.Join(config.FunctionNames, ", ")),
							Required: true,
						},
						cli.StringFlag{
							Name:  "release-tag, tag",
							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
						},
						cli.BoolFlag{
							Name:  "dry-run",
							Usage: "print out the build details",
						},
						cli.BoolFlag{
							Name:  "no-cache",
							Usage: "skip caching for the docker build",
						},
						cli.BoolFlag{
							Name:  "no-push",
							Usage: "disable pushing release image to remote repository",
						},
					},
					Action: func(c *cli.Context) error {
						targetEnv := c.GlobalString("env")
						funcName := c.String("name")
						releaseTag := c.String("release-tag")
						dryRun := c.Bool("dry-run")
						noCache := c.Bool("no-cache")
						noPush := c.Bool("no-push")

						return config.BuildFunctionForTargetEnv(log, awsCredentials, targetEnv, funcName, releaseTag, dryRun, noCache, noPush)
					},
				},
			},
		},

		// Deploy command for services and functions.
		{
			Name:    "deploy",
			Aliases: []string{"d"},
			Usage:   "deploy a service or function",
			Subcommands: []cli.Command{
				{
					Name:  "service",
					Usage: "deploy a service",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name: "name, n",
							Usage: fmt.Sprintf("target service, one of [%s]",
								strings.Join(config.ServiceNames, ", ")),
							Required: true,
						},
						cli.StringFlag{
							Name:  "release-tag, tag",
							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
						},
						cli.BoolFlag{
							Name:  "dry-run",
							Usage: "print out the deploy details",
						},
					},
					Action: func(c *cli.Context) error {
						targetEnv := c.GlobalString("env")
						serviceName := c.String("name")
						releaseTag := c.String("release-tag")
						dryRun := c.Bool("dry-run")

						return config.DeployServiceForTargetEnv(log, awsCredentials, targetEnv, serviceName, releaseTag, dryRun)
					},
				},
				{
					Name:  "function",
					Usage: "deploy a function",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name: "name, n",
							Usage: fmt.Sprintf("target function, one of [%s]",
								strings.Join(config.FunctionNames, ", ")),
							Required: true,
						},
						cli.StringFlag{
							Name:  "release-tag, tag",
							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
						},
						cli.BoolFlag{
							Name:  "dry-run",
							Usage: "print out the deploy details",
						},
					},
					Action: func(c *cli.Context) error {
						targetEnv := c.GlobalString("env")
						funcName := c.String("name")
						releaseTag := c.String("release-tag")
						dryRun := c.Bool("dry-run")

						return config.DeployFunctionForTargetEnv(log, awsCredentials, targetEnv, funcName, releaseTag, dryRun)
					},
				},
			},
		},

		// Schema command used to run database schema migrations.
		{
			Name:    "schema",
			Aliases: []string{"s"},
			Usage:   "manage the database schema",
			Subcommands: []cli.Command{
				{
					Name:  "migrate",
					Usage: "run the schema migrations",
					Flags: []cli.Flag{
						cli.BoolFlag{
							Name:  "unittest",
							Usage: "run the schema migrations for unit tests",
						},
					},
					Action: func(c *cli.Context) error {
						targetEnv := c.GlobalString("env")
						isUnittest := c.Bool("unittest")

						return config.RunSchemaMigrationsForTargetEnv(log, awsCredentials, targetEnv, isUnittest)
					},
				},
			},
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatalf("%+v", err)
	}
}
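The wiring above follows the standard urfave/cli v1 pattern: global flags are read with c.GlobalString while subcommand flags use c.String. A minimal self-contained sketch (demo names only, not the real tool):

package main

import (
	"fmt"
	"log"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "cicd-demo"

	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "env", Required: true},
	}
	app.Commands = []cli.Command{
		{
			Name: "build",
			Subcommands: []cli.Command{
				{
					Name:  "service",
					Flags: []cli.Flag{cli.StringFlag{Name: "name, n"}},
					Action: func(c *cli.Context) error {
						// Global flags come from c.GlobalString, subcommand flags from c.String.
						fmt.Printf("build %s for env %s\n", c.String("name"), c.GlobalString("env"))
						return nil
					},
				},
			},
		},
	}

	// Equivalent to running: cicd-demo --env=dev build service --name=web-app
	if err := app.Run([]string{"cicd-demo", "--env=dev", "build", "service", "--name=web-app"}); err != nil {
		log.Fatalf("%+v", err)
	}
}
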
@@ -1,4 +1,4 @@
FROM golang:1.12.6-alpine3.9 AS build_base_golang
FROM golang:1.12.9-alpine3.9 AS build_base_golang

LABEL maintainer="lee@geeksinthewoods.com"

@@ -47,7 +47,7 @@ COPY cmd/${service}/templates /templates

# Copy the global templates.
ADD resources/templates/shared /templates/shared
ADD fresh-auto-reload.conf /runner.conf
ADD configs/fresh-auto-reload.conf /runner.conf

ENV TEMPLATE_DIR=/templates

@ -4,10 +4,8 @@ import (
    "context"
    "net/http"
    "strconv"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    accountref "geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
@ -19,30 +17,11 @@ import (

// Account represents the Account API method handler set.
type Accounts struct {
    Repository AccountRepository
    Repository *account.Repository

    // ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}

type AccountRepository interface {
    //CanReadAccount(ctx context.Context, claims auth.Claims, dbConn *sqlx.DB, accountID string) error
    Find(ctx context.Context, claims auth.Claims, req account.AccountFindRequest) (account.Accounts, error)
    Create(ctx context.Context, claims auth.Claims, req account.AccountCreateRequest, now time.Time) (*account.Account, error)
    ReadByID(ctx context.Context, claims auth.Claims, id string) (*account.Account, error)
    Read(ctx context.Context, claims auth.Claims, req account.AccountReadRequest) (*account.Account, error)
    Update(ctx context.Context, claims auth.Claims, req account.AccountUpdateRequest, now time.Time) error
    Archive(ctx context.Context, claims auth.Claims, req account.AccountArchiveRequest, now time.Time) error
    Delete(ctx context.Context, claims auth.Claims, req account.AccountDeleteRequest) error
}

type AccountPrefRepository interface {
    Find(ctx context.Context, claims auth.Claims, req accountref.AccountPreferenceFindRequest) ([]*accountref.AccountPreference, error)
    FindByAccountID(ctx context.Context, claims auth.Claims, req accountref.AccountPreferenceFindByAccountIDRequest) ([]*accountref.AccountPreference, error)
    Read(ctx context.Context, claims auth.Claims, req accountref.AccountPreferenceReadRequest) (*accountref.AccountPreference, error)
    Set(ctx context.Context, claims auth.Claims, req accountref.AccountPreferenceSetRequest, now time.Time) error
    Archive(ctx context.Context, claims auth.Claims, req accountref.AccountPreferenceArchiveRequest, now time.Time) error
    Delete(ctx context.Context, claims auth.Claims, req accountref.AccountPreferenceDeleteRequest) error
}

// Read godoc
// @Summary Get account by ID
// @Description Read returns the specified account from the system.
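This is the recurring pattern in the rest of the diff: the per-handler repository interfaces (AccountRepository, AccountPrefRepository, and their siblings below) are deleted, and each handler field becomes the concrete *Repository type from the matching domain package. A rough sketch of what wiring a handler looks like after the change; the account.NewRepository constructor name is an assumption for illustration, since this diff does not show how the repositories are built:

    package main

    import (
        "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
        "geeks-accelerator/oss/saas-starter-kit/internal/account"

        "github.com/jmoiron/sqlx"
    )

    // newAccountsHandler shows the shape of the wiring only.
    func newAccountsHandler(db *sqlx.DB) handlers.Accounts {
        repo := account.NewRepository(db) // hypothetical constructor

        // No local interface remains to satisfy: the handler now
        // holds the concrete repository directly.
        return handlers.Accounts{Repository: repo}
    }

The trade-off appears deliberate: the interfaces existed so each handler set could be tested against mocks, but they were re-declared per handler; pointing at the concrete repository removes that duplication at the cost of mocking at the repository boundary instead.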
@ -6,6 +6,7 @@ import (
    "os"

    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"

    "github.com/jmoiron/sqlx"
    "github.com/pkg/errors"
    "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"

@ -2,19 +2,20 @@ package handlers

import (
    "context"
    "net/http"

    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"
    "geeks-accelerator/oss/saas-starter-kit/internal/project"
    "net/http"

    "github.com/pkg/errors"
)

// Example represents the Example API method handler set.
type Example struct {
    Project ProjectRepository
    Project *project.Repository

    // ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}

@ -5,7 +5,6 @@ import (
    "net/http"
    "strconv"
    "strings"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
@ -19,21 +18,11 @@ import (

// Project represents the Project API method handler set.
type Projects struct {
    Repository ProjectRepository
    Repository *project.Repository

    // ADD OTHER STATE LIKE THE LOGGER IF NEEDED.
}

type ProjectRepository interface {
    ReadByID(ctx context.Context, claims auth.Claims, id string) (*project.Project, error)
    Find(ctx context.Context, claims auth.Claims, req project.ProjectFindRequest) (project.Projects, error)
    Read(ctx context.Context, claims auth.Claims, req project.ProjectReadRequest) (*project.Project, error)
    Create(ctx context.Context, claims auth.Claims, req project.ProjectCreateRequest, now time.Time) (*project.Project, error)
    Update(ctx context.Context, claims auth.Claims, req project.ProjectUpdateRequest, now time.Time) error
    Archive(ctx context.Context, claims auth.Claims, req project.ProjectArchiveRequest, now time.Time) error
    Delete(ctx context.Context, claims auth.Claims, req project.ProjectDeleteRequest) error
}

// Find godoc
// TODO: Need to implement unittests on projects/find endpoint. There are none.
// @Summary List projects

@ -5,13 +5,20 @@ import (
    "net/http"
    "os"

    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    "geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/mid"
    saasSwagger "geeks-accelerator/oss/saas-starter-kit/internal/mid/saas-swagger"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    _ "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"
    _ "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    "geeks-accelerator/oss/saas-starter-kit/internal/project"
    "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    "geeks-accelerator/oss/saas-starter-kit/internal/user"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_auth"

    "github.com/jmoiron/sqlx"
    "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"

@ -22,14 +29,14 @@ type AppContext struct {
    Env      webcontext.Env
    MasterDB *sqlx.DB
    Redis    *redis.Client
    UserRepo        UserRepository
    UserAccountRepo UserAccountRepository
    AccountRepo     AccountRepository
    AccountPrefRepo AccountPrefRepository
    AuthRepo        UserAuthRepository
    SignupRepo      SignupRepository
    InviteRepo      UserInviteRepository
    ProjectRepo     ProjectRepository
    UserRepo        *user.Repository
    UserAccountRepo *user_account.Repository
    AccountRepo     *account.Repository
    AccountPrefRepo *account_preference.Repository
    AuthRepo        *user_auth.Repository
    SignupRepo      *signup.Repository
    InviteRepo      *invite.Repository
    ProjectRepo     *project.Repository
    Authenticator   *auth.Authenticator
    PreAppMiddleware  []web.Middleware
    PostAppMiddleware []web.Middleware
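With AppContext holding concrete repository pointers, route registration can hand the same instances straight to the handler sets with no interface conversion in between. A sketch of that fan-out using the fields above (the function itself is illustrative and not part of this diff):

    // Illustrative only: maps the shared AppContext repositories
    // onto the handler sets defined earlier in this diff.
    func apiHandlerSets(appCtx *AppContext) (Accounts, Signup) {
        accounts := Accounts{Repository: appCtx.AccountRepo} // *account.Repository
        signup := Signup{Repository: appCtx.SignupRepo}      // *signup.Repository
        return accounts, signup
    }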
@ -2,8 +2,6 @@ package handlers

import (
    "context"
    "net/http"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
@ -11,6 +9,7 @@ import (
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"
    "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    "net/http"

    "github.com/pkg/errors"
    "gopkg.in/go-playground/validator.v9"
@ -18,15 +17,11 @@ import (

// Signup represents the Signup API method handler set.
type Signup struct {
    Repository SignupRepository
    Repository *signup.Repository

    // ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}

type SignupRepository interface {
    Signup(ctx context.Context, claims auth.Claims, req signup.SignupRequest, now time.Time) (*signup.SignupResult, error)
}

// Signup godoc
// @Summary Signup handles new account creation.
// @Description Signup creates a new account and user in the system.

@ -24,35 +24,12 @@ var sessionTtl = time.Hour * 24

// User represents the User API method handler set.
type Users struct {
    AuthRepo UserAuthRepository
    UserRepo UserRepository
    AuthRepo *user_auth.Repository
    UserRepo *user.Repository

    // ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}

type UserAuthRepository interface {
    SwitchAccount(ctx context.Context, claims auth.Claims, req user_auth.SwitchAccountRequest, expires time.Duration,
        now time.Time, scopes ...string) (user_auth.Token, error)
    Authenticate(ctx context.Context, req user_auth.AuthenticateRequest, expires time.Duration, now time.Time, scopes ...string) (user_auth.Token, error)
    VirtualLogin(ctx context.Context, claims auth.Claims, req user_auth.VirtualLoginRequest,
        expires time.Duration, now time.Time, scopes ...string) (user_auth.Token, error)
    VirtualLogout(ctx context.Context, claims auth.Claims, expires time.Duration, now time.Time, scopes ...string) (user_auth.Token, error)
}

type UserRepository interface {
    Find(ctx context.Context, claims auth.Claims, req user.UserFindRequest) (user.Users, error)
    //FindByAccount(ctx context.Context, claims auth.Claims, req user.UserFindByAccountRequest) (user.Users, error)
    Read(ctx context.Context, claims auth.Claims, req user.UserReadRequest) (*user.User, error)
    ReadByID(ctx context.Context, claims auth.Claims, id string) (*user.User, error)
    Create(ctx context.Context, claims auth.Claims, req user.UserCreateRequest, now time.Time) (*user.User, error)
    Update(ctx context.Context, claims auth.Claims, req user.UserUpdateRequest, now time.Time) error
    UpdatePassword(ctx context.Context, claims auth.Claims, req user.UserUpdatePasswordRequest, now time.Time) error
    Archive(ctx context.Context, claims auth.Claims, req user.UserArchiveRequest, now time.Time) error
    Restore(ctx context.Context, claims auth.Claims, req user.UserRestoreRequest, now time.Time) error
    Delete(ctx context.Context, claims auth.Claims, req user.UserDeleteRequest) error
    ResetPassword(ctx context.Context, req user.UserResetPasswordRequest, now time.Time) (string, error)
    ResetConfirm(ctx context.Context, req user.UserResetConfirmRequest, now time.Time) (*user.User, error)
}

// Find godoc
// TODO: Need to implement unittests on users/find endpoint. There are none.
// @Summary List users
@ -5,13 +5,11 @@ import (
    "net/http"
    "strconv"
    "strings"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"

    "geeks-accelerator/oss/saas-starter-kit/internal/user_account"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"

@ -21,28 +19,12 @@ import (

// UserAccount represents the UserAccount API method handler set.
type UserAccount struct {
    UserInvite UserInviteRepository
    Repository UserAccountRepository
    UserInvite *invite.Repository
    Repository *user_account.Repository

    // ADD OTHER STATE LIKE THE LOGGER AND CONFIG HERE.
}

type UserAccountRepository interface {
    Find(ctx context.Context, claims auth.Claims, req user_account.UserAccountFindRequest) (user_account.UserAccounts, error)
    FindByUserID(ctx context.Context, claims auth.Claims, userID string, includedArchived bool) (user_account.UserAccounts, error)
    UserFindByAccount(ctx context.Context, claims auth.Claims, req user_account.UserFindByAccountRequest) (user_account.Users, error)
    Create(ctx context.Context, claims auth.Claims, req user_account.UserAccountCreateRequest, now time.Time) (*user_account.UserAccount, error)
    Read(ctx context.Context, claims auth.Claims, req user_account.UserAccountReadRequest) (*user_account.UserAccount, error)
    Update(ctx context.Context, claims auth.Claims, req user_account.UserAccountUpdateRequest, now time.Time) error
    Archive(ctx context.Context, claims auth.Claims, req user_account.UserAccountArchiveRequest, now time.Time) error
    Delete(ctx context.Context, claims auth.Claims, req user_account.UserAccountDeleteRequest) error
}

type UserInviteRepository interface {
    SendUserInvites(ctx context.Context, claims auth.Claims, req invite.SendUserInvitesRequest, now time.Time) ([]string, error)
    AcceptInvite(ctx context.Context, req invite.AcceptInviteRequest, now time.Time) (*user_account.UserAccount, error)
    AcceptInviteUser(ctx context.Context, req invite.AcceptInviteUserRequest, now time.Time) (*user_account.UserAccount, error)
}

// Find godoc
// TODO: Need to implement unittests on user_accounts/find endpoint. There are none.
// @Summary List user accounts
@ -1,4 +1,4 @@
FROM golang:1.12.6-alpine3.9 AS build_base_golang
FROM golang:1.12.9-alpine3.9 AS build_base_golang

LABEL maintainer="lee@geeksinthewoods.com"

@ -30,7 +30,7 @@ COPY cmd/${service}/static /static

# Copy the global templates.
ADD resources/templates/shared /templates/shared
ADD fresh-auto-reload.conf /runner.conf
ADD configs/fresh-auto-reload.conf /runner.conf

ENV TEMPLATE_DIR=/templates
@ -2,7 +2,9 @@ package handlers

import (
    "context"
    "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
    "net/http"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    "geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/geonames"
@ -10,9 +12,7 @@ import (
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"

    "net/http"
    "time"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_auth"

    "github.com/gorilla/schema"
    "github.com/pkg/errors"
@ -20,10 +20,10 @@ import (

// Account represents the Account API method handler set.
type Account struct {
    AccountRepo     handlers.AccountRepository
    AccountPrefRepo handlers.AccountPrefRepository
    AuthRepo        handlers.UserAuthRepository
    GeoRepo         GeoRepository
    AccountRepo     *account.Repository
    AccountPrefRepo *account_preference.Repository
    AuthRepo        *user_auth.Repository
    GeoRepo         *geonames.Repository
    Authenticator   *auth.Authenticator
    Renderer        web.Renderer
}
@ -16,16 +16,7 @@ import (
// Check provides support for orchestration geo endpoints.
type Geo struct {
    Redis   *redis.Client
    GeoRepo GeoRepository
}

type GeoRepository interface {
    FindGeonames(ctx context.Context, orderBy, where string, args ...interface{}) ([]*geonames.Geoname, error)
    FindGeonamePostalCodes(ctx context.Context, where string, args ...interface{}) ([]string, error)
    FindGeonameRegions(ctx context.Context, orderBy, where string, args ...interface{}) ([]*geonames.Region, error)
    FindCountries(ctx context.Context, orderBy, where string, args ...interface{}) ([]*geonames.Country, error)
    FindCountryTimezones(ctx context.Context, orderBy, where string, args ...interface{}) ([]*geonames.CountryTimezone, error)
    ListTimezones(ctx context.Context) ([]string, error)
    GeoRepo *geonames.Repository
}

// GeonameByPostalCode...
@ -6,6 +6,7 @@ import (
    "os"

    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"

    "github.com/jmoiron/sqlx"
    "github.com/pkg/errors"
    "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"

@ -8,6 +8,7 @@ import (
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"

    "github.com/gorilla/schema"
    "github.com/pkg/errors"
    "golang.org/x/net/html"
@ -3,7 +3,6 @@ package handlers

import (
    "context"
    "fmt"
    "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
    "net/http"
    "strings"

@ -21,7 +20,7 @@ import (

// Projects represents the Projects API method handler set.
type Projects struct {
    ProjectRepo handlers.ProjectRepository
    ProjectRepo *project.Repository
    Redis       *redis.Client
    Renderer    web.Renderer
}
@ -9,6 +9,7 @@ import (
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/project_route"

    "github.com/ikeikeikeike/go-sitemap-generator/v2/stm"
    "github.com/pkg/errors"
    "github.com/sethgrid/pester"
@ -9,22 +9,21 @@ import (
    "path/filepath"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
    //"geeks-accelerator/oss/saas-starter-kit/internal/account"
    //"geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    "geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/geonames"
    "geeks-accelerator/oss/saas-starter-kit/internal/mid"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"

    //"geeks-accelerator/oss/saas-starter-kit/internal/project"
    "geeks-accelerator/oss/saas-starter-kit/internal/project"
    "geeks-accelerator/oss/saas-starter-kit/internal/project_route"
    // "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    // "geeks-accelerator/oss/saas-starter-kit/internal/user"
    // "geeks-accelerator/oss/saas-starter-kit/internal/user_account"
    // "geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
    // "geeks-accelerator/oss/saas-starter-kit/internal/user_auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    "geeks-accelerator/oss/saas-starter-kit/internal/user"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_auth"

    "github.com/ikeikeikeike/go-sitemap-generator/v2/stm"
    "github.com/jmoiron/sqlx"
@ -42,15 +41,15 @@ type AppContext struct {
    Env      webcontext.Env
    MasterDB *sqlx.DB
    Redis    *redis.Client
    UserRepo        handlers.UserRepository
    UserAccountRepo handlers.UserAccountRepository
    AccountRepo     handlers.AccountRepository
    AccountPrefRepo handlers.AccountPrefRepository
    AuthRepo        handlers.UserAuthRepository
    SignupRepo      handlers.SignupRepository
    InviteRepo      handlers.UserInviteRepository
    ProjectRepo     handlers.ProjectRepository
    GeoRepo         GeoRepository
    UserRepo        *user.Repository
    UserAccountRepo *user_account.Repository
    AccountRepo     *account.Repository
    AccountPrefRepo *account_preference.Repository
    AuthRepo        *user_auth.Repository
    SignupRepo      *signup.Repository
    InviteRepo      *invite.Repository
    ProjectRepo     *project.Repository
    GeoRepo         *geonames.Repository
    Authenticator   *auth.Authenticator
    StaticDir       string
    TemplateDir     string
@ -2,7 +2,6 @@ package handlers

import (
    "context"
    "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
    "net/http"
    "time"

@ -22,9 +21,9 @@ import (

// Signup represents the Signup API method handler set.
type Signup struct {
    SignupRepo handlers.SignupRepository
    AuthRepo   handlers.UserAuthRepository
    GeoRepo    GeoRepository
    SignupRepo *signup.Repository
    AuthRepo   *user_auth.Repository
    GeoRepo    *geonames.Repository
    MasterDB   *sqlx.DB
    Renderer   web.Renderer
}
@ -8,9 +8,8 @@ import (
    "strings"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
    "geeks-accelerator/oss/saas-starter-kit/internal/account"

    "geeks-accelerator/oss/saas-starter-kit/internal/geonames"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
@ -27,11 +26,11 @@ import (

// User represents the User API method handler set.
type UserRepos struct {
    UserRepo        handlers.UserRepository
    AuthRepo        handlers.UserAuthRepository
    UserAccountRepo handlers.UserAccountRepository
    AccountRepo     handlers.AccountRepository
    GeoRepo         GeoRepository
    UserRepo        *user.Repository
    AuthRepo        *user_auth.Repository
    UserAccountRepo *user_account.Repository
    AccountRepo     *account.Repository
    GeoRepo         *geonames.Repository
    MasterDB        *sqlx.DB
    Renderer        web.Renderer
    SecretKey       string
@ -3,11 +3,12 @@ package handlers

import (
    "context"
    "fmt"
    "geeks-accelerator/oss/saas-starter-kit/cmd/web-api/handlers"
    "net/http"
    "strings"
    "time"

    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    "geeks-accelerator/oss/saas-starter-kit/internal/geonames"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/datatable"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web"
@ -27,12 +28,12 @@ import (

// Users represents the Users API method handler set.
type Users struct {
    UserRepo        handlers.UserRepository
    AccountRepo     handlers.AccountRepository
    UserAccountRepo handlers.UserAccountRepository
    AuthRepo        handlers.UserAuthRepository
    InviteRepo      handlers.UserInviteRepository
    GeoRepo         GeoRepository
    UserRepo        *user.Repository
    AccountRepo     *account.Repository
    UserAccountRepo *user_account.Repository
    AuthRepo        *user_auth.Repository
    InviteRepo      *invite.Repository
    GeoRepo         *geonames.Repository
    MasterDB        *sqlx.DB
    Redis           *redis.Client
    Renderer        web.Renderer
@ -6,13 +6,6 @@ import (
    "encoding/json"
    "expvar"
    "fmt"
    "geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/geonames"
    "geeks-accelerator/oss/saas-starter-kit/internal/project"
    "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_auth"
    "html/template"
    "log"
    "net"
@ -29,6 +22,8 @@ import (

    "geeks-accelerator/oss/saas-starter-kit/cmd/web-app/handlers"
    "geeks-accelerator/oss/saas-starter-kit/internal/account"
    "geeks-accelerator/oss/saas-starter-kit/internal/account/account_preference"
    "geeks-accelerator/oss/saas-starter-kit/internal/geonames"
    "geeks-accelerator/oss/saas-starter-kit/internal/mid"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/auth"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/devops"
@ -39,8 +34,13 @@ import (
    template_renderer "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/template-renderer"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
    "geeks-accelerator/oss/saas-starter-kit/internal/platform/web/weberror"
    "geeks-accelerator/oss/saas-starter-kit/internal/project"
    "geeks-accelerator/oss/saas-starter-kit/internal/project_route"
    "geeks-accelerator/oss/saas-starter-kit/internal/signup"
    "geeks-accelerator/oss/saas-starter-kit/internal/user"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_account/invite"
    "geeks-accelerator/oss/saas-starter-kit/internal/user_auth"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
1 cmd/web-app/static/assets/vendor/fontawesome-free/svgs/brands/aws.svg vendored Executable file
@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><path d="M180.41 203.01c-.72 22.65 10.6 32.68 10.88 39.05a8.164 8.164 0 0 1-4.1 6.27l-12.8 8.96a10.66 10.66 0 0 1-5.63 1.92c-.43-.02-8.19 1.83-20.48-25.61a78.608 78.608 0 0 1-62.61 29.45c-16.28.89-60.4-9.24-58.13-56.21-1.59-38.28 34.06-62.06 70.93-60.05 7.1.02 21.6.37 46.99 6.27v-15.62c2.69-26.46-14.7-46.99-44.81-43.91-2.4.01-19.4-.5-45.84 10.11-7.36 3.38-8.3 2.82-10.75 2.82-7.41 0-4.36-21.48-2.94-24.2 5.21-6.4 35.86-18.35 65.94-18.18a76.857 76.857 0 0 1 55.69 17.28 70.285 70.285 0 0 1 17.67 52.36l-.01 69.29zM93.99 235.4c32.43-.47 46.16-19.97 49.29-30.47 2.46-10.05 2.05-16.41 2.05-27.4-9.67-2.32-23.59-4.85-39.56-4.87-15.15-1.14-42.82 5.63-41.74 32.26-1.24 16.79 11.12 31.4 29.96 30.48zm170.92 23.05c-7.86.72-11.52-4.86-12.68-10.37l-49.8-164.65c-.97-2.78-1.61-5.65-1.92-8.58a4.61 4.61 0 0 1 3.86-5.25c.24-.04-2.13 0 22.25 0 8.78-.88 11.64 6.03 12.55 10.37l35.72 140.83 33.16-140.83c.53-3.22 2.94-11.07 12.8-10.24h17.16c2.17-.18 11.11-.5 12.68 10.37l33.42 142.63L420.98 80.1c.48-2.18 2.72-11.37 12.68-10.37h19.72c.85-.13 6.15-.81 5.25 8.58-.43 1.85 3.41-10.66-52.75 169.9-1.15 5.51-4.82 11.09-12.68 10.37h-18.69c-10.94 1.15-12.51-9.66-12.68-10.75L328.67 110.7l-32.78 136.99c-.16 1.09-1.73 11.9-12.68 10.75h-18.3zm273.48 5.63c-5.88.01-33.92-.3-57.36-12.29a12.802 12.802 0 0 1-7.81-11.91v-10.75c0-8.45 6.2-6.9 8.83-5.89 10.04 4.06 16.48 7.14 28.81 9.6 36.65 7.53 52.77-2.3 56.72-4.48 13.15-7.81 14.19-25.68 5.25-34.95-10.48-8.79-15.48-9.12-53.13-21-4.64-1.29-43.7-13.61-43.79-52.36-.61-28.24 25.05-56.18 69.52-55.95 12.67-.01 46.43 4.13 55.57 15.62 1.35 2.09 2.02 4.55 1.92 7.04v10.11c0 4.44-1.62 6.66-4.87 6.66-7.71-.86-21.39-11.17-49.16-10.75-6.89-.36-39.89.91-38.41 24.97-.43 18.96 26.61 26.07 29.7 26.89 36.46 10.97 48.65 12.79 63.12 29.58 17.14 22.25 7.9 48.3 4.35 55.44-19.08 37.49-68.42 34.44-69.26 34.42zm40.2 104.86c-70.03 51.72-171.69 79.25-258.49 79.25A469.127 469.127 0 0 1 2.83 327.46c-6.53-5.89-.77-13.96 7.17-9.47a637.37 637.37 0 0 0 316.88 84.12 630.22 630.22 0 0 0 241.59-49.55c11.78-5 21.77 7.8 10.12 16.38zm29.19-33.29c-8.96-11.52-59.28-5.38-81.81-2.69-6.79.77-7.94-5.12-1.79-9.47 40.07-28.17 105.88-20.1 113.44-10.63 7.55 9.47-2.05 75.41-39.56 106.91-5.76 4.87-11.27 2.3-8.71-4.1 8.44-21.25 27.39-68.49 18.43-80.02z"/></svg>

1 configs/.gitignore vendored Normal file
@ -0,0 +1 @@
.env_docker_compose
@ -48,7 +48,7 @@ services:
        aliases:
          - datadog
    env_file:
      - .env_docker_compose
      - configs/.env_docker_compose
    environment:
      - DD_LOGS_ENABLED=true
      - DD_APM_ENABLED=true
@ -76,12 +76,8 @@ services:
      main:
        aliases:
          - web-app
    links:
      - postgres
      - redis
      - datadog
    env_file:
      - .env_docker_compose
      - configs/.env_docker_compose
    environment:
      - WEB_APP_HTTP_HOST=:3000
      - WEB_APP_APP_BASE_URL=http://127.0.0.1:3000
@ -115,12 +111,8 @@ services:
      main:
        aliases:
          - web-api
    links:
      - postgres
      - redis
      - datadog
    env_file:
      - .env_docker_compose
      - configs/.env_docker_compose
    environment:
      - WEB_API_HTTP_HOST=:3001
      - WEB_API_APP_BASE_URL=http://127.0.0.1:3001
34 go.mod
@ -2,15 +2,12 @@ module geeks-accelerator/oss/saas-starter-kit

require (
    github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
    github.com/aws/aws-sdk-go v1.21.8
    github.com/bobesa/go-domain-util v0.0.0-20180815122459-1d708c097a6a
    github.com/aws/aws-sdk-go v1.23.0
    github.com/dgrijalva/jwt-go v3.2.0+incompatible
    github.com/dimfeld/httptreemux v5.0.1+incompatible
    github.com/dustin/go-humanize v1.0.0
    github.com/fatih/camelcase v1.0.0
    github.com/fatih/structtag v1.0.0
    github.com/geeks-accelerator/files v0.0.0-20190704085106-630677cd5c14
    github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db
    github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021348-d047c980bb66
    github.com/geeks-accelerator/swag v1.6.3
    github.com/go-openapi/spec v0.19.2 // indirect
    github.com/go-openapi/swag v0.19.4 // indirect
@ -18,52 +15,37 @@ require (
    github.com/go-playground/pkg v0.0.0-20190522230805-792a755e6910
    github.com/go-playground/universal-translator v0.16.0
    github.com/go-redis/redis v6.15.2+incompatible
    github.com/golang/protobuf v1.3.2 // indirect
    github.com/google/go-cmp v0.3.0
    github.com/google/go-cmp v0.3.1
    github.com/google/uuid v1.1.1 // indirect
    github.com/gorilla/schema v1.1.0
    github.com/gorilla/securecookie v1.1.1
    github.com/gorilla/sessions v1.2.0
    github.com/howeyc/fsnotify v0.9.0 // indirect
    github.com/huandu/go-sqlbuilder v1.4.1
    github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365
    github.com/ikeikeikeike/go-sitemap-generator/v2 v2.0.2
    github.com/jmoiron/sqlx v1.2.0
    github.com/json-iterator/go v1.1.7 // indirect
    github.com/kelseyhightower/envconfig v1.4.0
    github.com/kr/pty v1.1.8 // indirect
    github.com/leodido/go-urn v1.1.0 // indirect
    github.com/lib/pq v1.2.0
    github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect
    github.com/mattn/go-colorable v0.1.2 // indirect
    github.com/mattn/go-sqlite3 v1.11.0 // indirect
    github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
    github.com/onsi/ginkgo v1.8.0 // indirect
    github.com/onsi/gomega v1.5.0
    github.com/opentracing/opentracing-go v1.1.0 // indirect
    github.com/pborman/uuid v1.2.0
    github.com/philhofer/fwd v1.0.0 // indirect
    github.com/pilu/config v0.0.0-20131214182432-3eb99e6c0b9a // indirect
    github.com/pilu/fresh v0.0.0-20170301142741-9c0092493eff // indirect
    github.com/pkg/errors v0.8.1
    github.com/sergi/go-diff v1.0.0
    github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
    github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24
    github.com/stretchr/testify v1.3.0
    github.com/stretchr/testify v1.4.0
    github.com/sudo-suhas/symcrypto v1.0.0
    github.com/tinylib/msgp v1.1.0 // indirect
    github.com/ugorji/go v1.1.7 // indirect
    github.com/urfave/cli v1.21.0
    github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2
    gitlab.com/geeks-accelerator/oss/devops v1.0.8
    golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
    golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
    golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa // indirect
    golang.org/x/text v0.3.2
    golang.org/x/tools v0.0.0-20190807223507-b346f7fd45de // indirect
    google.golang.org/appengine v1.6.1 // indirect
    golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
    golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect
    gopkg.in/DataDog/dd-trace-go.v1 v1.16.1
    gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
    gopkg.in/go-playground/validator.v9 v9.29.1
    gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
    gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce
    gotest.tools v2.2.0+incompatible // indirect
)
115
go.sum
115
go.sum
@ -1,6 +1,7 @@
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
@ -8,13 +9,18 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/aws/aws-sdk-go v1.21.8 h1:Lv6hW2twBhC6mGZAuWtqplEpIIqtVctJg02sE7Qn0Zw=
|
||||
github.com/aws/aws-sdk-go v1.21.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/aws/aws-sdk-go v1.23.0 h1:ilfJN/vJtFo1XDFxB2YMBYGeOvGZl6Qow17oyD4+Z9A=
|
||||
github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
|
||||
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
github.com/bobesa/go-domain-util v0.0.0-20180815122459-1d708c097a6a/go.mod h1:/mf0HzRK9xVv+1puqGSMzCo7bhEcQhiisuUXlMkq2p4=
|
||||
github.com/clbanning/mxj v1.8.3 h1:2r/KCJi52w2MRz+K+UMa/1d7DdCjnLqYJfnbr7dYNWI=
|
||||
github.com/clbanning/mxj v1.8.3/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
@ -25,18 +31,22 @@ github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4w
|
||||
github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fatih/structtag v1.0.0 h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc=
|
||||
github.com/fatih/structtag v1.0.0/go.mod h1:IKitwq45uXL/yqi5mYghiD3w9H6eTOvI9vnk8tXMphA=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/geeks-accelerator/files v0.0.0-20190704085106-630677cd5c14 h1:Rrxsq3gr2TWGdnSWHfRbhP/hcxatCyC9kMgLZ3da75A=
|
||||
github.com/geeks-accelerator/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:HMLrFyDC+sI+871eKlqqIBcaDim/NI8//Mbe+UwhY78=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db h1:mjErP7mTFHQ3cw/ibAkW3CvQ8gM4k19EkfzRzRINDAE=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823020016-2f4a58a35bb3 h1:183xSX0si++wcRtX6fbGpVXNmC15m4bn4Q9TXEuHtHI=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823020016-2f4a58a35bb3/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823020330-42b8aadfc2b9 h1:4O/zyFNOCInZgxMkQTaffOJA6PJYZLY+UFVHzLsLYMc=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823020330-42b8aadfc2b9/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021239-b65769ae7cf1 h1:cH4nj3YQWCvPeRfy6KWzHPKZxn2cJnpQzjbydTdl5Cw=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021239-b65769ae7cf1/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021348-d047c980bb66 h1:h9pb46oQroOhXmq5cCUU++Eagy240H1/aRwWNIYivrs=
|
||||
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190823021348-d047c980bb66/go.mod h1:dzpCjo4q7chhMVuHDzs/odROkieZ5Wjp70rNDuX83jU=
|
||||
github.com/geeks-accelerator/swag v1.6.3 h1:WottuX4MHoy5ZJFXfL+p1IrChpUb/e4g5vpM6tcwOIE=
|
||||
github.com/geeks-accelerator/swag v1.6.3/go.mod h1:YWy7dtuct7Uk3vmKr7s+v/F0SNkGYEeV7Y1CykFhmWU=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
@ -72,15 +82,19 @@ github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
|
||||
github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
|
||||
github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
|
||||
github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
|
||||
github.com/gobuffalo/packr/v2 v2.5.2/go.mod h1:sgEE1xNZ6G0FNN5xn9pevVu4nywaxHvgup67xisti08=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@ -90,8 +104,7 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=
|
||||
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/howeyc/fsnotify v0.9.0 h1:0gtV5JmOKH4A8SsFxG2BczSeXWWPvcMT0euZt5gDAxY=
|
||||
github.com/howeyc/fsnotify v0.9.0/go.mod h1:41HzSPxBGeFRQKEEwgh49TRw/nKBsYZ2cF1OzPjSJsA=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huandu/go-sqlbuilder v1.4.1 h1:DYGFGLbOUXhtQ2kwO1uyDIPJbsztmVWdPPDyxi0EJGw=
|
||||
@ -100,6 +113,7 @@ github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365 h1:ECW73yc9MY79
|
||||
github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
|
||||
github.com/ikeikeikeike/go-sitemap-generator/v2 v2.0.2 h1:wIdDEle9HEy7vBPjC6oKz6ejs3Ut+jmsYvuOoAW2pSM=
|
||||
github.com/ikeikeikeike/go-sitemap-generator/v2 v2.0.2/go.mod h1:WtaVKD9TeruTED9ydiaOJU08qGoEPP/LyzTKiD3jEsw=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
|
||||
@ -107,14 +121,15 @@ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhB
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
|
||||
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
|
||||
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
|
||||
@ -123,23 +138,20 @@ github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
|
||||
@ -152,45 +164,63 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pilu/config v0.0.0-20131214182432-3eb99e6c0b9a h1:Tg4E4cXPZSZyd3H1tJlYo6ZreXV0ZJvE/lorNqyw1AU=
|
||||
github.com/pilu/config v0.0.0-20131214182432-3eb99e6c0b9a/go.mod h1:9Or9aIl95Kp43zONcHd5tLZGKXb9iLx0pZjau0uJ5zg=
|
||||
github.com/pilu/fresh v0.0.0-20170301142741-9c0092493eff h1:/FQrxtJUVqC79XhN/OHwWzuSe051qehQCzZ3LIhdo5c=
|
||||
github.com/pilu/fresh v0.0.0-20170301142741-9c0092493eff/go.mod h1:2LLTtftTZSdAPR/iVyennXZDLZOYzyDn+T0qEKJ8eSw=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0 h1:X9XMOYjxEfAYSy3xK1DzO5dMkkWhs9E9UCcS1IERx2k=
|
||||
github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0/go.mod h1:Ad7IjTpvzZO8Fl0vh9AzQ+j/jYZfyp2diGwI8m5q+ns=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/sudo-suhas/symcrypto v1.0.0 h1:VG6FdACf5XeXFQUzeA++aB6snNThz0OFlmUHiCddi2s=
|
||||
github.com/sudo-suhas/symcrypto v1.0.0/go.mod h1:g/faGDjhlF/DXdqp3+SQ0LmhPcv4iYaIRjcm/Q60+68=
|
||||
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.21.0 h1:wYSSj06510qPIzGSua9ZqsncMmWE3Zr55KBERygyrxE=
|
||||
github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ=
github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY=
gitlab.com/geeks-accelerator/oss/devops v1.0.2 h1:LqME1zTc9bgB+J/tw7tv1WDjvgmrgl2OZdKcRToleqg=
gitlab.com/geeks-accelerator/oss/devops v1.0.2/go.mod h1:rvI71qNJyNiO99ZgGnv/PmJCVrjJjupsXBmfYFXdjGM=
gitlab.com/geeks-accelerator/oss/devops v1.0.3 h1:SE2ZD4Csvmm3t/50RoJkVLjDcwXKHayQYawSkpOSqIw=
gitlab.com/geeks-accelerator/oss/devops v1.0.3/go.mod h1:rvI71qNJyNiO99ZgGnv/PmJCVrjJjupsXBmfYFXdjGM=
gitlab.com/geeks-accelerator/oss/devops v1.0.7 h1:ZlQufuVnRN3DwJ0I5c5KA5edhQs7OstXc0uUZ9V0ixI=
gitlab.com/geeks-accelerator/oss/devops v1.0.7/go.mod h1:JEl0T87/zftowrIzY1D+rhDMhG0AxnghuZB+VzEWuqM=
gitlab.com/geeks-accelerator/oss/devops v1.0.8 h1:ql2Ii6CuW+k9X0SM0sVSzwYWDdiVdoX2xuGg9ADlj+Y=
gitlab.com/geeks-accelerator/oss/devops v1.0.8/go.mod h1:JEl0T87/zftowrIzY1D+rhDMhG0AxnghuZB+VzEWuqM=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8=
@ -203,38 +233,34 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 h1:SaXEMXhWzMJThc05vu6uh61Q245r4KaWMrsTedk0FDc=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190730205120-7deaedd405c4 h1:GhbPrljMrt6gCNHHAJcWLDV3nDPFkIm0EEuqY9GtuX0=
golang.org/x/tools v0.0.0-20190730205120-7deaedd405c4/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190805222050-c5a2fd39b72a h1:0AGI+cC4FJwXNdClvHzfHhJf/yPjKwdo/+m0lPKrdJA=
golang.org/x/tools v0.0.0-20190805222050-c5a2fd39b72a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190807223507-b346f7fd45de h1:VNumCimp/Bwk6fRqgPHkjiUPZ/vzlpi23/kQTuQ4gBA=
golang.org/x/tools v0.0.0-20190807223507-b346f7fd45de/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -242,9 +268,12 @@ google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
gopkg.in/DataDog/dd-trace-go.v1 v1.16.1 h1:Dngw1zun6yTYFHNdzEWBlrJzFA2QJMjSA2sZ4nH2UWo=
gopkg.in/DataDog/dd-trace-go.v1 v1.16.1/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
@ -254,8 +283,6 @@ gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvR
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
@ -169,7 +169,7 @@ func New() *Test {
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)

// Execute the migrations
if err = schema.Migrate(ctx, masterDB, log, true); err != nil {
if err = schema.Migrate(ctx, v.Env, masterDB, log, true); err != nil {
log.Fatalf("main : Migrate : %v", err)
}
log.Printf("main : Migrate : Completed")
@ -45,6 +45,13 @@ var (
Env_Prod Env = "prod"
)

// List of env names.
var EnvNames = []Env{
Env_Dev,
Env_Stage,
Env_Prod,
}

func ContextEnv(ctx context.Context) string {
cv := ctx.Value(KeyValues).(*Values)
if cv != nil {
@ -16,374 +16,3 @@ func initSchema(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest bo

return f
}

/*
// initGeonames populates countries and postal codes.
func initGeonamesOld(db *sqlx.DB) error {
schemas := []string{
`DROP TABLE IF EXISTS geoname`,
`create table geoname (
geonameid int,
name varchar(200),
asciiname varchar(200),
alternatenames text,
latitude float,
longitude float,
fclass char(1),
fcode varchar(10),
country varchar(2),
cc2 varchar(600),
admin1 varchar(20),
admin2 varchar(80),
admin3 varchar(20),
admin4 varchar(20),
population bigint,
elevation int,
gtopo30 int,
timezone varchar(40),
moddate date)`,
`DROP TABLE IF EXISTS countryinfo`,
`CREATE TABLE countryinfo (
iso_alpha2 char(2),
iso_alpha3 char(3),
iso_numeric integer,
fips_code character varying(3),
country character varying(200),
capital character varying(200),
areainsqkm double precision,
population integer,
continent char(2),
tld CHAR(10),
currency_code char(3),
currency_name CHAR(20),
phone character varying(20),
postal character varying(60),
postal_format character varying(200),
postal_regex character varying(200),
languages character varying(200),
geonameId int,
neighbours character varying(50),
equivalent_fips_code character varying(3))`,
}

for _, q := range schemas {
_, err := db.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
}
}

// Load the countryinfo table.
if false {
u := "http://download.geonames.org/export/dump/countryInfo.txt"
resp, err := pester.Get(u)
if err != nil {
return errors.WithMessagef(err, "Failed to read country info from '%s'", u)
}
defer resp.Body.Close()

scanner := bufio.NewScanner(resp.Body)
var prevLine string
var stmt *sql.Stmt
for scanner.Scan() {
line := scanner.Text()

// Skip comments.
if strings.HasPrefix(line, "#") {
prevLine = line
continue
}

// Pull the last comment to load the fields.
if stmt == nil {
prevLine = strings.TrimPrefix(prevLine, "#")
r := csv.NewReader(strings.NewReader(prevLine))
r.Comma = '\t' // Use tab-delimited instead of comma <---- here!
r.FieldsPerRecord = -1

lines, err := r.ReadAll()
if err != nil {
return errors.WithStack(err)
}
var columns []string

for _, fn := range lines[0] {
var cn string
switch fn {
case "ISO":
cn = "iso_alpha2"
case "ISO3":
cn = "iso_alpha3"
case "ISO-Numeric":
cn = "iso_numeric"
case "fips":
cn = "fips_code"
case "Country":
cn = "country"
case "Capital":
cn = "capital"
case "Area(in sq km)":
cn = "areainsqkm"
case "Population":
cn = "population"
case "Continent":
cn = "continent"
case "tld":
cn = "tld"
case "CurrencyCode":
cn = "currency_code"
case "CurrencyName":
cn = "currency_name"
case "Phone":
cn = "phone"
case "Postal":
cn = "postal"
case "Postal Code Format":
cn = "postal_format"
case "Postal Code Regex":
cn = "postal_regex"
case "Languages":
cn = "languages"
case "geonameid":
cn = "geonameId"
case "neighbours":
cn = "neighbours"
case "EquivalentFipsCode":
cn = "equivalent_fips_code"
default :
return errors.Errorf("Failed to map column %s", fn)
}
columns = append(columns, cn)
}

placeholders := []string{}
for i := 0; i < len(columns); i++ {
placeholders = append(placeholders, "?")
}

q := "insert into countryinfo ("+strings.Join(columns, ",")+") values("+strings.Join(placeholders, ",")+")"
q = db.Rebind(q)
stmt, err = db.Prepare(q)
if err != nil {
return errors.WithMessagef(err, "Failed to prepare sql query '%s'", q)
}
}

r := csv.NewReader(strings.NewReader(line))
r.Comma = '\t' // Use tab-delimited instead of comma <---- here!
r.FieldsPerRecord = -1

lines, err := r.ReadAll()
if err != nil {
return errors.WithStack(err)
}

for _, row := range lines {
var args []interface{}
for _, v := range row {
args = append(args, v)
}

_, err = stmt.Exec(args...)
if err != nil {
return errors.WithStack(err)
}
}
}

if err := scanner.Err(); err != nil {
return errors.WithStack(err)
}
}

// Load the geoname table.
{
u := "http://download.geonames.org/export/dump/allCountries.zip"
resp, err := pester.Get(u)
if err != nil {
return errors.WithMessagef(err, "Failed to read countries from '%s'", u)
}
defer resp.Body.Close()

br := bufio.NewReader(resp.Body)

buff := bytes.NewBuffer([]byte{})
size, err := io.Copy(buff, br)
if err != nil {
return err
}

b := bytes.NewReader(buff.Bytes())
zr, err := zip.NewReader(b, size)
if err != nil {
return errors.WithStack(err)
}

q := "insert into geoname " +
"(geonameid,name,asciiname,alternatenames,latitude,longitude,fclass,fcode,country,cc2,admin1,admin2,admin3,admin4,population,elevation,gtopo30,timezone,moddate) " +
"values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
q = db.Rebind(q)
stmt, err := db.Prepare(q)
if err != nil {
return errors.WithMessagef(err, "Failed to prepare sql query '%s'", q)
}

for _, f := range zr.File {
if f.Name == "readme.txt" {
continue
}

fh, err := f.Open()
if err != nil {
return errors.WithStack(err)
}

scanner := bufio.NewScanner(fh)
for scanner.Scan() {
line := scanner.Text()

// Skip comments.
if strings.HasPrefix(line, "#") {
continue
}

if strings.Contains(line, "\"") {
line = strings.Replace(line, "\"", "\\\"", -1)
}

r := csv.NewReader(strings.NewReader(line))
r.Comma = '\t' // Use tab-delimited instead of comma <---- here!
r.LazyQuotes = true
r.FieldsPerRecord = -1

lines, err := r.ReadAll()
if err != nil {
return errors.WithStack(err)
}

for _, row := range lines {
var args []interface{}
for idx, v := range row {
if v == "" {
if idx == 0 || idx == 14 || idx == 15 {
v = "0"
}
}
args = append(args, v)
}

_, err = stmt.Exec(args...)
if err != nil {
return errors.WithStack(err)
}
}
}

if err := scanner.Err(); err != nil {
return errors.WithStack(err)
}
}
}


return errors.New("not finished")


queries := []string{
// Countries...
`DROP TABLE IF EXISTS countries`,
`CREATE TABLE countries(
id serial not null constraint countries_pkey primary key,
geoname_id int,
iso char(2),
country character varying(50),
capital character varying(50),
created_at TIMESTAMP WITH TIME ZONE NOT NULL,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
archived_at TIMESTAMP WITH TIME ZONE DEFAULT NULL)`,
`create index idx_countries_deleted_at on countries (deleted_at)`,
`insert into countries(geoname_id, iso, country, capital, created_at, updated_at)
select geonameId, iso_alpha2, country, capital, NOW(), NOW()
from countryinfo`,
// Regions...
`DROP TABLE IF EXISTS regions`,
`CREATE TABLE regions (
id serial not null constraint regions_pkey primary key,
country_id int,
geoname_id int,
name varchar(200),
ascii_name varchar(200),
adm varchar(20),
country char(2),
latitude float,
longitude float,
created_at TIMESTAMP WITH TIME ZONE NOT NULL,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
archived_at TIMESTAMP WITH TIME ZONE DEFAULT NULL)`,
`create index idx_regions_deleted_at on regions (deleted_at)`,
`insert into regions(country_id, geoname_id, name, ascii_name, adm, country, latitude, longitude, created_at, updated_at)
select c.id,
g.geonameid,
g.name,
g.asciiname,
g.admin1,
c.iso,
g.latitude,
g.longitude,
to_timestamp(TO_CHAR(g.moddate, 'YYYY-MM-DD'), 'YYYY-MM-DD'),
to_timestamp(TO_CHAR(g.moddate, 'YYYY-MM-DD'), 'YYYY-MM-DD')
from countries as c
inner join geoname as g on c.iso = g.country and g.fcode like 'ADM1'`,
// cities
`DROP TABLE IF EXISTS cities`,
`CREATE TABLE cities (
id serial not null constraint cities_pkey primary key,
country_id int,
region_id int,
geoname_id int,
name varchar(200),
ascii_name varchar(200),
latitude float,
longitude float,
created_at TIMESTAMP WITH TIME ZONE NOT NULL,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
archived_at TIMESTAMP WITH TIME ZONE DEFAULT NULL)`,
`create index idx_cities_deleted_at on cities (deleted_at)`,
`insert into cities(country_id, region_id, geoname_id, name, ascii_name, latitude, longitude, created_at, updated_at)
select r.country_id,
r.id,
g.geonameid,
g.name,
g.asciiname,
g.latitude,
g.longitude,
to_timestamp(TO_CHAR(g.moddate, 'YYYY-MM-DD'), 'YYYY-MM-DD'),
to_timestamp(TO_CHAR(g.moddate, 'YYYY-MM-DD'), 'YYYY-MM-DD')
from geoname as g
join regions as r on r.adm = g.admin1
and r.country = g.country
and (g.fcode in ('PPLC', 'PPLA') or (g.fcode like 'PPLA%' and g.population >= 50000));`,

}

tx, err := db.Begin()
if err != nil {
return errors.WithStack(err)
}

for _, q := range queries {
_, err = tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
}
}

err = tx.Commit()
if err != nil {
return errors.WithStack(err)
}

return nil
}

*/
@ -5,10 +5,10 @@ import (
"context"
"database/sql"
"encoding/csv"
"fmt"
"log"
"strings"
"time"
"fmt"

"geeks-accelerator/oss/saas-starter-kit/internal/geonames"
"github.com/geeks-accelerator/sqlxmigrate"
@ -26,7 +26,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
return []*sqlxmigrate.Migration{
// Create table users.
{
ID: "20190522-01a",
ID: "20190522-01b",
Migrate: func(tx *sql.Tx) error {
q1 := `CREATE TABLE IF NOT EXISTS users (
id char(36) NOT NULL,
@ -43,25 +43,24 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
CONSTRAINT email UNIQUE (email)
) ;`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}
return nil
},
Rollback: func(tx *sql.Tx) error {
q1 := `DROP TABLE IF EXISTS users`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}
return nil
},
},
// Create new table accounts.
{
ID: "20190522-01b",
ID: "20190522-01h",
Migrate: func(tx *sql.Tx) error {
q1 := `CREATE TYPE account_status_t as enum('active','pending','disabled')`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
if err := createTypeIfNotExists(tx, "account_status_t", "enum('active','pending','disabled')"); err != nil {
return err
}

q2 := `CREATE TABLE IF NOT EXISTS accounts (
@ -84,38 +83,36 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
CONSTRAINT name UNIQUE (name)
)`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
return errors.Wrapf(err, "Query failed %s", q2)
}
return nil
},
Rollback: func(tx *sql.Tx) error {
q1 := `DROP TYPE account_status_t`
q1 := `DROP TYPE IF EXISTS account_status_t`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}

q2 := `DROP TABLE IF EXISTS accounts`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
return errors.Wrapf(err, "Query failed %s", q2)
}
return nil
},
},
// Create new table user_accounts.
{
ID: "20190522-01d",
ID: "20190522-02e",
Migrate: func(tx *sql.Tx) error {
q1 := `CREATE TYPE user_account_role_t as enum('admin', 'user')`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
if err := createTypeIfNotExists(tx, "user_account_role_t", "enum('admin', 'user')"); err != nil {
return err
}

q2 := `CREATE TYPE user_account_status_t as enum('active', 'invited','disabled')`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
if err := createTypeIfNotExists(tx, "user_account_status_t", "enum('active', 'invited','disabled')"); err != nil {
return err
}

q3 := `CREATE TABLE IF NOT EXISTS users_accounts (
q1 := `CREATE TABLE IF NOT EXISTS users_accounts (
id char(36) NOT NULL,
account_id char(36) NOT NULL REFERENCES accounts(id) ON DELETE NO ACTION,
user_id char(36) NOT NULL REFERENCES users(id) ON DELETE NO ACTION,
@ -127,26 +124,26 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
PRIMARY KEY (id),
CONSTRAINT user_account UNIQUE (user_id,account_id)
)`
if _, err := tx.Exec(q3); err != nil {
return errors.WithMessagef(err, "Query failed %s", q3)
if _, err := tx.Exec(q1); err != nil {
return errors.Wrapf(err, "Query failed %s", q1)
}

return nil
},
Rollback: func(tx *sql.Tx) error {
q1 := `DROP TYPE user_account_role_t`
q1 := `DROP TYPE IF EXISTS user_account_role_t`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}

q2 := `DROP TYPE userr_account_status_t`
q2 := `DROP TYPE IF EXISTS user_account_status_t`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
return errors.Wrapf(err, "Query failed %s", q2)
}

q3 := `DROP TABLE IF EXISTS users_accounts`
if _, err := tx.Exec(q3); err != nil {
return errors.WithMessagef(err, "Query failed %s", q3)
return errors.Wrapf(err, "Query failed %s", q3)
}

return nil
@ -156,12 +153,11 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
{
ID: "20190622-01",
Migrate: func(tx *sql.Tx) error {
q1 := `CREATE TYPE project_status_t as enum('active','disabled')`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
if err := createTypeIfNotExists(tx, "project_status_t", "enum('active','disabled')"); err != nil {
return err
}

q2 := `CREATE TABLE IF NOT EXISTS projects (
q1 := `CREATE TABLE IF NOT EXISTS projects (
id char(36) NOT NULL,
account_id char(36) NOT NULL REFERENCES accounts(id) ON DELETE SET NULL,
name varchar(255) NOT NULL,
@ -171,20 +167,20 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
archived_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
PRIMARY KEY (id)
)`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
if _, err := tx.Exec(q1); err != nil {
return errors.Wrapf(err, "Query failed %s", q1)
}
return nil
},
Rollback: func(tx *sql.Tx) error {
q1 := `DROP TYPE project_status_t`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
q1 := `DROP TYPE IF EXISTS project_status_t`
if _, err := tx.Exec(q1); err != nil && !errorIsAlreadyExists(err) {
return errors.Wrapf(err, "Query failed %s", q1)
}

q2 := `DROP TABLE IF EXISTS projects`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
if _, err := tx.Exec(q2); err != nil && !errorIsAlreadyExists(err) {
return errors.Wrapf(err, "Query failed %s", q2)
}
return nil
},
@ -196,13 +192,13 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
q1 := `ALTER TABLE users
RENAME COLUMN name to first_name;`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}

q2 := `ALTER TABLE users
ADD last_name varchar(200) NOT NULL DEFAULT '';`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
return errors.Wrapf(err, "Query failed %s", q2)
}

return nil
@ -210,7 +206,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
Rollback: func(tx *sql.Tx) error {
q1 := `DROP TABLE IF EXISTS users`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}
return nil
},
@ -238,9 +234,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}

for _, q := range schemas {
_, err := db.Exec(q)
_, err := tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
}
}

@ -277,13 +273,12 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
"VALUES %s", strings.Join(valueStrings, ","))
insertStmt = db.Rebind(insertStmt)

stmt, err := db.Prepare(insertStmt)
_, err := tx.Exec(insertStmt, valueArgs...)
if err != nil {
return errors.WithMessagef(err, "Failed to prepare sql query '%s'", insertStmt)
return errors.Wrapf(err, "Failed to execute sql query '%s'", insertStmt)
}

_, err = stmt.Exec(valueArgs...)
return err
return nil
}
start := time.Now()
for _, country := range countries {
@ -294,7 +289,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}
//fmt.Println("Geoname records: ", len(v))
// Max argument values of Postgres is about 54460. So the batch size for bulk insert is selected 4500*12 (ncol)
batch := 4500
batch := 1000
n := len(v) / batch

//fmt.Println("Number of batch: ", n)
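The batch sizing in the hunk above is driven by Postgres's hard cap of 65535 bind parameters per statement (the code comment's ~54460 is a more conservative figure). A minimal sketch of the arithmetic; the helper name and the 12-column example are illustrative and not part of this commit:

```go
// maxBatchRows returns the largest number of rows a single bulk INSERT can
// carry when each row binds ncol parameters, given Postgres's 65535 cap.
func maxBatchRows(ncol int) int {
	const pgMaxParams = 65535
	return pgMaxParams / ncol // e.g. 12 columns -> 5461 rows; a batch of 1000 leaves ample headroom
}
```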
@ -336,9 +331,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}

for _, q := range queries {
_, err := db.Exec(q)
_, err := tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
}
}

@ -369,9 +364,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}

for _, q := range schemas {
_, err := db.Exec(q)
_, err := tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
}
}

@ -404,16 +399,16 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}

for _, q := range prep {
_, err := db.Exec(q)
_, err := tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
}
}

u := "http://download.geonames.org/export/dump/countryInfo.txt"
resp, err := pester.Get(u)
if err != nil {
return errors.WithMessagef(err, "Failed to read country info from '%s'", u)
return errors.Wrapf(err, "Failed to read country info from '%s'", u)
}
defer resp.Body.Close()

@ -496,9 +491,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest

q := "insert into countryinfo (" + strings.Join(columns, ",") + ") values(" + strings.Join(placeholders, ",") + ")"
q = db.Rebind(q)
stmt, err = db.Prepare(q)
stmt, err = tx.Prepare(q)
if err != nil {
return errors.WithMessagef(err, "Failed to prepare sql query '%s'", q)
return errors.Wrapf(err, "Failed to prepare sql query '%s'", q)
}
}

@ -536,9 +531,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}

for _, q := range queries {
_, err := db.Exec(q)
_, err := tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
}
}
}
@ -563,9 +558,9 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
}

for _, q := range queries {
_, err := db.Exec(q)
_, err := tx.Exec(q)
if err != nil {
return errors.WithMessagef(err, "Failed to execute sql query '%s'", q)
return errors.Wrapf(err, "Failed to execute sql query '%s'", q)
}
}

@ -575,15 +570,15 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
u := "http://download.geonames.org/export/dump/timeZones.txt"
resp, err := pester.Get(u)
if err != nil {
return errors.WithMessagef(err, "Failed to read timezones info from '%s'", u)
return errors.Wrapf(err, "Failed to read timezones info from '%s'", u)
}
defer resp.Body.Close()

q := "insert into country_timezones (country_code,timezone_id) values(?, ?)"
q = db.Rebind(q)
stmt, err := db.Prepare(q)
stmt, err := tx.Prepare(q)
if err != nil {
return errors.WithMessagef(err, "Failed to prepare sql query '%s'", q)
return errors.Wrapf(err, "Failed to prepare sql query '%s'", q)
}

scanner := bufio.NewScanner(resp.Body)
@ -638,7 +633,7 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
CONSTRAINT account_preferences_pkey UNIQUE (account_id,name)
)`
if _, err := tx.Exec(q); err != nil {
return errors.WithMessagef(err, "Query failed %s", q)
return errors.Wrapf(err, "Query failed %s", q)
}

return nil
@ -653,12 +648,12 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
Migrate: func(tx *sql.Tx) error {
q1 := `ALTER TABLE users ALTER COLUMN timezone DROP DEFAULT`
if _, err := tx.Exec(q1); err != nil {
return errors.WithMessagef(err, "Query failed %s", q1)
return errors.Wrapf(err, "Query failed %s", q1)
}

q2 := `ALTER TABLE users ALTER COLUMN timezone DROP NOT NULL`
if _, err := tx.Exec(q2); err != nil {
return errors.WithMessagef(err, "Query failed %s", q2)
return errors.Wrapf(err, "Query failed %s", q2)
}

return nil
@ -669,3 +664,54 @@ func migrationList(ctx context.Context, db *sqlx.DB, log *log.Logger, isUnittest
},
}
}

// dropTypeIfExists executes drop type.
func dropTypeIfExists(tx *sql.Tx, name string) error {
q := "DROP TYPE IF EXISTS " + name
if _, err := tx.Exec(q); err != nil && !errorIsAlreadyExists(err) {
return errors.Wrapf(err, "Query failed %s", q)
}
return nil
}

// createTypeIfNotExists checks to ensure a type doesn't exist before creating.
func createTypeIfNotExists(tx *sql.Tx, name, val string) error {

q1 := "select exists (select 1 from pg_type where typname = '" + name + "')"
rows, err := tx.Query(q1)
if err != nil {
return errors.Wrapf(err, "Query failed %s", q1)
}
defer rows.Close()

var exists bool
for rows.Next() {
err := rows.Scan(&exists)
if err != nil {
return err
}
}

if err := rows.Err(); err != nil {
return err
}

if exists {
return nil
}

q2 := "CREATE TYPE " + name + " AS " + val
if _, err := tx.Exec(q2); err != nil && !errorIsAlreadyExists(err) {
return errors.Wrapf(err, "Query failed %s", q2)
}

return nil
}

// errorIsAlreadyExists checks an error message for the error "already exists"
func errorIsAlreadyExists(err error) bool {
if strings.Contains(err.Error(), "already exists") {
return true
}
return false
}
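Together, these helpers make the enum migrations above idempotent, which is what lets the renumbered migration IDs re-run safely. A minimal sketch of a migration entry using them; the ID and enum values are illustrative only, not part of this commit:

```go
{
	ID: "20190801-01", // illustrative ID, not a real migration in this commit
	Migrate: func(tx *sql.Tx) error {
		// No-op when the enum already exists, so the step can be replayed safely.
		return createTypeIfNotExists(tx, "example_status_t", "enum('on','off')")
	},
	Rollback: func(tx *sql.Tx) error {
		return dropTypeIfExists(tx, "example_status_t")
	},
},
```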
@ -3,12 +3,24 @@ package schema
import (
"context"
"log"
"time"

"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
"github.com/geeks-accelerator/sqlxmigrate"
"github.com/jmoiron/sqlx"
)

func Migrate(ctx context.Context, masterDb *sqlx.DB, log *log.Logger, isUnittest bool) error {
// Migrate is the entry point for performing init schema and running all the migrations.
func Migrate(ctx context.Context, targetEnv webcontext.Env, masterDb *sqlx.DB, log *log.Logger, isUnittest bool) error {

// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: targetEnv,
}
ctx = context.WithValue(ctx, webcontext.KeyValues, &v)

// Load list of Schema migrations and init new sqlxmigrate client
migrations := migrationList(ctx, masterDb, log, isUnittest)
m := sqlxmigrate.New(masterDb, sqlxmigrate.DefaultOptions, migrations)
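With the new signature, callers pass the target environment explicitly so migrations can read it from the context. A minimal sketch of a call site, mirroring the test setup earlier in this diff; masterDb and log are assumed to be initialized elsewhere:

```go
if err := schema.Migrate(context.Background(), webcontext.Env_Dev, masterDb, log, false); err != nil {
	log.Fatalf("main : Migrate : %v", err)
}
```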
@ -1,131 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "ServiceDeployPermissions",
"Effect": "Allow",
"Action": [
"acm:ListCertificates",
"acm:RequestCertificate",
"acm:DescribeCertificate",
"cloudfront:CreateDistribution",
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups",
"ec2:CreateSecurityGroup",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DescribeNetworkInterfaces",
"ec2:DescribeVpcs",
"ec2:CreateVpc",
"ec2:CreateSubnet",
"ec2:DescribeVpcs",
"ec2:DescribeInternetGateways",
"ec2:CreateInternetGateway",
"ec2:CreateTags",
"ec2:CreateRouteTable",
"ec2:DescribeRouteTables",
"ec2:CreateRoute",
"ec2:AttachInternetGateway",
"ec2:DescribeAccountAttributes",
"elasticache:DescribeCacheClusters",
"elasticache:CreateCacheCluster",
"elasticache:DescribeCacheParameterGroups",
"elasticache:CreateCacheParameterGroup",
"elasticache:ModifyCacheCluster",
"elasticache:ModifyCacheParameterGroup",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:ModifyTargetGroupAttributes",
"ecs:CreateCluster",
"ecs:CreateService",
"ecs:DeleteService",
"ecs:DescribeClusters",
"ecs:DescribeServices",
"ecs:UpdateService",
"ecs:RegisterTaskDefinition",
"ecs:ListTaskDefinitions",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchDeleteImage",
"ecr:GetAuthorizationToken",
"ecr:DescribeImages",
"ecr:DescribeRepositories",
"ecs:DescribeTasks",
"ecr:CreateRepository",
"ecr:ListImages",
"ecs:ListTasks",
"ecr:PutImage",
"ecr:InitiateLayerUpload",
"ecr:UploadLayerPart",
"ecr:CompleteLayerUpload",
"logs:DescribeLogGroups",
"logs:CreateLogGroup",
"lambda:ListFunctions",
"lambda:CreateFunction",
"lambda:UpdateFunctionCode",
"lambda:UpdateFunctionConfiguration",
"iam:GetRole",
"iam:PassRole",
"iam:CreateRole",
"iam:CreateServiceLinkedRole",
"iam:CreatePolicy",
"iam:PutRolePolicy",
"iam:TagRole",
"iam:AttachRolePolicy",
"iam:ListPolicies",
"iam:GetPolicyVersion",
"iam:CreatePolicyVersion",
"logs:DescribeLogGroups",
"logs:CreateLogGroup",
"logs:DescribeLogStreams",
"logs:CreateExportTask",
"logs:DescribeExportTasks",
"rds:CreateDBCluster",
"rds:CreateDBInstance",
"rds:DescribeDBClusters",
"rds:DescribeDBInstances",
"s3:CreateBucket",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetBucketPublicAccessBlock",
"s3:GetBucketAcl",
"s3:HeadBucket",
"s3:ListObjects",
"s3:ListBucket",
"s3:GetObject",
"s3:PutLifecycleConfiguration",
"s3:PutBucketCORS",
"s3:PutBucketPolicy",
"s3:PutBucketPublicAccessBlock",
"route53:CreateHostedZone",
"route53:ChangeResourceRecordSets",
"route53:ListHostedZones",
"secretsmanager:CreateSecret",
"secretsmanager:ListSecrets",
"secretsmanager:GetSecretValue",
"secretsmanager:UpdateSecret",
"secretsmanager:RestoreSecret",
"secretsmanager:DeleteSecret",
"servicediscovery:ListNamespaces",
"servicediscovery:CreatePrivateDnsNamespace",
"servicediscovery:GetOperation",
"servicediscovery:ListServices",
"servicediscovery:CreateService",
"servicediscovery:GetService"
],
"Resource": "*"
},
{
"Action": "iam:CreateServiceLinkedRole",
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/aws-service-role/rds.amazonaws.com/AWSServiceRoleForRDS",
"Condition": {
"StringLike": {
"iam:AWSServiceName":"rds.amazonaws.com"
}
}
}
]
}
1
tools/devops/.gitignore
vendored
@ -1 +0,0 @@
devops
@ -1,28 +0,0 @@
FROM golang:1.12.6-alpine3.9 AS builder

LABEL maintainer="lee@geeksinthewoods.com"

RUN apk --update --no-cache add \
git

# Change dir to project base.
WORKDIR $GOPATH/src/gitlab.com/geeks-accelerator/oss/saas-starter-kit

# Enable go modules.
ENV GO111MODULE="on"
COPY go.mod .
COPY go.sum .
RUN go mod download

COPY internal ./internal
COPY tools/devops ./tools/devops

WORKDIR ./tools/devops

RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o /go/bin/devops .

FROM busybox

COPY --from=builder /go/bin/devops /go/bin/devops

ENTRYPOINT ["/go/bin/devops"]
@ -1,244 +0,0 @@
# SaaS Starter Kit

Copyright 2019, Geeks Accelerator
twins@geeksaccelerator.com


## Description

_Devops_ handles creating AWS resources and deploying your services with minimal additional configuration. You can
customize any of the configuration in the code. While AWS is already a core part of the saas-starter-kit, keeping
the deployment in Go limits the scope of additional technologies required to get your project successfully up and
running. If you understand Golang, then you will be a master at devops with this tool.

The project includes a Postgres database which adds an additional resource dependency when deploying the
project. It is important to know that the tasks running schema migration for the Postgres database cannot run on shared
GitLab Runners since they will be outside the deployment AWS VPC. There are two options here:
1. Enable the AWS RDS database to be publicly available (not recommended).
2. Run your own GitLab runners inside the same AWS VPC and grant access for them to communicate with the database.

This project has opted to implement option 2, and thus setting up the deployment pipeline requires a few additional steps.

Note that using shared runners hosted by GitLab also requires AWS credentials to be input into GitLab for configuration.

Hosting your own GitLab runners uses AWS roles instead of hardcoding the access key ID and secret access key in GitLab and
in other configuration files. And since this project is open-source, we wanted to avoid sharing our AWS credentials.

If you don't have an AWS account, sign up for one now and then proceed with the deployment setup.

We assume that if you are deploying the SaaS Starter Kit, you are starting from scratch with no existing dependencies.
This, however, excludes any domain names that you would like to use for resolving your services publicly. To use any
pre-purchased domain names, make sure they are added to Route 53 in the AWS account. Or you can let the deploy script
create a new zone in Route 53 and update the DNS for the domain name when you're ready to make the transition. The DNS
must be hosted on Route 53 so DNS entries can be managed by this deploy tool. It is possible to use a
[subdomain that uses Route 53 as the DNS service without migrating the parent domain](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html).


## Getting Started

You can run both the `build` and `deploy` commands locally after setting up the initial
AWS permissions.

1. You will need an existing AWS account or create a new AWS account.

2. Define a new [AWS IAM Policy](https://console.aws.amazon.com/iam/home?region=us-west-2#/policies$new?step=edit)
called `saas-starter-kit-deploy` with a defined JSON statement instead of using the visual
editor. The statement is rather large as each permission is granted individually. A copy of
the statement is stored in the repo at
[resources/saas-starter-kit-deploy-policy.json](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/resources/saas-starter-kit-deploy-policy.json)

3. Create a new [AWS User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users$new?step=details)
called `saas-starter-kit-deploy` with _Programmatic Access_ and _Attach existing policies directly_ with the policy
created in step 2, `saas-starter-kit-deploy`

4. Try running the deploy
```bash
go run main.go deploy -service=web-api -env=dev
```

Note: This user is only for development purposes and is not needed for the build
pipeline using GitLab CI / CD.


## Setup GitLab CI / CD

Below outlines the basic steps to set up [Autoscaling GitLab Runner on AWS](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/).

1. Define an [AWS IAM Role](https://console.aws.amazon.com/iam/home?region=us-west-2#/roles$new?step=type) that will be
attached to the GitLab Runner instances. The role will need permission to scale (EC2), update the cache (via S3) and
perform the project specific deployment commands.
```
Trusted Entity: AWS Service
Service that will use this role: EC2
Attach permissions policies: AmazonEC2FullAccess, AmazonS3FullAccess, saas-starter-kit-deploy
Role Name: SaasStarterKitEc2RoleForGitLabRunner
Role Description: Allows GitLab runners hosted on EC2 instances to call AWS services on your behalf.
```

2. Launch a new [AWS EC2 Instance](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchInstanceWizard).
`GitLab Runner` will be installed on this instance and will serve as the bastion that spawns new instances. This
instance will be a dedicated host since we need it always up and running, thus standard costs apply.

Note: Since this machine will not run any jobs itself, it does not need to be very powerful. A t2.micro instance will be sufficient.
```
Amazon Machine Image (AMI): Amazon Linux AMI 2018.03.0 (HVM), SSD Volume Type - ami-0f2176987ee50226e
Instance Type: t2.micro
```

3. Configure Instance Details.

Note: Do not forget to select the IAM Role _SaasStarterKitEc2RoleForGitLabRunner_
```
Number of instances: 1
Network: default VPC
Subnet: no Preference
Auto-assign Public IP: Use subnet setting (Enable)
Placement Group: not checked/disabled
Capacity Reservation: Open
IAM Role: SaasStarterKitEc2RoleForGitLabRunner
Shutdown behavior: Stop
Enable termination protection: checked/enabled
Monitoring: not checked/disabled
Tenancy: Shared
Elastic Inference: not checked/disabled
T2/T3 Unlimited: not checked/disabled
Advanced Details: none
```

4. Add Storage. Increase the volume size for the root device to 30 GiB.
```
Volume Type | Device | Size (GiB) | Volume Type
Root | /dev/xvda | 30 | General Purpose SSD (gp2)
```

5. Add Tags.
```
Name: gitlab-runner
```

6. Configure Security Group. Create a new security group with the following details:
```
Name: gitlab-runner
Description: Gitlab runners for running CICD.
Rules:
Type | Protocol | Port Range | Source | Description
SSH | TCP | 22 | My IP | SSH access for setup.
```

7. Review and Launch instance. Select an existing key pair or create a new one. This will be used to SSH into the
instance for additional configuration.

8. Update the security group to reference itself. The instances need to be able to communicate between each other.

Navigate to edit the security group and add the following two rules where `SECURITY_GROUP_ID` is replaced with the
name of the security group created in step 6.
```
Rules:
Type | Protocol | Port Range | Source | Description
Custom TCP | TCP | 2376 | SECURITY_GROUP_ID | Gitlab runner for Docker Machine to communicate with Docker daemon.
SSH | TCP | 22 | SECURITY_GROUP_ID | SSH access for setup.
```

9. SSH into the newly created instance.

```bash
ssh -i ~/saas-starter-kit-uswest2-gitlabrunner.pem ec2-user@ec2-52-36-105-172.us-west-2.compute.amazonaws.com
```
Note: If you get the error `Permissions 0666 are too open`, then you will need to `chmod 400 FILENAME`

10. Install GitLab Runner from the [official GitLab repository](https://docs.gitlab.com/runner/install/linux-repository.html)
```bash
curl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh | sudo bash
sudo yum install gitlab-runner
```

11. [Install Docker Community Edition](https://docs.docker.com/install/).
```bash
sudo yum install docker
```

12. [Install Docker Machine](https://docs.docker.com/machine/install-machine/).
```bash
base=https://github.com/docker/machine/releases/download/v0.16.0 &&
curl -L $base/docker-machine-$(uname -s)-$(uname -m) >/tmp/docker-machine &&
sudo install /tmp/docker-machine /usr/sbin/docker-machine
```

13. [Register the runner](https://docs.gitlab.com/runner/register/index.html).
```bash
sudo gitlab-runner register
```
Notes:
* When asked for gitlab-ci tags, enter `master,dev,dev-*`
* This will limit commits to the master or dev branches from triggering the pipeline to run. This includes a
wildcard for any branch named with the prefix `dev-`.
* When asked the executor type, enter `docker+machine`
* When asked for the default Docker image, enter `geeksaccelerator/docker-library:golang1.12-docker`

14. [Configure the GitLab Runner](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/#configuring-the-gitlab-runner)

```bash
sudo vim /etc/gitlab-runner/config.toml
```

Update the `[runners.docker]` configuration section in `config.toml` to match the example below, replacing the
obvious placeholder `XXXXX` with the relevant value.
```toml
[runners.docker]
tls_verify = false
image = "geeksaccelerator/docker-library:golang1.12-docker"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = true
volumes = ["/cache"]
shm_size = 0
[runners.cache]
Type = "s3"
Shared = true
[runners.cache.s3]
ServerAddress = "s3.us-west-2.amazonaws.com"
BucketName = "XXXXX"
BucketLocation = "us-west-2"
[runners.machine]
IdleCount = 0
IdleTime = 1800
MachineDriver = "amazonec2"
MachineName = "gitlab-runner-machine-%s"
MachineOptions = [
"amazonec2-iam-instance-profile=SaasStarterKitEc2RoleForGitLabRunner",
"amazonec2-region=us-west-2",
"amazonec2-vpc-id=XXXXX",
"amazonec2-subnet-id=XXXXX",
"amazonec2-zone=d",
"amazonec2-use-private-address=true",
"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true",
"amazonec2-security-group=gitlab-runner",
"amazonec2-instance-type=t2.large"
]
```

You will need to use the same VPC subnet and availability zone as the instance launched in step 2. We are using AWS
region `us-west-2`. The _ServerAddress_ for S3 will need to be updated if the region is changed. For `us-east-1` the
_ServerAddress_ is `s3.amazonaws.com`. Under MachineOptions you can add anything that the [AWS Docker Machine](https://docs.docker.com/machine/drivers/aws/#options)
driver supports.

Below are some example values for the placeholders to ensure the format of your values is correct.
```toml
BucketName = saas-starter-kit-usw
amazonec2-vpc-id=vpc-5f43f027
amazonec2-subnet-id=subnet-693d3110
amazonec2-zone=a
```

Once complete, restart the runner.
```bash
sudo gitlab-runner restart
```


## Examples

```bash
go run main.go deploy -service=web-app -env=dev -enable_https=true -primary_host=example.saasstartupkit.com -host_names=example.saasstartupkit.com,dev.example.saasstartupkit.com -private_bucket=saas-starter-kit-private -public_bucket=saas-starter-kit-public -public_bucket_cloudfront=true -static_files_s3=true -static_files_img_resize=1 -recreate_service=0
```
@ -1,370 +0,0 @@
package cicd

import (
"encoding/json"
"fmt"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"io/ioutil"
"net/url"
"path/filepath"
"sort"
"strconv"
"strings"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/pkg/errors"
"gopkg.in/go-playground/validator.v9"
)

const (
defaultAwsRegistryMaxImages = 1000
awsTagNameProject = "project"
awsTagNameEnv = "env"
awsTagNameName = "Name"
)

// AwsCredentials defines AWS credentials used for deployment. Unable to use roles when deploying
// using gitlab CI/CD pipeline.
type awsCredentials struct {
AccessKeyID string `validate:"required_without=UseRole"`
SecretAccessKey string `validate:"required_without=UseRole"`
Region string `validate:"required_without=UseRole"`
UseRole bool
}

// Session returns a new AWS Session used to access AWS services.
func (creds awsCredentials) Session() *session.Session {

if creds.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
sess := session.Must(session.NewSession())
if creds.Region != "" {
sess.Config.WithRegion(creds.Region)
}

return sess
}

return session.New(
&aws.Config{
Region: aws.String(creds.Region),
Credentials: credentials.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, ""),
})
}

// IamPolicyDocument defines an AWS IAM policy used for defining access for IAM roles, users, and groups.
type IamPolicyDocument struct {
Version string `json:"Version"`
Statement []IamStatementEntry `json:"Statement"`
}

// IamStatementEntry defines a single statement for an IAM policy.
type IamStatementEntry struct {
Sid string `json:"Sid"`
Effect string `json:"Effect"`
Action []string `json:"Action"`
Resource interface{} `json:"Resource"`
}

// S3Bucket defines the details needed to create a bucket that includes additional configuration.
type S3Bucket struct {
Name string `validate:"omitempty"`
Input *s3.CreateBucketInput
LifecycleRules []*s3.LifecycleRule
CORSRules []*s3.CORSRule
PublicAccessBlock *s3.PublicAccessBlockConfiguration
Policy string
}

// DB mimics the general info needed for services used to define placeholders.
type DB struct {
Host string
User string
Pass string
Database string
Driver string
DisableTLS bool
}

// URL returns the URL to connect to a database.
func (db DB) URL() string {

// Query parameters.
var q url.Values = make(map[string][]string)

// Handle SSL Mode
if db.DisableTLS {
q.Set("sslmode", "disable")
} else {
q.Set("sslmode", "require")
}

// Construct url.
dbUrl := url.URL{
Scheme: db.Driver,
User: url.UserPassword(db.User, db.Pass),
Host: db.Host,
Path: db.Database,
RawQuery: q.Encode(),
}

return dbUrl.String()
}
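For illustration (values assumed, not from this commit): with Driver "postgres", User "user", Pass "pass", Host "127.0.0.1:5433", Database "shared" and DisableTLS set to true, URL() would produce a connection string like:

```
postgres://user:pass@127.0.0.1:5433/shared?sslmode=disable
```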

// GetAwsCredentials loads the AWS Access Keys from env variables unless a role is used.
func GetAwsCredentials(targetEnv string) (awsCredentials, error) {
	var creds awsCredentials

	creds.Region = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_REGION"))

	if v := getTargetEnv(targetEnv, "AWS_USE_ROLE"); v != "" {
		creds.UseRole, _ = strconv.ParseBool(v)

		sess, err := session.NewSession()
		if err != nil {
			return creds, errors.Wrap(err, "Failed to load AWS credentials from instance")
		}

		if sess.Config != nil && sess.Config.Region != nil && *sess.Config.Region != "" {
			creds.Region = *sess.Config.Region
		} else {
			sm := ec2metadata.New(sess)
			creds.Region, err = sm.Region()
			if err != nil {
				return creds, errors.Wrap(err, "Failed to get region from AWS session")
			}
		}

		return creds, nil
	}

	creds.AccessKeyID = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_ACCESS_KEY_ID"))
	creds.SecretAccessKey = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_SECRET_ACCESS_KEY"))

	errs := validator.New().Struct(creds)
	if errs != nil {
		return creds, errs
	}

	//os.Setenv("AWS_DEFAULT_REGION", creds.Region)

	return creds, nil
}
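
// For example (illustrative): for the "prod" target env, getTargetEnv first checks
// PROD_AWS_ACCESS_KEY_ID / PROD_AWS_SECRET_ACCESS_KEY / PROD_AWS_REGION before
// falling back to the unprefixed names, while setting AWS_USE_ROLE=true skips the
// keys entirely in favor of the EC2/ECS instance role.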

// GetAwsSecretValue returns the string value for a secret stored in AWS Secrets Manager.
func GetAwsSecretValue(creds awsCredentials, secretId string) (string, error) {
	svc := secretsmanager.New(creds.Session())

	res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretId),
	})
	if err != nil {
		return "", errors.Wrapf(err, "failed to get value for secret id %s", secretId)
	}

	// Secrets stored as text are returned in SecretString; only secrets stored as
	// binary populate SecretBinary.
	if res.SecretString != nil {
		return *res.SecretString, nil
	}

	return string(res.SecretBinary), nil
}

// EcrPurgeImages deletes the oldest images in the ECR repository once the number of images
// exceeds the configured maximum, so the pipeline stays under the ECR repository image limit
// without requiring manual deletion of images.
func EcrPurgeImages(req *serviceBuildRequest) ([]*ecr.ImageIdentifier, error) {

	svc := ecr.New(req.awsSession())

	var (
		ts       []int
		tsImgIds = map[int][]*ecr.ImageIdentifier{}
	)

	// Describe all the image IDs to determine the oldest.
	err := svc.DescribeImagesPages(&ecr.DescribeImagesInput{
		RepositoryName: aws.String(req.EcrRepositoryName),
	}, func(res *ecr.DescribeImagesOutput, lastPage bool) bool {
		for _, img := range res.ImageDetails {

			imgTs := int(img.ImagePushedAt.Unix())

			if _, ok := tsImgIds[imgTs]; !ok {
				tsImgIds[imgTs] = []*ecr.ImageIdentifier{}
				ts = append(ts, imgTs)
			}

			if img.ImageTags != nil {
				tsImgIds[imgTs] = append(tsImgIds[imgTs], &ecr.ImageIdentifier{
					ImageTag: img.ImageTags[0],
				})
			} else if img.ImageDigest != nil {
				tsImgIds[imgTs] = append(tsImgIds[imgTs], &ecr.ImageIdentifier{
					ImageDigest: img.ImageDigest,
				})
			}
		}

		return !lastPage
	})
	if err != nil {
		return nil, errors.Wrapf(err, "failed to describe images for repository '%s'", req.EcrRepositoryName)
	}

	// Sort the image timestamps in reverse order, newest first.
	sort.Sort(sort.Reverse(sort.IntSlice(ts)))

	// Loop over all the timestamps, skipping the newest images until the count exceeds the limit.
	var imgCnt int
	var delIds []*ecr.ImageIdentifier
	for _, imgTs := range ts {
		for _, imgId := range tsImgIds[imgTs] {
			imgCnt = imgCnt + 1

			if imgCnt <= req.EcrRepositoryMaxImages {
				continue
			}
			delIds = append(delIds, imgId)
		}
	}

	// If there are image IDs to delete, delete them.
	if len(delIds) > 0 {
		_, err = svc.BatchDeleteImage(&ecr.BatchDeleteImageInput{
			ImageIds:       delIds,
			RepositoryName: aws.String(req.EcrRepositoryName),
		})
		if err != nil {
			return nil, errors.Wrapf(err, "failed to delete %d images for repository '%s'", len(delIds), req.EcrRepositoryName)
		}
	}

	return delIds, nil
}
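
// For example (hypothetical counts): with EcrRepositoryMaxImages set to 1000 and 1004
// images in the repository, the four images with the oldest push timestamps are
// deleted and returned.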

// SyncPublicS3Files copies the local files from the static directory to S3 with public-read enabled.
func SyncPublicS3Files(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
	uploader := s3manager.NewUploader(awsSession)

	di := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
	if err := uploader.UploadWithIterator(aws.BackgroundContext(), di); err != nil {
		return err
	}

	return nil
}

// EcsReadTaskDefinition reads an ECS task definition file and returns its raw contents,
// preferring an env-specific file when one exists.
func EcsReadTaskDefinition(serviceDir, targetEnv string) ([]byte, error) {
	checkPaths := []string{
		filepath.Join(serviceDir, fmt.Sprintf("ecs-task-definition-%s.json", targetEnv)),
		filepath.Join(serviceDir, "ecs-task-definition.json"),
	}

	var defFile string
	for _, tf := range checkPaths {
		ok, _ := exists(tf)
		if ok {
			defFile = tf
			break
		}
	}

	if defFile == "" {
		return nil, errors.Errorf("failed to locate task definition - checked %s", strings.Join(checkPaths, ", "))
	}

	dat, err := ioutil.ReadFile(defFile)
	if err != nil {
		return nil, errors.WithMessagef(err, "failed to read file %s", defFile)
	}

	return dat, nil
}

// LambdaReadFuncDefinition reads a lambda function definition file and returns its raw
// contents, preferring an env-specific file when one exists.
func LambdaReadFuncDefinition(serviceDir, targetEnv string) ([]byte, error) {
	checkPaths := []string{
		filepath.Join(serviceDir, fmt.Sprintf("lambda-func-definition-%s.json", targetEnv)),
		filepath.Join(serviceDir, "lambda-func-definition.json"),
	}

	var defFile string
	for _, tf := range checkPaths {
		ok, _ := exists(tf)
		if ok {
			defFile = tf
			break
		}
	}

	if defFile == "" {
		return nil, errors.Errorf("failed to locate function definition - checked %s", strings.Join(checkPaths, ", "))
	}

	dat, err := ioutil.ReadFile(defFile)
	if err != nil {
		return nil, errors.WithMessagef(err, "failed to read file %s", defFile)
	}

	return dat, nil
}

// LambdaS3KeyFromReleaseImage generates an S3 key from a release image.
func LambdaS3KeyFromReleaseImage(releaseImage string) string {
	it := filepath.Base(releaseImage)
	it = strings.Replace(it, ":", "/", -1)
	return filepath.Join("src/aws/lambda/", it+".zip")
}
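
// For example (illustrative image name): a release image of
// "123456789012.dkr.ecr.us-west-2.amazonaws.com/example-project:prod-web-app" yields
// the key "src/aws/lambda/example-project/prod-web-app.zip".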

// parseTaskDefinitionInput json decodes a task definition into an ecs.RegisterTaskDefinitionInput.
func parseTaskDefinitionInput(dat []byte) (*ecs.RegisterTaskDefinitionInput, error) {
	dat = convertKeys(dat)

	var taskDef *ecs.RegisterTaskDefinitionInput
	if err := json.Unmarshal(dat, &taskDef); err != nil {
		return nil, errors.WithMessagef(err, "failed to json decode task definition - %s", string(dat))
	}

	return taskDef, nil
}

// convertKeys fixes json keys so they can be unmarshaled into aws types. No AWS structs have json tags.
func convertKeys(j json.RawMessage) json.RawMessage {
	m := make(map[string]json.RawMessage)
	if err := json.Unmarshal([]byte(j), &m); err != nil {
		// Not a JSON object.
		return j
	}

	for k, v := range m {
		fixed := fixKey(k)
		delete(m, k)
		m[fixed] = convertKeys(v)
	}

	b, err := json.Marshal(m)
	if err != nil {
		return j
	}

	return json.RawMessage(b)
}
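
// For example, {"family": "web-app"} becomes {"FAMILY": "web-app"}. The all-caps keys
// produced by fixKey still decode correctly because encoding/json matches struct
// fields case-insensitively. Values nested inside JSON arrays are returned unchanged,
// which is acceptable for the same reason.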

// fixKey uppercases a json key so it aligns with the exported AWS struct field names.
func fixKey(key string) string {
	return strings.ToTitle(key)
}

// jsonEncodeStringValue json encodes string values to be used in the ECS task definition.
func jsonEncodeStringValue(str string) string {
	dat, _ := json.Marshal(str)
	return strings.Trim(string(dat), "\"")
}
@ -1,212 +0,0 @@
package cicd

import (
	"context"
	"encoding/json"
	"log"
	"path/filepath"

	"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
	"geeks-accelerator/oss/saas-starter-kit/internal/schema"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
	"github.com/lib/pq"
	"github.com/pkg/errors"
	sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
	sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
	"gopkg.in/go-playground/validator.v9"
)

// MigrateFlags defines the flags used for executing schema migration.
type MigrateFlags struct {
	// Required flags.
	Env string `validate:"oneof=dev stage prod" example:"dev"`

	// Optional flags.
	ProjectRoot string `validate:"omitempty" example:"."`
	ProjectName string `validate:"omitempty" example:"example-project"`
}

// migrateRequest defines the details needed to execute a schema migration.
type migrateRequest struct {
	Env         string `validate:"oneof=dev stage prod"`
	ProjectRoot string `validate:"required"`
	ProjectName string `validate:"required"`
	GoModFile   string `validate:"required"`
	GoModName   string `validate:"required"`

	AwsCreds    awsCredentials `validate:"required,dive,required"`
	_awsSession *session.Session

	flags MigrateFlags
}

// awsSession returns the current AWS session for the migrateRequest.
func (r *migrateRequest) awsSession() *session.Session {
	if r._awsSession == nil {
		r._awsSession = r.AwsCreds.Session()
	}

	return r._awsSession
}

// NewMigrateRequest generates a new request for executing schema migration for a given set of CLI flags.
func NewMigrateRequest(log *log.Logger, flags MigrateFlags) (*migrateRequest, error) {

	// Validate the specified CLI flags map to the struct successfully.
	log.Println("Validate flags.")
	{
		errs := validator.New().Struct(flags)
		if errs != nil {
			return nil, errs
		}
		log.Printf("\t%s\tFlags ok.", tests.Success)
	}

	// Generate a migrate request using CLI flags and AWS credentials.
	log.Println("Generate migrate request.")
	var req migrateRequest
	{

		// Define the new migrate request.
		req = migrateRequest{
			Env:         flags.Env,
			ProjectRoot: flags.ProjectRoot,
			ProjectName: flags.ProjectName,

			flags: flags,
		}

		// When the project root directory is empty or set to the current working path, search for the
		// project root by locating the go.mod file.
		log.Println("\tDetermining the project root directory.")
		{
			if req.ProjectRoot == "" || req.ProjectRoot == "." {
				log.Println("\tAttempting to locate project root directory from current working directory.")

				var err error
				req.GoModFile, err = findProjectGoModFile()
				if err != nil {
					return nil, err
				}
				req.ProjectRoot = filepath.Dir(req.GoModFile)
			} else {
				log.Printf("\t\tUsing supplied project root directory '%s'.\n", req.ProjectRoot)
				req.GoModFile = filepath.Join(req.ProjectRoot, "go.mod")
			}
			log.Printf("\t\t\tproject root: %s", req.ProjectRoot)
			log.Printf("\t\t\tgo.mod: %s", req.GoModFile)
		}

		log.Println("\tExtracting go module name from go.mod.")
		{
			var err error
			req.GoModName, err = loadGoModName(req.GoModFile)
			if err != nil {
				return nil, err
			}
			log.Printf("\t\t\tmodule name: %s", req.GoModName)
		}

		log.Println("\tDetermining the project name.")
		{
			if req.ProjectName != "" {
				log.Printf("\t\tUse provided value.")
			} else {
				req.ProjectName = filepath.Base(req.GoModName)
				log.Printf("\t\tSet from go module.")
			}
			log.Printf("\t\t\tproject name: %s", req.ProjectName)
		}

		// Verify AWS credentials specified as environment variables.
		log.Println("\tVerify AWS credentials.")
		{
			var err error
			req.AwsCreds, err = GetAwsCredentials(req.Env)
			if err != nil {
				return nil, err
			}
			if req.AwsCreds.UseRole {
				log.Printf("\t\t\tUsing role")
			} else {
				log.Printf("\t\t\tAccessKeyID: '%s'", req.AwsCreds.AccessKeyID)
			}

			log.Printf("\t\t\tRegion: '%s'", req.AwsCreds.Region)
			log.Printf("\t%s\tAWS credentials valid.", tests.Success)
		}
	}

	return &req, nil
}

// Migrate is the main entrypoint for migrating the database schema for a given target environment.
func Migrate(log *log.Logger, ctx context.Context, req *migrateRequest) error {

	// Load the database details.
	var db DB
	{
		log.Println("Get Database Details from AWS Secret Manager")

		dbId := dBInstanceIdentifier(req.ProjectName, req.Env)

		// Secret ID used to store the DB username and password across deploys.
		dbSecretId := secretID(req.ProjectName, req.Env, dbId)

		// Retrieve the current secret value if something is stored.
		{
			sm := secretsmanager.New(req.awsSession())
			res, err := sm.GetSecretValue(&secretsmanager.GetSecretValueInput{
				SecretId: aws.String(dbSecretId),
			})
			if err != nil {
				if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
					return errors.Wrapf(err, "Failed to get value for secret id %s", dbSecretId)
				} else {
					// This should only happen when the deploy script first runs and no resources exist in the
					// AWS account. Creating a database requires the VPC, along with a better strategy for
					// defining resources that can be shared between deployment steps.
					log.Printf("\t%s\tDatabase credentials not found.", tests.Failed)
					return nil
				}
			} else {
				err = json.Unmarshal([]byte(*res.SecretString), &db)
				if err != nil {
					return errors.Wrap(err, "Failed to json decode db credentials")
				}
			}

			log.Printf("\t%s\tDatabase credentials found.", tests.Success)
		}
	}

	// Open the database connection and run the migration.
	{
		log.Println("Proceed with schema migration")

		log.Printf("\t\tOpen database connection")
		// Register informs the sqlxtrace package of the driver that we will be using in our program.
		// It uses a default service name, in the below case "postgres.db". To use a custom service
		// name use RegisterWithServiceName.
		sqltrace.Register(db.Driver, &pq.Driver{}, sqltrace.WithServiceName("devops:migrate"))
		masterDb, err := sqlxtrace.Open(db.Driver, db.URL())
		if err != nil {
			return errors.WithStack(err)
		}
		defer masterDb.Close()

		// Start Migrations.
		log.Printf("\t\tStart migrations.")
		if err = schema.Migrate(ctx, masterDb, log, false); err != nil {
			return errors.WithStack(err)
		}

		log.Printf("\t%s\tMigrate complete.", tests.Success)
	}

	return nil
}
@ -1,119 +0,0 @@
package cicd

import (
	"bytes"
	"io"
	"mime"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/pkg/errors"
)

// DirectoryIterator represents an iterator of a specified directory.
type DirectoryIterator struct {
	dir       string
	filePaths []string
	bucket    string
	keyPrefix string
	acl       string
	next      struct {
		path string
		f    *os.File
	}
	err error
}

// NewDirectoryIterator builds a new DirectoryIterator.
func NewDirectoryIterator(bucket, keyPrefix, dir, acl string) s3manager.BatchUploadIterator {

	var paths []string
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		// Propagate any error encountered while walking so info is never nil below.
		if err != nil {
			return err
		}
		if !info.IsDir() {
			paths = append(paths, path)
		}
		return nil
	})

	return &DirectoryIterator{
		dir:       dir,
		filePaths: paths,
		bucket:    bucket,
		keyPrefix: keyPrefix,
		acl:       acl,
	}
}

// Next returns whether the next file exists or not.
func (di *DirectoryIterator) Next() bool {
	if len(di.filePaths) == 0 {
		di.next.f = nil
		return false
	}

	f, err := os.Open(di.filePaths[0])
	di.err = err
	di.next.f = f
	di.next.path = di.filePaths[0]
	di.filePaths = di.filePaths[1:]

	return di.Err() == nil
}

// Err returns the error of the DirectoryIterator.
func (di *DirectoryIterator) Err() error {
	return errors.WithStack(di.err)
}

// UploadObject uploads a file.
func (di *DirectoryIterator) UploadObject() s3manager.BatchUploadObject {
	f := di.next.f

	var acl *string
	if di.acl != "" {
		acl = aws.String(di.acl)
	}

	buffer, contentType, rerr := readFile(f)

	nextPath, _ := filepath.Rel(di.dir, di.next.path)

	return s3manager.BatchUploadObject{
		Object: &s3manager.UploadInput{
			Bucket:      aws.String(di.bucket),
			Key:         aws.String(filepath.Join(di.keyPrefix, nextPath)),
			Body:        bytes.NewReader(buffer),
			ContentType: aws.String(contentType),
			ACL:         acl,
		},
		After: func() error {
			if rerr != nil {
				return rerr
			}
			return f.Close()
		},
	}
}

func readFile(f *os.File) ([]byte, string, error) {
	// Get the file size and read the file contents into a buffer.
	fileInfo, err := f.Stat()
	if err != nil {
		return nil, "", err
	}
	buffer := make([]byte, fileInfo.Size())
	if _, err := io.ReadFull(f, buffer); err != nil {
		return nil, "", err
	}

	// Determine the content type from the file extension.
	ext := filepath.Ext(f.Name())
	contentType := mime.TypeByExtension(ext)

	//f.Seek(0, io.SeekStart)
	//ctBuf := make([]byte, 512)
	//f.Read(ctBuf)
	//contentType = http.DetectContentType(ctBuf)

	return buffer, contentType, nil
}
@ -1,314 +0,0 @@
package cicd

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/iancoleman/strcase"
	"github.com/pkg/errors"
)

// serviceRequest defines the common details needed to build or deploy a single service.
type serviceRequest struct {
	ServiceName string `validate:"required"`
	ServiceDir  string `validate:"required"`
	Env         string `validate:"oneof=dev stage prod"`
	ProjectRoot string `validate:"required"`
	ProjectName string `validate:"required"`
	DockerFile  string `validate:"required"`
	GoModFile   string `validate:"required"`
	GoModName   string `validate:"required"`

	AwsCreds    awsCredentials `validate:"required,dive,required"`
	_awsSession *session.Session

	ReleaseImage string
}

// ProjectNameCamel takes a project name and returns the camel cased version.
func (r *serviceRequest) ProjectNameCamel() string {
	s := strings.Replace(r.ProjectName, "_", " ", -1)
	s = strings.Replace(s, "-", " ", -1)
	s = strcase.ToCamel(s)
	return s
}

// awsSession returns the current AWS session for the serviceRequest.
func (r *serviceRequest) awsSession() *session.Session {
	if r._awsSession == nil {
		r._awsSession = r.AwsCreds.Session()
	}

	return r._awsSession
}

// init sets the basic details needed for both build and deploy for the serviceRequest.
func (req *serviceRequest) init(log *log.Logger) error {
	// When the project root directory is empty or set to the current working path, search for the project
	// root by locating the go.mod file.
	log.Println("\tDetermining the project root directory.")
	{
		if req.ProjectRoot == "" || req.ProjectRoot == "." {
			log.Println("\tAttempting to locate project root directory from current working directory.")

			var err error
			req.GoModFile, err = findProjectGoModFile()
			if err != nil {
				return err
			}
			req.ProjectRoot = filepath.Dir(req.GoModFile)
		} else {
			log.Printf("\t\tUsing supplied project root directory '%s'.\n", req.ProjectRoot)
			req.GoModFile = filepath.Join(req.ProjectRoot, "go.mod")
		}
		log.Printf("\t\t\tproject root: %s", req.ProjectRoot)
		log.Printf("\t\t\tgo.mod: %s", req.GoModFile)
	}

	log.Println("\tExtracting go module name from go.mod.")
	{
		var err error
		req.GoModName, err = loadGoModName(req.GoModFile)
		if err != nil {
			return err
		}
		log.Printf("\t\t\tmodule name: %s", req.GoModName)
	}

	log.Println("\tDetermining the project name.")
	{
		if req.ProjectName != "" {
			log.Printf("\t\tUse provided value.")
		} else {
			req.ProjectName = filepath.Base(req.GoModName)
			log.Printf("\t\tSet from go module.")
		}
		log.Printf("\t\t\tproject name: %s", req.ProjectName)
	}

	log.Println("\tAttempting to locate service directory from project root directory.")
	{
		if req.DockerFile != "" {
			log.Printf("\t\tUse provided value.")
		} else {
			log.Printf("\t\tFind from project root looking for Dockerfile.")
			var err error
			req.DockerFile, err = findServiceDockerFile(req.ProjectRoot, req.ServiceName)
			if err != nil {
				return err
			}
		}

		req.ServiceDir = filepath.Dir(req.DockerFile)

		log.Printf("\t\t\tservice directory: %s", req.ServiceDir)
		log.Printf("\t\t\tdockerfile: %s", req.DockerFile)
	}

	// Verify AWS credentials specified as environment variables.
	log.Println("\tVerify AWS credentials.")
	{
		var err error
		req.AwsCreds, err = GetAwsCredentials(req.Env)
		if err != nil {
			return err
		}
		if req.AwsCreds.UseRole {
			log.Printf("\t\t\tUsing role")
		} else {
			log.Printf("\t\t\tAccessKeyID: '%s'", req.AwsCreds.AccessKeyID)
		}

		log.Printf("\t\t\tRegion: '%s'", req.AwsCreds.Region)
		log.Printf("\t%s\tAWS credentials valid.", tests.Success)
	}

	return nil
}

// ecrRepositoryName returns the name used for the AWS ECR Repository.
func ecrRepositoryName(projectName string) string {
	return projectName
}

// releaseTag returns the tag used for a release image. It always includes the environment and
// service name, and when one of the CI commit env vars is set, a commit identifier is appended.
func releaseTag(env, serviceName string) string {

	tag1 := env + "-" + serviceName

	// Generate the tag for the release image.
	var releaseTag string
	if v := os.Getenv("BUILDINFO_CI_COMMIT_SHA"); v != "" {
		releaseTag = tag1 + "-" + v[0:8]
	} else if v := os.Getenv("CI_COMMIT_SHA"); v != "" {
		releaseTag = tag1 + "-" + v[0:8]
	} else if v := os.Getenv("BUILDINFO_CI_COMMIT_REF_NAME"); v != "" {
		releaseTag = tag1 + "-" + v
	} else if v := os.Getenv("CI_COMMIT_REF_NAME"); v != "" {
		releaseTag = tag1 + "-" + v
	} else {
		releaseTag = tag1
	}
	return releaseTag
}
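
// For example (hypothetical values): with env "prod", service "web-app" and
// CI_COMMIT_SHA "1ecfd275abcdef...", the resulting tag is "prod-web-app-1ecfd275".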

// releaseImage returns the full release image name: the repository URI combined with the
// release tag for the given environment and service name.
func releaseImage(env, serviceName, repositoryUri string) string {
	return repositoryUri + ":" + releaseTag(env, serviceName)
}

// dBInstanceIdentifier returns the database instance identifier.
func dBInstanceIdentifier(projectName, env string) string {
	return projectName + "-" + env
}

// secretID returns the secret name with a standard prefix.
func secretID(projectName, env, secretName string) string {
	return filepath.Join(projectName, env, secretName)
}

// findProjectGoModFile finds the project root directory from the current working directory.
func findProjectGoModFile() (string, error) {
	projectRoot, err := os.Getwd()
	if err != nil {
		return "", errors.WithMessage(err, "failed to get current working directory")
	}

	// Try to find the project root by looking for the go.mod file in a parent directory.
	var goModFile string
	testDir := projectRoot
	for i := 0; i < 3; i++ {
		if goModFile != "" {
			testDir = filepath.Join(testDir, "../")
		}
		goModFile = filepath.Join(testDir, "go.mod")
		ok, _ := exists(goModFile)
		if ok {
			projectRoot = testDir
			break
		}
	}

	// Verify the go.mod file was found.
	ok, err := exists(goModFile)
	if err != nil {
		return "", errors.WithMessagef(err, "failed to load go.mod for project using project root %s", projectRoot)
	} else if !ok {
		return "", errors.Errorf("failed to locate project go.mod in project root %s", projectRoot)
	}

	return goModFile, nil
}

// findServiceDockerFile finds the service directory.
func findServiceDockerFile(projectRoot, targetService string) (string, error) {
	checkDirs := []string{
		filepath.Join(projectRoot, "cmd", targetService),
		filepath.Join(projectRoot, "tools", targetService),
	}

	var dockerFile string
	for _, cd := range checkDirs {
		// Check to see if the directory contains a Dockerfile.
		tf := filepath.Join(cd, "Dockerfile")

		ok, _ := exists(tf)
		if ok {
			dockerFile = tf
			break
		}
	}

	if dockerFile == "" {
		return "", errors.Errorf("failed to locate Dockerfile for service %s", targetService)
	}

	return dockerFile, nil
}

// getTargetEnv checks for an env var that is prefixed with the current target env.
func getTargetEnv(targetEnv, envName string) string {
	k := fmt.Sprintf("%s_%s", strings.ToUpper(targetEnv), envName)

	if v := os.Getenv(k); v != "" {
		// Set the non-prefixed env var with the prefixed value.
		os.Setenv(envName, v)
		return v
	}

	return os.Getenv(envName)
}
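
// For example, getTargetEnv("prod", "AWS_REGION") checks PROD_AWS_REGION first and
// falls back to AWS_REGION, copying the prefixed value into the unprefixed variable
// when found.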

// loadGoModName parses out the module name from go.mod.
func loadGoModName(goModFile string) (string, error) {
	ok, err := exists(goModFile)
	if err != nil {
		return "", errors.WithMessage(err, "Failed to load go.mod for project")
	} else if !ok {
		return "", errors.Errorf("Failed to locate project go.mod at %s", goModFile)
	}

	b, err := ioutil.ReadFile(goModFile)
	if err != nil {
		return "", errors.WithMessagef(err, "Failed to read go.mod at %s", goModFile)
	}

	var name string
	lines := strings.Split(string(b), "\n")
	for _, l := range lines {
		if strings.HasPrefix(l, "module ") {
			name = strings.TrimSpace(strings.Split(l, " ")[1])
			break
		}
	}

	return name, nil
}

// exists returns a bool as to whether a file path exists.
func exists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return true, err
}

// execCmds executes a set of commands using the current env variables.
func execCmds(log *log.Logger, workDir string, cmds ...[]string) error {
	for _, cmdVals := range cmds {
		cmd := exec.Command(cmdVals[0], cmdVals[1:]...)
		cmd.Dir = workDir
		cmd.Env = os.Environ()

		cmd.Stderr = log.Writer()
		cmd.Stdout = log.Writer()

		err := cmd.Run()

		if err != nil {
			return errors.WithMessagef(err, "failed to execute %s", strings.Join(cmdVals, " "))
		}
	}

	return nil
}
@ -1,449 +0,0 @@
package cicd

import (
	"bufio"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"

	"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ecr"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/pborman/uuid"
	"github.com/pkg/errors"
	"gopkg.in/go-playground/validator.v9"
)

// ServiceBuildFlags defines the flags used for executing a service build.
type ServiceBuildFlags struct {
	// Required flags.
	ServiceName string `validate:"required" example:"web-api"`
	Env         string `validate:"oneof=dev stage prod" example:"dev"`

	// Optional flags.
	ProjectRoot         string `validate:"omitempty" example:"."`
	ProjectName         string `validate:"omitempty" example:"example-project"`
	DockerFile          string `validate:"omitempty" example:"./cmd/web-api/Dockerfile"`
	CommitRef           string `validate:"omitempty" example:"master@1ecfd275"`
	S3BucketPrivateName string `validate:"omitempty" example:"saas-example-project-private"`
	BuildDir            string `validate:"omitempty" example:"."`
	NoCache             bool   `validate:"omitempty" example:"false"`
	NoPush              bool   `validate:"omitempty" example:"false"`
	IsLambda            bool   `validate:"omitempty" example:"false"`
}

// serviceBuildRequest defines the details needed to execute a service build.
type serviceBuildRequest struct {
	*serviceRequest

	EcrRepositoryName      string `validate:"required"`
	EcrRepository          *ecr.CreateRepositoryInput
	EcrRepositoryMaxImages int `validate:"omitempty"`

	BuildDir            string `validate:"omitempty"`
	CommitRef           string `validate:"omitempty"`
	S3BucketPrivateName string `validate:"omitempty"`
	NoCache             bool   `validate:"omitempty"`
	NoPush              bool   `validate:"omitempty"`
	IsLambda            bool   `validate:"omitempty"`

	flags ServiceBuildFlags
}

// NewServiceBuildRequest generates a new request for executing a build of a single service for a given set of CLI flags.
func NewServiceBuildRequest(log *log.Logger, flags ServiceBuildFlags) (*serviceBuildRequest, error) {

	// Validate the specified CLI flags map to the struct successfully.
	log.Println("Validate flags.")
	{
		errs := validator.New().Struct(flags)
		if errs != nil {
			return nil, errs
		}
		log.Printf("\t%s\tFlags ok.", tests.Success)
	}

	// Generate a build request using CLI flags and AWS credentials.
	log.Println("Generate build request.")
	var req serviceBuildRequest
	{
		// Define the new service request.
		sr := &serviceRequest{
			ServiceName: flags.ServiceName,
			Env:         flags.Env,
			ProjectRoot: flags.ProjectRoot,
			ProjectName: flags.ProjectName,
			DockerFile:  flags.DockerFile,
		}
		if err := sr.init(log); err != nil {
			return nil, err
		}

		req = serviceBuildRequest{
			serviceRequest: sr,

			CommitRef:           flags.CommitRef,
			S3BucketPrivateName: flags.S3BucketPrivateName,
			BuildDir:            flags.BuildDir,
			NoCache:             flags.NoCache,
			NoPush:              flags.NoPush,
			IsLambda:            flags.IsLambda,

			flags: flags,
		}

		if req.BuildDir == "" {
			req.BuildDir = req.ProjectRoot
		}

		// Set the default AWS ECR Repository Name.
		req.EcrRepositoryName = ecrRepositoryName(req.ProjectName)
		req.EcrRepository = &ecr.CreateRepositoryInput{
			RepositoryName: aws.String(req.EcrRepositoryName),
			Tags: []*ecr.Tag{
				{Key: aws.String(awsTagNameProject), Value: aws.String(req.ProjectName)},
				{Key: aws.String(awsTagNameEnv), Value: aws.String(req.Env)},
			},
		}
		log.Printf("\t\t\tSet ECR Repository Name to '%s'.", req.EcrRepositoryName)

		// Set the default AWS ECR Registry Max Images.
		req.EcrRepositoryMaxImages = defaultAwsRegistryMaxImages
		log.Printf("\t\t\tSet ECR Registry Max Images to '%d'.", req.EcrRepositoryMaxImages)

		// Get the default commit ref.
		if req.CommitRef == "" {
			if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
				req.CommitRef = "tag-" + ev
			} else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
				req.CommitRef = "branch-" + ev
			}

			if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
				req.CommitRef = req.CommitRef + "@" + ev
			}
		}
	}

	return &req, nil
}

// ServiceBuild is the main entrypoint for building a service for a given target environment.
func ServiceBuild(log *log.Logger, req *serviceBuildRequest) error {

	// Load the AWS ECR repository. Try to find it by name, else create a new one.
	var dockerLoginCmd []string
	{
		log.Println("ECR - Get or create repository.")

		svc := ecr.New(req.awsSession())

		// First try to find the ECR repository by name.
		var awsRepo *ecr.Repository
		descRes, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
			RepositoryNames: []*string{aws.String(req.EcrRepositoryName)},
		})
		if err != nil {
			if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != ecr.ErrCodeRepositoryNotFoundException {
				return errors.Wrapf(err, "failed to describe repository '%s'", req.EcrRepositoryName)
			}
		} else if len(descRes.Repositories) > 0 {
			awsRepo = descRes.Repositories[0]
		}

		if awsRepo == nil {
			// If no repository was found, create one.
			createRes, err := svc.CreateRepository(req.EcrRepository)
			if err != nil {
				return errors.Wrapf(err, "failed to create repository '%s'", req.EcrRepositoryName)
			}
			awsRepo = createRes.Repository
			log.Printf("\t\tCreated: %s.", *awsRepo.RepositoryArn)
		} else {
			log.Printf("\t\tFound: %s.", *awsRepo.RepositoryArn)

			log.Println("\t\tChecking old ECR images.")
			delIds, err := EcrPurgeImages(req)
			if err != nil {
				return err
			}

			// Since ECR has a max number of repository images, old ones are deleted to stay under the limit.
			// If any image IDs were deleted, log them.
			if len(delIds) > 0 {
				log.Printf("\t\tDeleted %d images that exceeded limit of %d", len(delIds), req.EcrRepositoryMaxImages)
				for _, imgId := range delIds {
					// Guard against untagged images that are identified only by digest.
					if imgId.ImageTag != nil {
						log.Printf("\t\t\t%s", *imgId.ImageTag)
					} else if imgId.ImageDigest != nil {
						log.Printf("\t\t\t%s", *imgId.ImageDigest)
					}
				}
			}
		}

		req.ReleaseImage = releaseImage(req.Env, req.ServiceName, *awsRepo.RepositoryUri)

		log.Printf("\t\trelease image: %s", req.ReleaseImage)
		log.Printf("\t%s\tRelease image valid.", tests.Success)

		log.Println("ECR - Retrieve authorization token used for docker login.")

		// Get the credentials necessary for logging into the AWS Elastic Container Registry
		// made available with the AWS access key and AWS secret access keys.
		res, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
		if err != nil {
			return errors.Wrap(err, "failed to get ecr authorization token")
		}

		authToken, err := base64.StdEncoding.DecodeString(*res.AuthorizationData[0].AuthorizationToken)
		if err != nil {
			return errors.Wrap(err, "failed to base64 decode ecr authorization token")
		}
		pts := strings.Split(string(authToken), ":")
		user := pts[0]
		pass := pts[1]

		dockerLoginCmd = []string{
			"docker",
			"login",
			"-u", user,
			"-p", pass,
			*res.AuthorizationData[0].ProxyEndpoint,
		}

		log.Printf("\t%s\tdocker login ok.", tests.Success)
	}

	// Once we can access the repository in ECR, do the docker build.
	{
		log.Printf("Starting docker build %s\n", req.ReleaseImage)

		var dockerFile string
		dockerPath := filepath.Join(req.BuildDir, req.DockerFile)
		if _, err := os.Stat(dockerPath); err == nil {
			dockerFile = req.DockerFile
		} else {
			dockerPath = req.DockerFile

			dockerFile, err = filepath.Rel(req.BuildDir, dockerPath)
			if err != nil {
				return errors.Wrapf(err, "Failed to parse relative path for %s from %s", dockerPath, req.BuildDir)
			}
		}

		// Name of the first build stage declared in the dockerFile.
		var buildStageName string

		// When the dockerFile is multistage, caching can be applied. Scan the dockerFile for the first stage, e.g.:
		// FROM golang:1.12.6-alpine3.9 AS build_base
		var buildBaseImageTag string
		{
			file, err := os.Open(dockerPath)
			if err != nil {
				return errors.WithStack(err)
			}
			defer file.Close()

			// List of lines in the dockerFile for the first stage. This will be used to tag the image to help ensure
			// any changes to the lines associated with the first stage force the cache to be reset.
			var stageLines []string

			// Loop through all the lines in the Dockerfile searching for the lines associated with the first build stage.
			scanner := bufio.NewScanner(file)
			for scanner.Scan() {
				line := scanner.Text()

				lineLower := strings.ToLower(line)

				if strings.HasPrefix(lineLower, "from ") {
					if buildStageName != "" {
						// Only need to scan the lines for the first build stage. Break when we reach the next FROM.
						break
					} else if !strings.Contains(lineLower, " as ") {
						// Caching is only supported if the first FROM has a name.
						log.Printf("\t\t\tSkipping stage cache, build stage not detected.\n")
						break
					}

					buildStageName = strings.TrimSpace(strings.Split(lineLower, " as ")[1])
					stageLines = append(stageLines, line)
				} else if buildStageName != "" {
					stageLines = append(stageLines, line)
				}
			}

			if err := scanner.Err(); err != nil {
				return errors.WithStack(err)
			}

			// If we have detected a build stage, then generate the appropriate tag.
			if buildStageName != "" {
				log.Printf("\t\tFound build stage %s for caching.\n", buildStageName)

				// Generate a checksum for the lines associated with the build stage.
				buildBaseHashPts := []string{
					fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(stageLines, "\n")))),
				}

				switch buildStageName {
				case "build_base_golang":
					// Include the checksum of go.sum so dependency changes also reset the cache.
					goSumPath := filepath.Join(req.ProjectRoot, "go.sum")
					goSumDat, err := ioutil.ReadFile(goSumPath)
					if err != nil {
						return errors.Wrapf(err, "Failed to read %s", goSumPath)
					}
					buildBaseHashPts = append(buildBaseHashPts, fmt.Sprintf("%x", md5.Sum(goSumDat)))
				}

				// Combine all the checksums to be used to tag the target build stage.
				buildBaseHash := fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(buildBaseHashPts, "|"))))

				// New stage image tag.
				buildBaseImageTag = buildStageName + "-" + buildBaseHash[0:8]
			}
		}

		var cmds [][]string

		// Enable caching of the first build stage defined in the dockerFile.
		var buildBaseImage string
		if !req.NoCache && buildBaseImageTag != "" {
			var pushTargetImg bool
			if ciReg := os.Getenv("CI_REGISTRY"); ciReg != "" {
				cmds = append(cmds, []string{
					"docker", "login",
					"-u", os.Getenv("CI_REGISTRY_USER"),
					"-p", os.Getenv("CI_REGISTRY_PASSWORD"),
					ciReg})

				buildBaseImage = os.Getenv("CI_REGISTRY_IMAGE") + ":" + buildBaseImageTag
				pushTargetImg = true
			} else {
				buildBaseImage = req.ProjectName + ":" + req.Env + "-" + req.ServiceName + "-" + buildBaseImageTag
			}

			cmds = append(cmds, []string{"docker", "pull", buildBaseImage})

			cmds = append(cmds, []string{
				"docker", "build",
				"--file=" + dockerFile,
				"--cache-from", buildBaseImage,
				"--build-arg", "service=" + req.ServiceName,
				"--build-arg", "env=" + req.Env,
				"-t", buildBaseImage,
				"--target", buildStageName,
				".",
			})

			if pushTargetImg {
				cmds = append(cmds, []string{"docker", "push", buildBaseImage})
			}
		}

		// The initial build command slice.
		buildCmd := []string{
			"docker", "build",
			"--file=" + dockerFile,
			"--build-arg", "service=" + req.ServiceName,
			"--build-arg", "env=" + req.Env,
			"--build-arg", "commit_ref=" + req.CommitRef,
			"--build-arg", "swagInit=1",
			"-t", req.ReleaseImage,
		}

		// Append additional build flags.
		if req.NoCache {
			buildCmd = append(buildCmd, "--no-cache")
		} else if buildBaseImage != "" {
			buildCmd = append(buildCmd, "--cache-from", buildBaseImage)
		}

		// Finally append the build context as the current directory since os.Exec will use the project root as
		// the working directory.
		buildCmd = append(buildCmd, ".")

		cmds = append(cmds, buildCmd)

		s3Files := make(map[string]*s3manager.UploadInput)
		if !req.NoPush {
			if req.IsLambda {

				lambdaS3Key := LambdaS3KeyFromReleaseImage(req.ReleaseImage)

				tmpDir := os.TempDir()
				lambdaZip := filepath.Join(tmpDir, filepath.Base(lambdaS3Key))

				containerName := uuid.NewRandom().String()

				cmds = append(cmds, []string{"docker", "create", "-ti", "--name", containerName, req.ReleaseImage, "bash"})
				cmds = append(cmds, []string{"docker", "cp", containerName + ":/var/task", tmpDir})
				cmds = append(cmds, []string{"docker", "rm", containerName})
				// "cd" is a shell builtin, not an executable, so run the zip step through a
				// shell to execute it from inside the extracted task directory.
				cmds = append(cmds, []string{"bash", "-c", "cd " + tmpDir + "/task && zip -r " + lambdaZip + " ."})

				s3Files[lambdaZip] = &s3manager.UploadInput{
					Bucket: &req.S3BucketPrivateName,
					Key:    &lambdaS3Key,
				}

			} else {
				cmds = append(cmds, dockerLoginCmd)
				cmds = append(cmds, []string{"docker", "push", req.ReleaseImage})
			}
		}

		for _, cmd := range cmds {
			var logCmd string
			if len(cmd) >= 2 && cmd[1] == "login" {
				// Avoid logging registry credentials.
				logCmd = strings.Join(cmd[0:2], " ")
			} else {
				logCmd = strings.Join(cmd, " ")
			}

			log.Printf("\t\t%s\n", logCmd)

			err := execCmds(log, req.BuildDir, cmd)
			if err != nil {
				if len(cmd) > 2 && cmd[1] == "pull" {
					// A failed pull simply means there is no cache image yet; continue with the build.
					log.Printf("\t\t\tSkipping pull - %s\n", err.Error())
				} else {
					return errors.Wrapf(err, "Failed to exec %s", strings.Join(cmd, " "))
				}
			}
		}

		if len(s3Files) > 0 {
			// Create an uploader with the session and default options.
			uploader := s3manager.NewUploader(req.awsSession())

			// Perform the uploads.
			for lf, upParams := range s3Files {
				f, err := os.Open(lf)
				if err != nil {
					return errors.Wrapf(err, "Failed to open file %s", lf)
				}
				upParams.Body = f

				_, err = uploader.Upload(upParams)
				if err != nil {
					return errors.Wrapf(err, "Failed to upload file to %s", *upParams.Key)
				}

				log.Printf("\t\tUploaded %s to s3://%s/%s\n", lf, *upParams.Bucket, *upParams.Key)
			}
		}

		log.Printf("\t%s\tbuild complete.\n", tests.Success)
	}

	return nil
}
File diff suppressed because it is too large
@ -1,85 +0,0 @@
// Package retry contains a simple retry mechanism defined by a slice of delay
// times. There are no maximum retries accounted for here. If retries should be
// limited, use a Timeout context to keep from retrying forever. This should
// probably be made into something more robust.
package retry

import (
	"context"
	"time"
)

// DefaultPollIntervals is a slice of the delays before re-checking the status on
// an executing query, backing off from a short delay at first. This sequence
// has been selected with Athena queries in mind, which may operate very
// quickly for things like schema manipulation, or which may run for an
// extended period of time, when running an actual data analysis query.
// Long-running queries will exhaust their rapid retries quickly, and fall back
// to checking every few seconds or longer.
var DefaultPollIntervals = []time.Duration{
	time.Millisecond,
	2 * time.Millisecond,
	2 * time.Millisecond,
	5 * time.Millisecond,
	10 * time.Millisecond,
	20 * time.Millisecond,
	50 * time.Millisecond,
	50 * time.Millisecond,
	100 * time.Millisecond,
	100 * time.Millisecond,
	200 * time.Millisecond,
	500 * time.Millisecond,
	time.Second,
	2 * time.Second,
	5 * time.Second,
	10 * time.Second,
	20 * time.Second,
	30 * time.Second,
	time.Minute,
}

// delayer keeps track of the current delay between retries.
type delayer struct {
	Delays       []time.Duration
	currentIndex int
}

// Delay returns the current delay duration, and advances the index to the next
// delay defined. If the index has reached the end of the delay slice, then it
// will continue to return the maximum delay defined.
func (d *delayer) Delay() time.Duration {
	t := d.Delays[d.currentIndex]
	if d.currentIndex < len(d.Delays)-1 {
		d.currentIndex++
	}
	return t
}
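
// For example, with Delays of [1ms, 2ms, 5ms], successive calls to Delay return
// 1ms, 2ms, 5ms, 5ms, 5ms, ... repeating the final value indefinitely.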

// Retry uses a slice of time.Duration interval delays to retry a function
// until it either errors or indicates that it is ready to proceed. If f
// returns true, or an error, the retry loop is broken. Pass a closure as f if
// you need to record a value from the operation that you are performing inside
// f.
func Retry(ctx context.Context, retryIntervals []time.Duration, f func() (bool, error)) error {
	if len(retryIntervals) == 0 {
		retryIntervals = DefaultPollIntervals
	}

	d := delayer{Delays: retryIntervals}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			ok, err := f()
			if err != nil {
				return err
			}
			if ok {
				return nil
			}
			time.Sleep(d.Delay())
		}
	}
}
@ -1,86 +0,0 @@
package retry

import (
	"context"
	"errors"
	"testing"
	"time"
)

var errExpectedFailure = errors.New("expected failure for test purposes")

func TestDelayer(t *testing.T) {
	delays := []time.Duration{
		time.Millisecond,
		2 * time.Millisecond,
		4 * time.Millisecond,
		10 * time.Millisecond,
	}
	tt := []struct {
		desc       string
		numRetries int
		expDelay   time.Duration
	}{
		{"first try", 0, time.Millisecond},
		{"second try", 1, 2 * time.Millisecond},
		{"len(delays) try", len(delays) - 1, delays[len(delays)-1]},
		{"len(delays) + 1 try", len(delays), delays[len(delays)-1]},
		{"len(delays) * 2 try", len(delays) * 2, delays[len(delays)-1]},
	}

	for _, tc := range tt {
		t.Run(tc.desc, func(t *testing.T) {
			var (
				d     = delayer{Delays: delays}
				delay time.Duration
			)
			for i := tc.numRetries + 1; i > 0; i-- {
				delay = d.Delay()
			}
			if delay != tc.expDelay {
				t.Fatalf(
					"expected delay of %s after %d retries, but got %s",
					tc.expDelay, tc.numRetries, delay)
			}
		})
	}
}

func TestRetry(t *testing.T) {
	delays := []time.Duration{
		time.Millisecond,
		2 * time.Millisecond,
		3 * time.Millisecond,
	}
	tt := []struct {
		desc    string
		tries   int
		success bool
		err     error
	}{
		{"first try", 1, true, nil},
		{"second try error", 2, false, errExpectedFailure},
		{"third try success", 3, true, nil},
	}
	for _, tc := range tt {
		t.Run(tc.desc, func(t *testing.T) {
			tries := 0
			retryFunc := func() (bool, error) {
				tries++
				if tries == tc.tries {
					return tc.success, tc.err
				}
				t.Logf("try #%d unsuccessful: trying again up to %d times", tries, tc.tries)
				return false, nil
			}
			err := Retry(context.Background(), delays, retryFunc)
			if err != tc.err {
				// Report the expected error first and the actual error second.
				t.Fatalf("expected error %s, but got error %s", tc.err, err)
			}
			if tries != tc.tries {
				t.Fatalf("expected %d tries, but tried %d times", tc.tries, tries)
			}
		})
	}
}
@ -1,162 +0,0 @@
package main

import (
	"context"
	"expvar"
	"log"
	"os"
	"strings"
	"time"

	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
	"geeks-accelerator/oss/saas-starter-kit/tools/devops/cmd/cicd"
	_ "github.com/lib/pq"
	"github.com/urfave/cli"
)

// build is the git version of this program. It is set using build flags in the makefile.
var build = "develop"

// service is the name of the program used for logging, tracing and the
// prefix used for loading env variables
// ie: export TRUSS_ENV=dev
var service = "DEVOPS"

func main() {
	// =========================================================================
	// Logging

	log := log.New(os.Stdout, service+" : ", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)

	// =========================================================================
	// Log App Info

	// Print the build version for our logs. Also expose it under /debug/vars.
	expvar.NewString("build").Set(build)
	log.Printf("main : Started : Application Initializing version %q", build)
	defer log.Println("main : Completed")

	log.Printf("main : Args: %s", strings.Join(os.Args, " "))

	// =========================================================================
	// Start Truss

	var (
		buildFlags   cicd.ServiceBuildFlags
		deployFlags  cicd.ServiceDeployFlags
		migrateFlags cicd.MigrateFlags
	)

	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:  "build",
			Usage: "-service=web-api -env=dev",
			Flags: []cli.Flag{
				cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &buildFlags.ServiceName},
				cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &buildFlags.Env},
				cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &buildFlags.DockerFile},
				cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &buildFlags.ProjectRoot},
				cli.StringFlag{Name: "project", Usage: "name of project", Destination: &buildFlags.ProjectName},
				cli.StringFlag{Name: "build_dir", Usage: "build context directory", Destination: &buildFlags.BuildDir},
				cli.StringFlag{Name: "private_bucket", Usage: "private S3 bucket name", Destination: &buildFlags.S3BucketPrivateName},
				cli.BoolFlag{Name: "lambda", Usage: "build as lambda function", Destination: &buildFlags.IsLambda},
				cli.BoolFlag{Name: "no_cache", Usage: "skip docker cache", Destination: &buildFlags.NoCache},
				cli.BoolFlag{Name: "no_push", Usage: "skip docker push after build", Destination: &buildFlags.NoPush},
			},
			Action: func(c *cli.Context) error {
				req, err := cicd.NewServiceBuildRequest(log, buildFlags)
				if err != nil {
					return err
				}
				return cicd.ServiceBuild(log, req)
			},
		},
		{
			Name:  "deploy",
			Usage: "-service=web-api -env=dev",
			Flags: []cli.Flag{
				cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &deployFlags.ServiceName},
				cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &deployFlags.Env},
				cli.BoolFlag{Name: "enable_https", Usage: "enable HTTPS", Destination: &deployFlags.EnableHTTPS},
				cli.StringFlag{Name: "primary_host", Usage: "primary host name for the service", Destination: &deployFlags.ServiceHostPrimary},
				cli.StringSliceFlag{Name: "host_names", Usage: "additional host names for the service", Value: &deployFlags.ServiceHostNames},
				cli.StringFlag{Name: "private_bucket", Usage: "private S3 bucket name", Destination: &deployFlags.S3BucketPrivateName},
				cli.StringFlag{Name: "public_bucket", Usage: "public S3 bucket name", Destination: &deployFlags.S3BucketPublicName},
				cli.BoolFlag{Name: "public_bucket_cloudfront", Usage: "serve static files from Cloudfront", Destination: &deployFlags.S3BucketPublicCloudfront},
				cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &deployFlags.DockerFile},
				cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &deployFlags.ProjectRoot},
				cli.StringFlag{Name: "project", Usage: "name of project", Destination: &deployFlags.ProjectName},
				cli.BoolFlag{Name: "enable_elb", Usage: "enable deployment to use an Elastic Load Balancer", Destination: &deployFlags.EnableEcsElb},
				cli.BoolTFlag{Name: "lambda_vpc", Usage: "deploy lambda behind VPC", Destination: &deployFlags.EnableLambdaVPC},
				cli.BoolFlag{Name: "static_files_s3", Usage: "serve static files from S3", Destination: &deployFlags.StaticFilesS3Enable},
				cli.BoolFlag{Name: "static_files_img_resize", Usage: "enable image resizing for static files", Destination: &deployFlags.StaticFilesImgResizeEnable},
				cli.BoolFlag{Name: "recreate_service", Usage: "force the service to be recreated", Destination: &deployFlags.RecreateService},
			},
			Action: func(c *cli.Context) error {
				// Support a single comma-delimited value for host_names in addition to repeated flags.
				if len(deployFlags.ServiceHostNames.Value()) == 1 {
					var hostNames []string
					for _, inpVal := range deployFlags.ServiceHostNames.Value() {
						pts := strings.Split(inpVal, ",")

						for _, h := range pts {
							h = strings.TrimSpace(h)
							if h != "" {
								hostNames = append(hostNames, h)
							}
						}
					}

					deployFlags.ServiceHostNames = hostNames
				}

				req, err := cicd.NewServiceDeployRequest(log, deployFlags)
				if err != nil {
					return err
				}

				// Set the context with the required values to
				// process the request.
				v := webcontext.Values{
					Now: time.Now(),
					Env: req.Env,
				}
				ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)

				return cicd.ServiceDeploy(log, ctx, req)
			},
		},
|
||||
		{
			Name:  "migrate",
			Usage: "-env=dev",
			Flags: []cli.Flag{
				cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &migrateFlags.Env},
				cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &migrateFlags.ProjectRoot},
				cli.StringFlag{Name: "project", Usage: "name of project", Destination: &migrateFlags.ProjectName},
			},
			Action: func(c *cli.Context) error {
				req, err := cicd.NewMigrateRequest(log, migrateFlags)
				if err != nil {
					return err
				}

				// Set the context with the required values to
				// process the request.
				v := webcontext.Values{
					Now: time.Now(),
					Env: req.Env,
				}
				ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)

				return cicd.Migrate(log, ctx, req)
			},
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		log.Fatalf("main : Truss : %+v", err)
	}

	log.Printf("main : Truss : Completed")
}
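
Putting the commands above together, the retired tool was invoked roughly as follows; this is a sketch based only on the Usage strings above, with hypothetical flag values:

```bash
$ ./devops deploy -service=web-api -env=dev -host_names=example.com,api.example.com
$ ./devops migrate -env=dev
```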
@ -1,8 +0,0 @@
SHELL := /bin/bash

install:
	go install .

build:
	go install .

@ -1,24 +0,0 @@
#!/usr/bin/env bash

doPush=0
if [[ "${CI_REGISTRY_IMAGE}" != "" ]]; then
	# Running inside GitLab CI; log in to the registry and tag the image for push.
	docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}

	releaseImg="${CI_REGISTRY_IMAGE}:devops-${CI_COMMIT_REF_NAME}"
	doPush=1
else
	releaseImg="devops"
fi

echo "release image: ${releaseImg}"

# Pull the previous image (if any) so its layers can seed the build cache.
docker pull ${releaseImg} || true

docker build -f tools/devops/Dockerfile --cache-from ${releaseImg} -t ${releaseImg} .

if [[ $doPush == 1 ]]; then
	docker push ${releaseImg}
fi

# Extract the compiled devops binary from the image for use by later CI jobs.
docker run --rm --entrypoint=cat ${releaseImg} /go/bin/devops > devops
chmod +x devops
2
tools/schema/.gitignore
vendored
@ -1,2 +1,2 @@
schema
local.env
.local.env

@ -1,68 +1,103 @@

# SaaS Schema

Copyright 2019, Geeks Accelerator
accelerator@geeksinthewoods.com


## Description

_schema_ is a command line tool for local development that executes the database migrations for the project.


<!-- toc -->

- [Overview](#overview)
- [Installation](#installation)
- [Usage](#usage)
  * [Commands](#commands)
  * [Examples](#examples)
- [Join us on Gopher Slack](#join-us-on-gopher-slack)

<!-- tocstop -->


## Overview

The command line tool executes the database migrations defined in
[internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema). This tool
should be used to test and deploy schema migrations against your local development database (hosted by docker).

For additional details regarding this tool, refer to
[build/cicd](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd#schema-migrations).
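
A typical local run is sketched below; the compose service name `postgres` is an assumption, use whatever name the project's docker-compose.yaml defines:

```bash
# Start the local Postgres instance, then apply the migrations against it.
$ docker-compose up -d postgres
$ go run main.go migrate
```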


## Installation

Make sure you have a working Go environment. Go version 1.2+ is supported. [See
the install instructions for Go](http://golang.org/doc/install.html).
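
A minimal sketch, assuming the command is run from the tool's own directory (tools/schema):

```bash
# Install the schema binary into $GOPATH/bin.
$ go install .
```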


## Usage

### Build
```bash
go build .
```

### Usage
```bash
$ go run main.go [global options] command [command options] [arguments...]
```

Run the compiled binary with `-h` to list the supported options:

```bash
./schema -h

Usage of ./schema
  --env string <dev>
  --db_host string <127.0.0.1:5433>
  --db_user string <postgres>
  --db_pass string <postgres>
  --db_database string <shared>
  --db_driver string <postgres>
  --db_timezone string <utc>
  --db_disabletls bool <false>
```

### Global Options

* Show help

  `--help, -h`

* Print the version

  `--version, -v`

### Commands

* `migrate` - Executes the database migrations defined in
  [internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema) for local
  development. Default values are set for all command options that target the Postgres database running via
  [docker compose](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/docker-compose.yaml#L11).
  Environment variables can be set as an alternative to passing in the command line options; see the example after
  the options list.

    ```bash
    $ go run main.go migrate [command options]
    ```

    Options:
    ```bash
    --env value         target environment, one of [dev, stage, prod] (default: "dev") [$ENV]
    --host value        host (default: "127.0.0.1:5433") [$SCHEMA_DB_HOST]
    --user value        username (default: "postgres") [$SCHEMA_DB_USER]
    --pass value        password (default: "postgres") [$SCHEMA_DB_PASS]
    --database value    name of the default database (default: "shared") [$SCHEMA_DB_DATABASE]
    --driver value      database driver to use for the connection (default: "postgres") [$SCHEMA_DB_DRIVER]
    --disable-tls       disable TLS for the database connection [$SCHEMA_DB_DISABLE_TLS]
    ```
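
  For example, the same options can be supplied through the environment variables noted above (the values shown are the documented defaults):

    ```bash
    $ export SCHEMA_DB_HOST=127.0.0.1:5433
    $ export SCHEMA_DB_USER=postgres
    $ export SCHEMA_DB_PASS=postgres
    $ go run main.go migrate
    ```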

* `help` - Shows a list of commands

    ```bash
    $ go run main.go help
    ```

  Or for one command:
    ```bash
    $ go run main.go help migrate
    ```


### Examples

Execute the database migrations against the local Postgres database.
```bash
$ go run main.go migrate
```

### Execution

Manually execute the binary after building:
```bash
./schema
Schema : 2019/05/25 08:20:08.152557 main.go:64: main : Started : Application Initializing version "develop"
Schema : 2019/05/25 08:20:08.152814 main.go:75: main : Config : {
    "Env": "dev",
    "DB": {
        "Host": "127.0.0.1:5433",
        "User": "postgres",
        "Database": "shared",
        "Driver": "postgres",
        "Timezone": "utc",
        "DisableTLS": true
    }
}
Schema : 2019/05/25 08:20:08.158270 sqlxmigrate.go:478: HasTable migrations - SELECT 1 FROM migrations
Schema : 2019/05/25 08:20:08.164275 sqlxmigrate.go:413: Migration SCHEMA_INIT - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.166391 sqlxmigrate.go:368: Migration 20190522-01a - checking
Schema : 2019/05/25 08:20:08.166405 sqlxmigrate.go:413: Migration 20190522-01a - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.168066 sqlxmigrate.go:375: Migration 20190522-01a - already ran
Schema : 2019/05/25 08:20:08.168078 sqlxmigrate.go:368: Migration 20190522-01b - checking
Schema : 2019/05/25 08:20:08.168084 sqlxmigrate.go:413: Migration 20190522-01b - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.170297 sqlxmigrate.go:375: Migration 20190522-01b - already ran
Schema : 2019/05/25 08:20:08.170319 sqlxmigrate.go:368: Migration 20190522-01c - checking
Schema : 2019/05/25 08:20:08.170327 sqlxmigrate.go:413: Migration 20190522-01c - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.172044 sqlxmigrate.go:375: Migration 20190522-01c - already ran
Schema : 2019/05/25 08:20:08.172831 main.go:130: main : Migrate : Completed
Schema : 2019/05/25 08:20:08.172935 main.go:131: main : Completed
```

Or alternatively, use the makefile:
```bash
make run
```

## Join us on Gopher Slack

If you are having problems installing, trouble getting the project running, or would like to contribute, join the
channel #saas-starter-kit on [Gopher Slack](http://invite.slack.golangbridge.org/).

@ -2,26 +2,21 @@ package main

import (
	"context"
	"encoding/json"
	"expvar"
	"fmt"
	"log"
	"net/url"
	"os"
	"strings"
	"time"

	"geeks-accelerator/oss/saas-starter-kit/internal/platform/flag"
	"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
	"geeks-accelerator/oss/saas-starter-kit/internal/schema"
	"github.com/kelseyhightower/envconfig"
	"github.com/lib/pq"
	"github.com/urfave/cli"
	sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
	sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
)

// build is the git version of this program. It is set using build flags in the makefile.
var build = "develop"

// service is the name of the program used for logging, tracing and
// the prefix used for loading env variables
// ie: export SCHEMA_ENV=dev
@ -38,57 +33,95 @@ type DB struct {
}

func main() {

	// =========================================================================
	// Logging

	log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile)
	log.SetPrefix(service + " : ")
	log := log.New(os.Stdout, log.Prefix(), log.Flags())

	// =========================================================================
	// Configuration
	var cfg struct {
		Env string `default:"dev" envconfig:"ENV"`
		DB  struct {
			Host       string `default:"127.0.0.1:5433" envconfig:"HOST"`
			User       string `default:"postgres" envconfig:"USER"`
			Pass       string `default:"postgres" envconfig:"PASS" json:"-"` // don't print
			Database   string `default:"shared" envconfig:"DATABASE"`
			Driver     string `default:"postgres" envconfig:"DRIVER"`
			Timezone   string `default:"utc" envconfig:"TIMEZONE"`
			DisableTLS bool   `default:"true" envconfig:"DISABLE_TLS"`
		}
	}
	// New CLI application.
	app := cli.NewApp()
	app.Name = "schema"
	app.Version = "1.0.0"
	app.Author = "Lee Brown"
	app.Email = "lee@geeksinthewoods.com"

	app.Commands = []cli.Command{
		{
			Name:    "migrate",
			Aliases: []string{"m"},
			Usage:   "run schema migration",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name: "env",
					Usage: fmt.Sprintf("target environment, one of [%s]",
						strings.Join(webcontext.EnvNames, ", ")),
					Value:  "dev",
					EnvVar: "ENV",
				},
				cli.StringFlag{
					Name:   "host",
					Usage:  "host",
					Value:  "127.0.0.1:5433",
					EnvVar: "SCHEMA_DB_HOST",
				},
				cli.StringFlag{
					Name:   "user",
					Usage:  "username",
					Value:  "postgres",
					EnvVar: "SCHEMA_DB_USER",
				},
				cli.StringFlag{
					Name:   "pass",
					Usage:  "password",
					Value:  "postgres",
					EnvVar: "SCHEMA_DB_PASS",
				},
				cli.StringFlag{
					Name:   "database",
					Usage:  "name of the default database",
					Value:  "shared",
					EnvVar: "SCHEMA_DB_DATABASE",
				},
				cli.StringFlag{
					Name:   "driver",
					Usage:  "database driver to use for the connection",
					Value:  "postgres",
					EnvVar: "SCHEMA_DB_DRIVER",
				},
				cli.BoolTFlag{
					Name:   "disable-tls",
					Usage:  "disable TLS for the database connection",
					EnvVar: "SCHEMA_DB_DISABLE_TLS",
				},
			},
			Action: func(c *cli.Context) error {
				targetEnv := c.String("env")
				var dbInfo = DB{
					Host:     c.String("host"),
					User:     c.String("user"),
					Pass:     c.String("pass"),
					Database: c.String("database"),

					Driver:     c.String("driver"),
					DisableTLS: c.Bool("disable-tls"),
				}

				return runMigrate(log, targetEnv, dbInfo)
			},
		},
	}

	// For additional details refer to https://github.com/kelseyhightower/envconfig
	if err := envconfig.Process(service, &cfg); err != nil {
		log.Fatalf("main : Parsing Config : %v", err)
	}

	if err := flag.Process(&cfg); err != nil {
		if err != flag.ErrHelp {
			log.Fatalf("main : Parsing Command Line : %v", err)
		}
		return // We displayed help.
	}

	// =========================================================================
	// Log App Info

	// Print the build version for our logs. Also expose it under /debug/vars.
	expvar.NewString("build").Set(build)
	log.Printf("main : Started : Application Initializing version %q", build)
	defer log.Println("main : Completed")

	// Print the config for our logs. It's important that any credentials in the
	// config that could expose a security risk are excluded from being json
	// encoded by applying the tag `json:"-"` to the struct var.
	{
		cfgJSON, err := json.MarshalIndent(cfg, "", " ")
		if err != nil {
			log.Fatalf("main : Marshalling Config to JSON : %v", err)
		}
		log.Printf("main : Config : %v\n", string(cfgJSON))
	}

	err := app.Run(os.Args)
	if err != nil {
		log.Fatalf("%+v", err)
	}
}

// runMigrate executes the schema migration against the provided database connection details.
func runMigrate(log *log.Logger, targetEnv string, dbInfo DB) error {
	// =========================================================================
	// Start Database
	var dbUrl url.URL
@ -97,20 +130,18 @@ func main() {
		var q url.Values = make(map[string][]string)

		// Handle SSL Mode
		if dbInfo.DisableTLS {
			q.Set("sslmode", "disable")
		} else {
			q.Set("sslmode", "require")
		}

		// Construct url.
		dbUrl = url.URL{
			Scheme:   dbInfo.Driver,
			User:     url.UserPassword(dbInfo.User, dbInfo.Pass),
			Host:     dbInfo.Host,
			Path:     dbInfo.Database,
			RawQuery: q.Encode(),
		}
	}
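
	// With the default flag values above, the resulting connection string would
	// look like (illustrative):
	//   postgres://postgres:postgres@127.0.0.1:5433/shared?sslmode=disable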
@ -118,27 +149,23 @@ func main() {
	// Register informs the sqlxtrace package of the driver that we will be using in our program.
	// It uses a default service name, in the below case "postgres.db". To use a custom service
	// name use RegisterWithServiceName.
	sqltrace.Register(dbInfo.Driver, &pq.Driver{}, sqltrace.WithServiceName(service))
	masterDb, err := sqlxtrace.Open(dbInfo.Driver, dbUrl.String())
	if err != nil {
		log.Fatalf("main : Register DB : %s : %v", dbInfo.Driver, err)
	}
	defer masterDb.Close()

	// =========================================================================
	// Start Migrations

	ctx := context.Background()

	// Execute the migrations
	if err = schema.Migrate(ctx, targetEnv, masterDb, log, false); err != nil {
		return err
	}

	log.Printf("main : Migrate : Completed")
	return nil
}

@ -1,10 +0,0 @@
SHELL := /bin/bash

install:
	go install .

build:
	go install .

run:
	go build . && ./schema