
completed devops migration

Author: Lee Brown
Date:   2019-08-21 16:28:50 -08:00
Parent: f9273881bd
Commit: 3efba4fc1e

25 changed files with 164 additions and 1748 deletions

.gitignore vendored

@@ -1,8 +1,6 @@
 .idea
-aws.lee
+aws.*
 .env_docker_compose
-local.env
 .local.env
-.aws-deploy.env
 .DS_Store
 tmp

.gitlab-ci.yml

@@ -7,7 +7,7 @@ variables:
   DOCKER_TLS_CERTDIR: ""
 before_script:
-  - './tools/devops/scripts/build.sh'
+  - 'go install ./build/cicd'
 stages:
   - build:dev
@@ -32,136 +32,124 @@ cache:
 .build_tmpl: &build_tmpl
   <<: *job_tmpl
   script:
-    - './devops build -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV}'
+    - 'cicd --env=${TARGET_ENV} build ${TARGET_TYPE} --name=${TARGET_REF}'
 .deploy_tmpl: &deploy_tmpl
   <<: *job_tmpl
   script:
-    - './devops deploy -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV} -enable_https=${ENABLE_HTTPS} -enable_elb=${ENABLE_ELB} -primary_host=${PRIMARY_HOST} -host_names=${HOST_NAMES} -private_bucket=${S3_BUCKET_PRIVATE} -public_bucket=${S3_BUCKET_PUBLIC} -public_bucket_cloudfront=${S3_BUCKET_PUBLIC_CLOUDFRONT} -static_files_s3=${STATIC_FILES_S3} -static_files_img_resize=${STATIC_FILES_IMG_RESIZE}'
+    - 'cicd --env=${TARGET_ENV} deploy ${TARGET_TYPE} --name=${TARGET_REF}'
 .migrate_tmpl: &migrate_tmpl
   <<: *job_tmpl
   script:
-    - './devops migrate -project=${PROJECT_NAME} -env=${TARGET_ENV}'
+    - 'cicd --env=${TARGET_ENV} schema migrate'
-db:migrate:dev:
+db:migrate:prod:
   <<: *migrate_tmpl
-  stage: migrate:dev
+  stage: migrate:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - /^dev-.*$/
+    - prod
+    - /^prod-.*$/
   variables:
-    TARGET_ENV: 'dev'
+    TARGET_ENV: 'prod'
     AWS_USE_ROLE: 'true'
-webapp:build:dev:
+webapp:build:prod:
   <<: *build_tmpl
-  stage: build:dev
+  stage: build:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-app
+    - prod
+    - prod-web-app
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-app'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-app'
     AWS_USE_ROLE: 'true'
-webapp:deploy:dev:
+webapp:deploy:prod:
   <<: *deploy_tmpl
-  stage: deploy:dev
+  stage: deploy:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-app
+    - prod
+    - prod-web-app
   dependencies:
-    - 'webapp:build:dev'
-    - 'db:migrate:dev'
+    - 'webapp:build:prod'
+    - 'db:migrate:prod'
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-app'
-    ENABLE_HTTPS: 1
-    ENABLE_ELB: 0
-    PRIMARY_HOST: 'example.saasstartupkit.com'
-    HOST_NAMES: 'example.saasstartupkit.com,dev.example.saasstartupkit.com'
-    S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
-    S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
-    S3_BUCKET_PUBLIC_CLOUDFRONT: 'true'
-    STATIC_FILES_S3: 'true'
-    STATIC_FILES_IMG_RESIZE: 'true'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-app'
     AWS_USE_ROLE: 'true'
-    EMAIL_SENDER: 'lee+saas-starter-kit@geeksinthewoods.com'
-    WEB_API_BASE_URL: https://api.example.saasstartupkit.com
-webapi:build:dev:
+webapi:build:prod:
   <<: *build_tmpl
-  stage: build:dev
+  stage: build:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-api
+    - prod
+    - prod-web-api
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-api'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-api'
     AWS_USE_ROLE: 'true'
-webapi:deploy:dev:
+webapi:deploy:prod:
   <<: *deploy_tmpl
-  stage: deploy:dev
+  stage: deploy:prod
   tags:
-    - dev
+    - prod
   only:
     - master
-    - dev
-    - dev-web-api
+    - prod
+    - prod-web-api
   dependencies:
-    - 'webapi:build:dev'
-    - 'db:migrate:dev'
+    - 'webapi:build:prod'
+    - 'db:migrate:prod'
   variables:
-    TARGET_ENV: 'dev'
-    SERVICE: 'web-api'
-    ENABLE_HTTPS: 1
-    ENABLE_ELB: 0
-    PRIMARY_HOST: 'api.example.saasstartupkit.com'
-    HOST_NAMES: 'api.dev.example.saasstartupkit.com'
-    S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
-    S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
-    S3_BUCKET_PUBLIC_CLOUDFRONT: 'false'
-    STATIC_FILES_S3: 'false'
-    STATIC_FILES_IMG_RESIZE: 'false'
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'service'
+    TARGET_REF: 'web-api'
+    AWS_USE_ROLE: 'true'
+ddlogscollector:build:prod:
+  <<: *build_tmpl
+  stage: build:prod
+  tags:
+    - prod
+  only:
+    - master
+    - prod
+    - prod-ddlogs
+  variables:
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'function'
+    TARGET_REF: 'ddlogscollector'
+    AWS_USE_ROLE: 'true'
+ddlogscollector:deploy:prod:
+  <<: *deploy_tmpl
+  stage: deploy:prod
+  tags:
+    - prod
+  only:
+    - master
+    - prod
+    - prod-ddlogs
+  dependencies:
+    - 'ddlogscollector:build:prod'
+    - 'db:migrate:prod'
+  variables:
+    TARGET_ENV: 'prod'
+    TARGET_TYPE: 'function'
+    TARGET_REF: 'ddlogscollector'
     AWS_USE_ROLE: 'true'
-    EMAIL_SENDER: 'lee+saas-starter-kit@geeksinthewoods.com'
-    WEB_APP_BASE_URL: https://example.saasstartupkit.com
-#ddlogscollector:deploy:stage:
-# <<: *deploy_stage_tmpl
-# variables:
-# TARGET_ENV: 'stage'
-# ECS_CLUSTER: '${ECS_CLUSTER}'
-# SERVICE: 'ddlogscollector'
-# S3_BUCKET: 'keenispace-services-stage'
-# S3_KEY: 'aws/lambda/ddlogscollector/src/ddlogscollector-stage.zip'
-# ENABLE_VPC: 0
-# only:
-# - master
-# - stage
-#ddlogscollector:deploy:prod:
-# <<: *deploy_prod_tmpl
-# variables:
-# TARGET_ENV: 'prod'
-# ECS_CLUSTER: '${ECS_CLUSTER}'
-# SERVICE: 'ddlogscollector'
-# S3_BUCKET: 'keenispace-services-prod'
-# S3_KEY: 'aws/lambda/ddlogscollector/src/ddlogscollector-prod.zip'
-# ENABLE_VPC: 0
-# only:
-# - master
-# - prod
-# #dependencies:
-# # - 'ddlogscollector:deploy:stage'


@@ -13,8 +13,8 @@ driven deployment.
   * [Services](#services)
   * [Functions](#functions)
   * [Schema Migrations](#schema-migrations)
-- [Installation](#installation)
 - [Getting Started](#getting-started)
+- [Installing locally](#installing-locally)
 - [Usage](#usage)
   * [Commands](#commands)
   * [Examples](#examples)
@@ -132,14 +132,14 @@ in the compiled binary resulting in a docker container that is around 50mbs excl
 possible to swap out `alpine:3.9` with [busybox](https://willschenk.com/articles/2019/building_a_slimmer_go_docker_container/)
 for an even smaller resulting docker image.

-A service is built using the defined Dockerfile. The resulting image is pushed to
+A service is built using the defined service Dockerfile. The resulting image is pushed to
 [Amazon Elastic Container Registry](https://aws.amazon.com/ecr/).

 Amazon Elastic Container Registry (ECR) is a fully-managed Docker container registry that makes it easy for
 developers to store, manage, and deploy Docker container images. Amazon ECR is integrated with Amazon Elastic
 Container Service (ECS) simplifying the development to production workflow.

-A service is configured for deployment in [services.go](https://gitlab.com/saas-starter-kit/oss/devops/blob/master/build/cicd/internal/config/service.go).
+A service is configured for deployment in [services.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/oss/devops/blob/master/build/cicd/internal/config/service.go).

 Services are deployed to [AWS Fargate](https://aws.amazon.com/fargate/) based on the defined task definition.

 AWS Fargate is a compute engine for Amazon ECS that allows you to run containers without having to manage servers or
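As a rough illustration only (not the exact commands _cicd_ runs internally), building and pushing a service image to ECR amounts to something like the following; the account ID, region, repository and tag are placeholders:

```bash
# Authenticate Docker to ECR (AWS CLI v1 syntax), then build, tag and push the service image.
$(aws ecr get-login --no-include-email --region us-west-2)
docker build -t saas-starter-kit:dev-web-app -f cmd/web-app/Dockerfile .
docker tag saas-starter-kit:dev-web-app 111111111111.dkr.ecr.us-west-2.amazonaws.com/saas-starter-kit:dev-web-app
docker push 111111111111.dkr.ecr.us-west-2.amazonaws.com/saas-starter-kit:dev-web-app
```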
@@ -155,66 +155,93 @@ image is tagged with the go.mod hash and pushed to the projects
 ### Functions

 Functions are applications that can be executed in a short period of time. The configured function is:

-* [Datadog Log Collection](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/deployments/ddlogscollector) -
-Python script used to ship logs from AWS Cloudwatch to Datadog.
-
-A function is built using the defined Dockerfile. The `Dockerfile` for a function should use a
-[lambdaci image](https://hub.docker.com/r/lambci/lambda/) as the base image.
+A Python script for
+[Datadog Log Collection](https://gitlab.com/geeks-accelerator/oss/devops/tree/master/examples/aws-lambda-python-ddlogs)
+deployed as a function is provided by the devops project in
+[examples](https://gitlab.com/geeks-accelerator/oss/devops/tree/master/examples).
+
+A function is built using the defined
+[Dockerfile](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/examples/aws-lambda-python-ddlogs/Dockerfile).
+The `Dockerfile` should use a [lambdaci image](https://hub.docker.com/r/lambci/lambda/) as the base image.

 Lambdaci images provide a sandboxed local environment that replicates the live AWS Lambda environment almost
 identically – including installed software and libraries, file structure and permissions, environment variables,
 context objects and behaviors – even the user and running process are the same.

 The build command then uses `docker cp` to extract all files from the resulting container image that are located in
-`/var/task`. These files are zipped and uploaded to AWS S3 for deployment.
+`/var/task`. These files are zipped and uploaded to the private AWS S3 bucket for deployment.

-A function is configured for deployment in [functions.go](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/build/cicd/internal/config/function.go).
+A function is configured for deployment in [functions.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/oss/devops/blob/master/build/cicd/internal/config/function.go).

 Functions are deployed to [AWS Lambda](https://aws.amazon.com/lambda/).

 AWS Lambda lets you run code without provisioning or managing servers. You pay only for the compute time you consume
 - there is no charge when your code is not running.
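The packaging flow described above can be sketched with plain Docker and AWS CLI commands; the image, container and object key names below are placeholders rather than the exact ones _cicd_ uses:

```bash
# Build the function image from its lambci-based Dockerfile.
docker build -t ddlogscollector:build -f Dockerfile .
# Create a stopped container and copy out everything under /var/task.
docker create --name ddlogscollector-build ddlogscollector:build
docker cp ddlogscollector-build:/var/task ./task
docker rm ddlogscollector-build
# Zip the extracted files and stage the archive in the private S3 bucket for deployment.
(cd task && zip -r ../ddlogscollector.zip .)
aws s3 cp ddlogscollector.zip s3://saas-starter-kit-private/aws/lambda/ddlogscollector.zip
```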
 ### Schema Migrations

 _cicd_ includes a minimalistic database migration script that implements
 [github.com/geeks-accelerator/sqlxmigrate](https://godoc.org/github.com/geeks-accelerator/sqlxmigrate). It provides
-schema versioning and migration rollback. The schema for the entire project is defined globally and is located inside
-internal: [internal/schema](https://gitlab.com/geeks-accelerator/oss/devops/tree/master/build/cicd/internal/schema)
+schema versioning and migration rollback. The schema for the entire project is defined globally and is located at
+[internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/issue8/datadog-lambda-func/internal/schema)

 The example schema package provides two separate methods for handling schema migration:

-* [Migrations](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/build/cicd/internal/schema/migrations.go) -
+* [Migrations](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/schema/migrations.go) -
 List of direct SQL statements for each migration with defined version ID. A database table is created to persist
 executed migrations. On each migration run, the migration logic checks the migration database table to
 see if a migration has already been executed. Thus, schema migrations are only ever executed once. Migrations are defined as a
 function to enable complex migrations so results from one query can be manipulated before being piped to the next query.
-* [Init Schema](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/build/cicd/internal/schema/init_schema.go) -
+* [Init Schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/schema/init_schema.go) -
 If you have a lot of migrations, it can be a pain to run them all. For example, when you are deploying a new instance of
 the app into a clean database. To prevent this, use the initSchema function that will run as-if no migration was run
 before (in a new clean database).

-Another bonus with the globally defined schema is that it enables your testing package the ability to dynamically spin
-up database containers on-demand and automatically include all the migrations. This allows the testing package to
-programmatically execute schema migrations before running any unit tests.
+Another bonus with the globally defined schema is that it enables your testing package the ability to dynamically [spin
+up database containers](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/platform/tests/main.go#L127)
+on-demand and automatically include all the migrations. This allows the testing package to
+[programmatically execute schema migrations](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/issue8/datadog-lambda-func/internal/platform/tests/main.go#L172)
+before running any unit tests.
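Conceptually, what the Go test helper automates is equivalent to starting a throwaway Postgres container and applying the global schema before the tests execute. A hand-rolled approximation (container name and credentials are throwaway placeholders, not the helper's actual values):

```bash
# Start a disposable Postgres container for the test run.
docker run -d --name saas-starter-kit-test-db \
    -e POSTGRES_USER=postgres -e POSTGRES_PASSWORD=postgres -e POSTGRES_DB=shared \
    -p 5432:5432 postgres:11-alpine
# The test helper then connects to this instance and applies the schema package's
# migrations programmatically before any unit tests run.
go test ./internal/...
# Tear the container down when the tests finish.
docker rm -f saas-starter-kit-test-db
```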
-## Installation
+## Getting Started
+
+One of the philosophies behind the SaaS Startup Kit is that building and deploying SaaS product offerings should be easy,
+allowing you to focus on what's most important: writing the business logic. Below are the steps needed to get a
+full build pipeline that includes both continuous integration and continuous deployment.
+
+1. Configure your AWS infrastructure in [config.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config/config.go)
+2. Define your services in [service.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config/service.go)
+that will be deployed to AWS Fargate. This includes settings for your [AWS ECS Cluster](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcsCluster),
+the associated [AWS ECS Service](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcsService)
+and [AWS ECS Task Definition](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcsTaskDefinition).
+3. Define your functions in [function.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config/function.go)
+that will be deployed to AWS Lambda. This includes settings for the runtime, amount of memory, and timeout.
+4. Ensure your [schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema) is ready
+for deployment. You should already be using the
+[schema tool](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/tools/schema) for maintaining
+database schemas for local development, so no additional effort should be required for this step.
+5. Update the [.gitlab-ci.yml](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/.gitlab-ci.yml) in
+the project root to include the services and functions you have configured here. `.gitlab-ci.yml` will be used by GitLab
+to determine which services and functions should be built and deployed.
+6. Set up a GitLab Runner in your AWS account. This will allow the _cicd_ tool to execute database migrations, since the
+database deployed by default is not publicly available. GitLab does provide shared runners, but these will not be able
+to access your database.
+[Follow the instructions here](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/README.md#gitlab-cicd) for
+setting up a GitLab Runner.
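Once these pieces are in place, the configuration can be sanity-checked locally before relying on the pipeline. A rough sketch using the flags described in the usage section below (dev environment shown):

```bash
# Dry-run the build and deploy for one service, then run the schema migration.
cicd --env=dev build service --name=web-app --dry-run=true
cicd --env=dev deploy service --name=web-app --dry-run=true
cicd --env=dev schema migrate
```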
+## Installing locally

 Make sure you have a working Go environment. Go version 1.2+ is supported. [See
 the install instructions for Go](http://golang.org/doc/install.html).
@@ -231,12 +258,8 @@ be easily used:
 export PATH=$PATH:$GOPATH/bin
 ```

-## Getting Started
-
 _cicd_ requires AWS permissions to be executed locally. For the GitLab CI/CD build pipeline, AWS roles will be used. This
-user is only nessissary for running _cicd_ locally.
+user is only necessary for running _cicd_ locally.

 1. You will need an existing AWS account or create a new AWS account.

@@ -248,16 +271,15 @@ the statement is stored in the devops repo at
 3. Create new [AWS User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users$new?step=details)
 called `saas-starter-kit-deploy` with _Programmatic Access_ and _Attach existing policies directly_ with the policy
-created from step 1 `saas-starter-kit-deploy`
+created from step 2 `saas-starter-kit-deploy`

-4. Try running the build for a single service.
-```bash
-cicd --env=dev build service --name=aws-ecs-go-web-api --release-tag=testv1
-```
-
-4. Try running the deploy for a single service.
-```bash
-cicd --env=dev deploy service --name=aws-ecs-go-web-api --release-tag=testv1
-```
+4. Set your AWS credentials as [environment variables](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html).
+These can also be passed into _cicd_ as command line options.
+```bash
+export AWS_ACCESS_KEY_ID=XXXXXXXXX
+export AWS_SECRET_ACCESS_KEY=XXXXXXXXX
+export AWS_REGION="us-west-2"
+export AWS_USE_ROLE=false
+```
@@ -355,7 +377,7 @@ Access/Secret Keys are required
     --dry-run    print out the deploy details
 ```

-* `schema` - Runs the database migration
+* `schema` - Runs the database migration using credentials from AWS Secrets Manager.

 ```bash
 $ cicd -env [dev|stage|prod] schema

@@ -374,14 +396,14 @@ Access/Secret Keys are required
 ### Examples

-Build the example service _aws-ecs-go-web-api_
+Build the example service _web-app_
 ```bash
-$ cicd --env=dev build service --name=aws-ecs-go-web-api --release-tag=testv1 --dry-run=false
+$ cicd --env=dev build service --name=web-app --release-tag=testv1 --dry-run=false
 ```

-Deploy the example service _aws-ecs-go-web-api_
+Deploy the example service _web-app_
 ```bash
-$ cicd --env=dev deploy service --name=aws-ecs-go-web-api --release-tag=testv1 --dry-run=false
+$ cicd --env=dev deploy service --name=web-app --release-tag=testv1 --dry-run=false
 ```


@@ -189,6 +189,7 @@ func NewServiceContext(serviceName Service, cfg *devdeploy.Config) (*ServiceCont
 			ecsKeyValuePair("CI_PIPELINE_URL", ciPipelineURL),
 			ecsKeyValuePair("WEB_APP_BASE_URL", webAppCfg.BaseUrl()),
 			ecsKeyValuePair("WEB_API_BASE_URL", webApiCfg.BaseUrl()),
+			ecsKeyValuePair("EMAIL_SENDER", "lee+saas-starter-kit@geeksinthewoods.com"),
 		}
 	}


@@ -47,7 +47,7 @@ COPY cmd/${service}/templates /templates
 # Copy the global templates.
 ADD resources/templates/shared /templates/shared
-ADD fresh-auto-reload.conf /runner.conf
+ADD configs/fresh-auto-reload.conf /runner.conf
 ENV TEMPLATE_DIR=/templates


@@ -30,7 +30,7 @@ COPY cmd/${service}/static /static
 # Copy the global templates.
 ADD resources/templates/shared /templates/shared
-ADD fresh-auto-reload.conf /runner.conf
+ADD configs/fresh-auto-reload.conf /runner.conf
 ENV TEMPLATE_DIR=/templates


@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><path d="M180.41 203.01c-.72 22.65 10.6 32.68 10.88 39.05a8.164 8.164 0 0 1-4.1 6.27l-12.8 8.96a10.66 10.66 0 0 1-5.63 1.92c-.43-.02-8.19 1.83-20.48-25.61a78.608 78.608 0 0 1-62.61 29.45c-16.28.89-60.4-9.24-58.13-56.21-1.59-38.28 34.06-62.06 70.93-60.05 7.1.02 21.6.37 46.99 6.27v-15.62c2.69-26.46-14.7-46.99-44.81-43.91-2.4.01-19.4-.5-45.84 10.11-7.36 3.38-8.3 2.82-10.75 2.82-7.41 0-4.36-21.48-2.94-24.2 5.21-6.4 35.86-18.35 65.94-18.18a76.857 76.857 0 0 1 55.69 17.28 70.285 70.285 0 0 1 17.67 52.36l-.01 69.29zM93.99 235.4c32.43-.47 46.16-19.97 49.29-30.47 2.46-10.05 2.05-16.41 2.05-27.4-9.67-2.32-23.59-4.85-39.56-4.87-15.15-1.14-42.82 5.63-41.74 32.26-1.24 16.79 11.12 31.4 29.96 30.48zm170.92 23.05c-7.86.72-11.52-4.86-12.68-10.37l-49.8-164.65c-.97-2.78-1.61-5.65-1.92-8.58a4.61 4.61 0 0 1 3.86-5.25c.24-.04-2.13 0 22.25 0 8.78-.88 11.64 6.03 12.55 10.37l35.72 140.83 33.16-140.83c.53-3.22 2.94-11.07 12.8-10.24h17.16c2.17-.18 11.11-.5 12.68 10.37l33.42 142.63L420.98 80.1c.48-2.18 2.72-11.37 12.68-10.37h19.72c.85-.13 6.15-.81 5.25 8.58-.43 1.85 3.41-10.66-52.75 169.9-1.15 5.51-4.82 11.09-12.68 10.37h-18.69c-10.94 1.15-12.51-9.66-12.68-10.75L328.67 110.7l-32.78 136.99c-.16 1.09-1.73 11.9-12.68 10.75h-18.3zm273.48 5.63c-5.88.01-33.92-.3-57.36-12.29a12.802 12.802 0 0 1-7.81-11.91v-10.75c0-8.45 6.2-6.9 8.83-5.89 10.04 4.06 16.48 7.14 28.81 9.6 36.65 7.53 52.77-2.3 56.72-4.48 13.15-7.81 14.19-25.68 5.25-34.95-10.48-8.79-15.48-9.12-53.13-21-4.64-1.29-43.7-13.61-43.79-52.36-.61-28.24 25.05-56.18 69.52-55.95 12.67-.01 46.43 4.13 55.57 15.62 1.35 2.09 2.02 4.55 1.92 7.04v10.11c0 4.44-1.62 6.66-4.87 6.66-7.71-.86-21.39-11.17-49.16-10.75-6.89-.36-39.89.91-38.41 24.97-.43 18.96 26.61 26.07 29.7 26.89 36.46 10.97 48.65 12.79 63.12 29.58 17.14 22.25 7.9 48.3 4.35 55.44-19.08 37.49-68.42 34.44-69.26 34.42zm40.2 104.86c-70.03 51.72-171.69 79.25-258.49 79.25A469.127 469.127 0 0 1 2.83 327.46c-6.53-5.89-.77-13.96 7.17-9.47a637.37 637.37 0 0 0 316.88 84.12 630.22 630.22 0 0 0 241.59-49.55c11.78-5 21.77 7.8 10.12 16.38zm29.19-33.29c-8.96-11.52-59.28-5.38-81.81-2.69-6.79.77-7.94-5.12-1.79-9.47 40.07-28.17 105.88-20.1 113.44-10.63 7.55 9.47-2.05 75.41-39.56 106.91-5.76 4.87-11.27 2.3-8.71-4.1 8.44-21.25 27.39-68.49 18.43-80.02z"/></svg>


configs/.gitignore vendored Normal file

@@ -0,0 +1 @@
.env_docker_compose


@@ -48,7 +48,7 @@ services:
       aliases:
         - datadog
     env_file:
-      - .env_docker_compose
+      - configs/.env_docker_compose
     environment:
       - DD_LOGS_ENABLED=true
       - DD_APM_ENABLED=true
@@ -77,7 +77,7 @@ services:
       aliases:
         - web-app
     env_file:
-      - .env_docker_compose
+      - configs/.env_docker_compose
     environment:
       - WEB_APP_HTTP_HOST=:3000
       - WEB_APP_APP_BASE_URL=http://127.0.0.1:3000
@@ -112,7 +112,7 @@ services:
       aliases:
         - web-api
     env_file:
-      - .env_docker_compose
+      - configs/.env_docker_compose
     environment:
       - WEB_API_HTTP_HOST=:3001
       - WEB_API_APP_BASE_URL=http://127.0.0.1:3001

go.mod

@@ -29,20 +29,21 @@ require (
 	github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect
 	github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
 	github.com/pborman/uuid v1.2.0
+	github.com/philhofer/fwd v1.0.0 // indirect
 	github.com/pkg/errors v0.8.1
 	github.com/sethgrid/pester v0.0.0-20190127155807-68a33a018ad0
 	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24
 	github.com/stretchr/testify v1.4.0
 	github.com/sudo-suhas/symcrypto v1.0.0
+	github.com/tinylib/msgp v1.1.0 // indirect
 	github.com/urfave/cli v1.21.0
 	github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2
-	gitlab.com/geeks-accelerator/oss/devops v0.0.0-20190815180027-17c30c1f4c9e
+	gitlab.com/geeks-accelerator/oss/devops v1.0.0
 	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
 	golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
+	golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 // indirect
 	gopkg.in/DataDog/dd-trace-go.v1 v1.16.1
 	gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 	gopkg.in/go-playground/validator.v9 v9.29.1
 	gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
 )
-
-replace gitlab.com/geeks-accelerator/oss/devops => ../devops

go.sum

@@ -306,6 +306,11 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ=
 github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY=
+gitlab.com/geeks-accelerator/oss/devops v0.0.0-20190815180027-17c30c1f4c9e/go.mod h1:Wo5wuTV4TI/AVcAF7sDTQvTREnEkyvAmfWTSxqNDov8=
+gitlab.com/geeks-accelerator/oss/devops v0.0.0-20190822001238-2bc33b036611 h1:uvz0HV7VMHpvdlS2YhV4pYXjRFMZovkAZv9t6koC0sA=
+gitlab.com/geeks-accelerator/oss/devops v0.0.0-20190822001238-2bc33b036611/go.mod h1:rvI71qNJyNiO99ZgGnv/PmJCVrjJjupsXBmfYFXdjGM=
+gitlab.com/geeks-accelerator/oss/devops v1.0.0 h1:5XMS1NO34ZJbrSN8/yOwukCP9NeuDY1JLMyWr5bwzng=
+gitlab.com/geeks-accelerator/oss/devops v1.0.0/go.mod h1:rvI71qNJyNiO99ZgGnv/PmJCVrjJjupsXBmfYFXdjGM=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -407,6 +412,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
 golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=


@@ -1 +0,0 @@
devops


@@ -1,28 +0,0 @@
FROM golang:1.12.6-alpine3.9 AS builder
LABEL maintainer="lee@geeksinthewoods.com"
RUN apk --update --no-cache add \
git
# Change dir to project base.
WORKDIR $GOPATH/src/gitlab.com/geeks-accelerator/oss/saas-starter-kit
# Enable go modules.
ENV GO111MODULE="on"
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY internal ./internal
COPY tools/devops ./tools/devops
WORKDIR ./tools/devops
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o /go/bin/devops .
FROM busybox
COPY --from=builder /go/bin/devops /go/bin/devops
ENTRYPOINT ["/go/bin/devops"]


@@ -1,244 +0,0 @@
# SaaS Starter Kit
Copyright 2019, Geeks Accelerator
twins@geeksaccelerator.com
## Description
_Devops_ handles creating AWS resources and deploying your services with minimal additional configuration. You can
customize any of the configuration in the code. While AWS is already a core part of the saas-starter-kit, keeping
the deployment in GoLang limits the scope of additional technologies required to get your project successfully up and
running. If you understand Golang, then you will be a master at devops with this tool.
The project includes a Postgres database which adds an additional resource dependency when deploying the
project. It is important to know that the tasks running schema migrations for the Postgres database cannot run on shared
GitLab Runners since they will be outside the deployment AWS VPC. There are two options here:
1. Enable the AWS RDS database to be publicly available (not recommended).
2. Run your own GitLab runners inside the same AWS VPC and grant access for them to communicate with the database.
This project has opted to implement option 2 and thus setting up the deployment pipeline requires a few more additional steps.
Note that using shared runners hosted by GitLab also requires AWS credentials to be input into GitLab for configuration.
Hosting your own GitLab runners uses AWS roles instead of hardcoding the access key ID and secret access key in GitLab and
in other configuration files. And since this project is open-source, we wanted to avoid sharing our AWS credentials.
If you don't have an AWS account, sign up for one now and then proceed with the deployment setup.
We assume that if you are deploying the SaaS Starter Kit, you are starting from scratch with no existing dependencies.
This, however, excludes any domain names that you would like to use for resolving your services publicly. To use any
pre-purchased domain names, make sure they are added to Route 53 in the AWS account. Or you can let the deploy script
create a new zone in Route 53 and update the DNS for the domain name when you're ready to make the transition. It is
required to host the DNS on Route 53 so DNS entries can be managed by this deploy tool. It is possible to use a
[subdomain that uses Route 53 as the DNS service without migrating the parent domain](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html).
## Getting Started
You can run both the `build` and `deploy` commands locally after setting up the initial
AWS permissions.
1. You will need an existing AWS account or create a new AWS account.
2. Define a new [AWS IAM Policy](https://console.aws.amazon.com/iam/home?region=us-west-2#/policies$new?step=edit)
called `saas-starter-kit-deploy` with a defined JSON statement instead of using the visual
editor. The statement is rather large as each permission is granted individually. A copy of
the statement is stored in the repo at
[resources/saas-starter-kit-deploy-policy.json](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/resources/saas-starter-kit-deploy-policy.json)
3. Create new [AWS User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users$new?step=details)
called `saas-starter-kit-deploy` with _Programmatic Access_ and _Attach existing policies directly_ with the policy
created from step 1 `saas-starter-kit-deploy`
4. Try running the deploy
```bash
go run main.go deploy -service=web-api -env=dev
```
Note: This user is only for development purposes and is not needed for the build
pipeline using GitLab CI / CD.
## Setup GitLab CI / CD
Below outlines the basic steps to set up [Autoscaling GitLab Runner on AWS](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/).
1. Define an [AWS IAM Role](https://console.aws.amazon.com/iam/home?region=us-west-2#/roles$new?step=type) that will be
attached to the GitLab Runner instances. The role will need permission to scale (EC2), update the cache (via S3) and
perform the project specific deployment commands.
```
Trusted Entity: AWS Service
Service that will use this role: EC2
Attach permissions policies: AmazonEC2FullAccess, AmazonS3FullAccess, saas-starter-kit-deploy
Role Name: SaasStarterKitEc2RoleForGitLabRunner
Role Description: Allows GitLab runners hosted on EC2 instances to call AWS services on your behalf.
```
2. Launch a new [AWS EC2 Instance](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchInstanceWizard).
`GitLab Runner` will be installed on this instance and will serve as the bastion that spawns new instances. This
instance will be a dedicated host since we need it always up and running, thus standard costs will apply.
Note: Since this machine will not run any jobs itself, it does not need to be very powerful. A t2.micro instance will be sufficient.
```
Amazon Machine Image (AMI): Amazon Linux AMI 2018.03.0 (HVM), SSD Volume Type - ami-0f2176987ee50226e
Instance Type: t2.micro
```
3. Configure Instance Details.
Note: Do not forget to select the IAM Role _SaasStarterKitEc2RoleForGitLabRunner_
```
Number of instances: 1
Network: default VPC
Subnet: no Preference
Auto-assign Public IP: Use subnet setting (Enable)
Placement Group: not checked/disabled
Capacity Reservation: Open
IAM Role: SaasStarterKitEc2RoleForGitLabRunner
Shutdown behavior: Stop
Enable termination protection: checked/enabled
Monitoring: not checked/disabled
Tenancy: Shared
Elastic Inference: not checked/disabled
T2/T3 Unlimited: not checked/disabled
Advanced Details: none
```
4. Add Storage. Increase the volume size for the root device to 30 GiB.
```
Volume Type | Device | Size (GiB) | Volume Type
Root | /dev/xvda | 30 | General Purpose SSD (gp2)
```
5. Add Tags.
```
Name: gitlab-runner
```
6. Configure Security Group. Create a new security group with the following details:
```
Name: gitlab-runner
Description: Gitlab runners for running CICD.
Rules:
Type | Protocol | Port Range | Source | Description
SSH | TCP | 22 | My IP | SSH access for setup.
```
7. Review and Launch instance. Select an existing key pair or create a new one. This will be used to SSH into the
instance for additional configuration.
8. Update the security group to reference itself. The instances need to be able to communicate between each other.
Navigate to edit the security group and add the following two rules where `SECURITY_GROUP_ID` is replaced with the
name of the security group created in step 6.
```
Rules:
Type | Protocol | Port Range | Source | Description
Custom TCP | TCP | 2376 | SECURITY_GROUP_ID | Gitlab runner for Docker Machine to communicate with Docker daemon.
SSH | TCP | 22 | SECURITY_GROUP_ID | SSH access for setup.
```
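For reference, the same two rules can be added with the AWS CLI; the group ID below is a placeholder for the `gitlab-runner` security group's actual ID:

```bash
SG_ID=sg-0123456789abcdef0
# Allow Docker Machine to reach the Docker daemon on spawned instances.
aws ec2 authorize-security-group-ingress --group-id "$SG_ID" --protocol tcp --port 2376 --source-group "$SG_ID"
# Allow SSH between instances in the same group for setup.
aws ec2 authorize-security-group-ingress --group-id "$SG_ID" --protocol tcp --port 22 --source-group "$SG_ID"
```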
9. SSH into the newly created instance.
```bash
ssh -i ~/saas-starter-kit-uswest2-gitlabrunner.pem ec2-user@ec2-52-36-105-172.us-west-2.compute.amazonaws.com
```
Note: If you get the error `Permissions 0666 are too open`, then you will need to `chmod 400 FILENAME`
10. Install GitLab Runner from the [official GitLab repository](https://docs.gitlab.com/runner/install/linux-repository.html)
```bash
curl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh | sudo bash
sudo yum install gitlab-runner
```
11. [Install Docker Community Edition](https://docs.docker.com/install/).
```bash
sudo yum install docker
```
12. [Install Docker Machine](https://docs.docker.com/machine/install-machine/).
```bash
base=https://github.com/docker/machine/releases/download/v0.16.0 &&
curl -L $base/docker-machine-$(uname -s)-$(uname -m) >/tmp/docker-machine &&
sudo install /tmp/docker-machine /usr/sbin/docker-machine
```
13. [Register the runner](https://docs.gitlab.com/runner/register/index.html).
```bash
sudo gitlab-runner register
```
Notes:
* When asked for gitlab-ci tags, enter `master,dev,dev-*`
* This will limit pipeline runs to commits on the master or dev branches. This includes a
wildcard for any branch named with the prefix `dev-`.
* When asked the executor type, enter `docker+machine`
* When asked for the default Docker image, enter `geeksaccelerator/docker-library:golang1.12-docker`
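The interactive prompts above can also be supplied as flags. A rough sketch with a placeholder registration token:

```bash
sudo gitlab-runner register --non-interactive \
    --url "https://gitlab.com/" \
    --registration-token "XXXXX" \
    --executor "docker+machine" \
    --docker-image "geeksaccelerator/docker-library:golang1.12-docker" \
    --tag-list "master,dev,dev-*"
```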
14. [Configuring the GitLab Runner](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/#configuring-the-gitlab-runner)
```bash
sudo vim /etc/gitlab-runner/config.toml
```
Update the `[runners.docker]` configuration section in `config.toml` to match the example below replacing the
obvious placeholder `XXXXX` with the relevant value.
```yaml
[runners.docker]
tls_verify = false
image = "geeksaccelerator/docker-library:golang1.12-docker"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = true
volumes = ["/cache"]
shm_size = 0
[runners.cache]
Type = "s3"
Shared = true
[runners.cache.s3]
ServerAddress = "s3.us-west-2.amazonaws.com"
BucketName = "XXXXX"
BucketLocation = "us-west-2"
[runners.machine]
IdleCount = 0
IdleTime = 1800
MachineDriver = "amazonec2"
MachineName = "gitlab-runner-machine-%s"
MachineOptions = [
"amazonec2-iam-instance-profile=SaasStarterKitEc2RoleForGitLabRunner",
"amazonec2-region=us-west-2",
"amazonec2-vpc-id=XXXXX",
"amazonec2-subnet-id=XXXXX",
"amazonec2-zone=d",
"amazonec2-use-private-address=true",
"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true",
"amazonec2-security-group=gitlab-runner",
"amazonec2-instance-type=t2.large"
]
```
You will need to use the same VPC subnet and availability zone as the instance launched in step 2. We are using AWS
region `us-west-2`. The _ServerAddress_ for S3 will need to be updated if the region is changed. For `us-east-1` the
_ServerAddress_ is `s3.amazonaws.com`. Under MachineOptions you can add anything that the [AWS Docker Machine](https://docs.docker.com/machine/drivers/aws/#options)
driver supports.
Below are some example values for the placeholders to ensure the format of your values is correct.
```yaml
BucketName = saas-starter-kit-usw
amazonec2-vpc-id=vpc-5f43f027
amazonec2-subnet-id=subnet-693d3110
amazonec2-zone=a
```
Once complete, restart the runner.
```bash
sudo gitlab-runner restart
```
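To confirm the registration took effect and the autoscaler can spawn machines, the following checks can be run on the runner host:

```bash
# Verify the runner can authenticate against GitLab.
sudo gitlab-runner verify
# List any machines spawned by the autoscaler once a job has been picked up.
sudo docker-machine ls
```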
## Examples
```bash
go run main.go deploy -service=web-app -env=dev -enable_https=true -primary_host=example.saasstartupkit.com -host_names=example.saasstartupkit.com,dev.example.saasstartupkit.com -private_bucket=saas-starter-kit-private -public_bucket=saas-starter-kit-public -public_bucket_cloudfront=true -static_files_s3=true -static_files_img_resize=1 -recreate_service=0
```


@@ -1,704 +0,0 @@
package cicd
import (
"fmt"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/cloudfront"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/iancoleman/strcase"
"github.com/pkg/errors"
"github.com/urfave/cli"
"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
"log"
"os"
"path/filepath"
"strings"
)
/*
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
// name use RegisterWithServiceName.
sqltrace.Register(db.Driver, &pq.Driver{}, sqltrace.WithServiceName("devops:migrate"))
masterDb, err := sqlxtrace.Open(db.Driver, db.URL())
if err != nil {
return errors.WithStack(err)
}
defer masterDb.Close()
// Start the database migrations.
log.Printf("\t\tStart migrations.")
if err = schema.Migrate(masterDb, log, false); err != nil {
return errors.WithStack(err)
}
log.Printf("\t\tFinished migrations.")
*/
// BuildContext defines the flags for defining the deployment env.
type BuildContext struct {
// Env is the target environment used for the deployment.
Env string `validate:"oneof=dev stage prod"`
// AwsCredentials defines the credentials used for deployment.
AwsCredentials devdeploy.AwsCredentials `validate:"required,dive,required"`
}
// DefineBuildEnv defines the details to setup the target environment for the project to build services and functions.
func DefineBuildEnv(buildCtx BuildContext) (*devdeploy.BuildEnv, error) {
// If AWS Credentials are not set and use role is not enabled, try to load the credentials from env vars.
if buildCtx.AwsCredentials.UseRole == false && buildCtx.AwsCredentials.AccessKeyID == "" {
var err error
buildCtx.AwsCredentials, err = devdeploy.GetAwsCredentialsFromEnv(buildCtx.Env)
if err != nil {
return nil, err
}
}
// Init a new build target environment for the project.
buildEnv := &devdeploy.BuildEnv{
Env: buildCtx.Env,
AwsCredentials: buildCtx.AwsCredentials,
}
// Get the current working directory. This should be somewhere contained within the project.
workDir, err := os.Getwd()
if err != nil {
return buildEnv, errors.WithMessage(err, "Failed to get current working directory.")
}
// Set the project root directory and project name. This is current set by finding the go.mod file for the project
// repo. Project name is the directory name.
modDetails, err := devdeploy.LoadModuleDetails(workDir)
if err != nil {
return buildEnv, err
}
// ProjectRoot should be the root directory for the project.
buildEnv.ProjectRoot = modDetails.ProjectRoot
// ProjectName will be used for prefixing AWS resources. This could be changed as needed or manually defined.
buildEnv.ProjectName = modDetails.ProjectName
// Set default AWS ECR Repository Name.
buildEnv.AwsEcrRepository = &devdeploy.AwsEcrRepository{
RepositoryName: buildEnv.ProjectName,
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: buildEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: buildEnv.Env},
},
}
return buildEnv, nil
}
// DefineDeploymentEnv handles defining all the information needed to setup the target env including RDS and cache.
func DefineDeploymentEnv(log *log.Logger, buildEnv *devdeploy.BuildEnv) (*devdeploy.DeploymentEnv, error) {
// Init a new deployment target environment for the project.
deployEnv := &devdeploy.DeploymentEnv{
BuildEnv: buildEnv,
}
// Set the deployment to use the default VPC for the region.
deployEnv.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{
IsDefault : true,
}
// Set the security group to use for the deployed services, database and cluster. This will used the VPC ID defined
// for the deployment.
deployEnv.AwsEc2SecurityGroup = &devdeploy.AwsEc2SecurityGroup{
GroupName: deployEnv.ProjectName + "-" + buildEnv.Env,
Description: fmt.Sprintf("Security group for %s services running on ECS", deployEnv.ProjectName),
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
// Set the name of the EC2 Security Group used by the gitlab runner. This is used to ensure the security
// group defined above has access to the RDS cluster/instance and can thus handle schema migrations.
deployEnv.GitlabRunnerEc2SecurityGroupName = "gitlab-runner"
// Set the s3 buckets used by the deployed services.
// S3 temp prefix used by services for short term storage. A lifecycle policy will be used for expiration.
s3BucketTempPrefix := "tmp/"
// Defines a life cycle policy to expire keys for the temp directory.
bucketLifecycleTempRule := &s3.LifecycleRule{
ID: aws.String("Rule for : " + s3BucketTempPrefix),
Status: aws.String("Enabled"),
Filter: &s3.LifecycleRuleFilter{
Prefix: aws.String(s3BucketTempPrefix),
},
Expiration: &s3.LifecycleExpiration{
// Indicates the lifetime, in days, of the objects that are subject to the rule.
// The value must be a non-zero positive integer.
Days: aws.Int64(1),
},
// Specifies the days since the initiation of an incomplete multipart upload
// that Amazon S3 will wait before permanently removing all parts of the upload.
// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
// in the Amazon Simple Storage Service Developer Guide.
AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
DaysAfterInitiation: aws.Int64(1),
},
}
// Define the public S3 bucket used to serve static files for all the services.
deployEnv.AwsS3BucketPublic = &devdeploy.AwsS3Bucket{
BucketName: deployEnv.ProjectName+"-public",
IsPublic: true,
TempPrefix: s3BucketTempPrefix,
LocationConstraint: &buildEnv.AwsCredentials.Region,
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
CORSRules: []*s3.CORSRule{
&s3.CORSRule{
// Headers that are specified in the Access-Control-Request-Headers header.
// These headers are allowed in a preflight OPTIONS request. In response to
// any preflight OPTIONS request, Amazon S3 returns any requested headers that
// are allowed.
// AllowedHeaders: aws.StringSlice([]string{}),
// An HTTP method that you allow the origin to execute. Valid values are GET,
// PUT, HEAD, POST, and DELETE.
//
// AllowedMethods is a required field
AllowedMethods: aws.StringSlice([]string{"GET", "POST"}),
// One or more origins you want customers to be able to access the bucket from.
//
// AllowedOrigins is a required field
AllowedOrigins: aws.StringSlice([]string{"*"}),
// One or more headers in the response that you want customers to be able to
// access from their applications (for example, from a JavaScript XMLHttpRequest
// object).
// ExposeHeaders: aws.StringSlice([]string{}),
// The time in seconds that your browser is to cache the preflight response
// for the specified resource.
// MaxAgeSeconds: aws.Int64(),
},
},
}
// The base s3 key prefix used to upload static files.
deployEnv.AwsS3BucketPublicKeyPrefix = "/public"
// For production, enable Cloudfront CND for all static files to avoid serving them from the slower S3 option.
if deployEnv.Env == webcontext.Env_Prod {
deployEnv.AwsS3BucketPublic.CloudFront = &devdeploy.AwsS3BucketCloudFront{
// S3 key prefix to request your content from a directory in your Amazon S3 bucket.
OriginPath : deployEnv.AwsS3BucketPublicKeyPrefix ,
// A complex type that controls whether CloudFront caches the response to requests.
CachedMethods: []string{"HEAD", "GET"},
// The distribution's configuration information.
DistributionConfig: &cloudfront.DistributionConfig{
Comment: aws.String(""),
Enabled: aws.Bool(true),
HttpVersion: aws.String("http2"),
IsIPV6Enabled: aws.Bool(true),
DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{
Compress: aws.Bool(true),
DefaultTTL: aws.Int64(1209600),
MinTTL: aws.Int64(604800),
MaxTTL: aws.Int64(31536000),
ForwardedValues: &cloudfront.ForwardedValues{
QueryString: aws.Bool(true),
Cookies: &cloudfront.CookiePreference{
Forward: aws.String("none"),
},
},
TrustedSigners: &cloudfront.TrustedSigners{
Enabled: aws.Bool(false),
Quantity: aws.Int64(0),
},
ViewerProtocolPolicy: aws.String("allow-all"),
},
ViewerCertificate: &cloudfront.ViewerCertificate{
CertificateSource: aws.String("cloudfront"),
MinimumProtocolVersion: aws.String("TLSv1"),
CloudFrontDefaultCertificate: aws.Bool(true),
},
PriceClass: aws.String("PriceClass_All"),
CallerReference: aws.String("devops-deploy"),
},
}
}
// Define the private S3 bucket used for long term file storage including but not limited to: log exports,
// AWS Lambda code, application caching.
deployEnv.AwsS3BucketPrivate = &devdeploy.AwsS3Bucket{
BucketName: deployEnv.ProjectName+"-private",
IsPublic: false,
TempPrefix: s3BucketTempPrefix,
LocationConstraint: &buildEnv.AwsCredentials.Region,
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
// Specifies whether Amazon S3 should block public access control lists (ACLs)
// for this bucket and objects in this bucket. Setting this element to TRUE
// causes the following behavior:
//
// * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
// public.
//
// * PUT Object calls fail if the request includes a public ACL.
//
// Enabling this setting doesn't affect existing policies or ACLs.
BlockPublicAcls: aws.Bool(true),
// Specifies whether Amazon S3 should block public bucket policies for this
// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
// PUT Bucket policy if the specified bucket policy allows public access.
//
// Enabling this setting doesn't affect existing bucket policies.
BlockPublicPolicy: aws.Bool(true),
// Specifies whether Amazon S3 should restrict public bucket policies for this
// bucket. Setting this element to TRUE restricts access to this bucket to only
// AWS services and authorized users within this account if the bucket has a
// public policy.
//
// Enabling this setting doesn't affect previously stored bucket policies, except
// that public and cross-account access within any public bucket policy, including
// non-public delegation to specific accounts, is blocked.
RestrictPublicBuckets: aws.Bool(true),
// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
// objects in this bucket. Setting this element to TRUE causes Amazon S3 to
// ignore all public ACLs on this bucket and objects in this bucket.
//
// Enabling this setting doesn't affect the persistence of any existing ACLs
// and doesn't prevent new public ACLs from being set.
IgnorePublicAcls: aws.Bool(true),
},
}
// Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
deployEnv.AwsS3BucketPrivate.Policy = func() string {
policyResource := strings.Trim(filepath.Join(deployEnv.AwsS3BucketPrivate.BucketName, deployEnv.AwsS3BucketPrivate.TempPrefix), "/")
return fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
"Action": "s3:GetBucketAcl",
"Effect": "Allow",
"Resource": "arn:aws:s3:::%s",
"Principal": { "Service": "logs.%s.amazonaws.com" }
},
{
"Action": "s3:PutObject" ,
"Effect": "Allow",
"Resource": "arn:aws:s3:::%s/*",
"Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } },
"Principal": { "Service": "logs.%s.amazonaws.com" }
}
]
}`, deployEnv.AwsS3BucketPrivate.BucketName, buildEnv.AwsCredentials.Region, policyResource, buildEnv.AwsCredentials.Region)
}()
// Define the Redis Cache cluster used for ephemeral storage.
deployEnv.AwsElasticCacheCluster = &devdeploy.AwsElasticCacheCluster{
CacheClusterId: deployEnv.ProjectName + "-" + buildEnv.Env,
CacheNodeType: "cache.t2.micro",
CacheSubnetGroupName: "default",
Engine: "redis",
EngineVersion: "5.0.4",
NumCacheNodes: 1,
Port: 6379,
AutoMinorVersionUpgrade: aws.Bool(true),
SnapshotRetentionLimit: aws.Int64(7),
ParameterNameValues: []devdeploy.AwsElasticCacheParameter{
devdeploy.AwsElasticCacheParameter{
ParameterName:"maxmemory-policy",
ParameterValue: "allkeys-lru",
},
},
}
// Define the RDS Database instance for transactional data. A random one will be generated for any created instance.
deployEnv.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
DBInstanceIdentifier: deployEnv.ProjectName + "-" + buildEnv.Env,
DBName: "shared",
Engine: "postgres",
MasterUsername: "god",
Port: 5432,
DBInstanceClass: "db.t2.small",
AllocatedStorage: 20,
CharacterSetName: aws.String("UTF8"),
PubliclyAccessible: false,
BackupRetentionPeriod: aws.Int64(7),
AutoMinorVersionUpgrade: true,
CopyTagsToSnapshot: aws.Bool(true),
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
return deployEnv, nil
}
// ServiceContext defines the flags for deploying a service.
type ServiceContext struct {
// Required flags.
ServiceName string `validate:"required" example:"web-api"`
// Optional flags.
EnableHTTPS bool `validate:"omitempty" example:"false"`
EnableElb bool `validate:"omitempty" example:"false"`
ServiceHostPrimary string `validate:"omitempty" example:"example-project.com"`
ServiceHostNames cli.StringSlice `validate:"omitempty" example:"subdomain.example-project.com"`
DesiredCount int `validate:"omitempty" example:"2"`
Dockerfile string `validate:"omitempty" example:"./cmd/web-api/Dockerfile"`
ServiceDir string `validate:"omitempty" example:"./cmd/web-api"`
StaticFilesS3Enable bool `validate:"omitempty" example:"false"`
StaticFilesImgResizeEnable bool `validate:"omitempty" example:"false"`
RecreateService bool `validate:"omitempty" example:"false"`
}
// DefineDeployService handles defining all the information needed to deploy a service to AWS ECS.
func DefineDeployService(log *log.Logger, ctx ServiceContext, deployEnv *devdeploy.DeploymentEnv) (*devdeploy.DeployService, error) {
log.Printf("\tDefine deploy for service '%s'.", ctx.ServiceName)
// Start to define all the information for the service from the service context.
srv := &devdeploy.DeployService{
//DeploymentEnv: deployEnv,
ServiceName: ctx.ServiceName,
EnableHTTPS: ctx.EnableHTTPS,
ServiceHostPrimary: ctx.ServiceHostPrimary,
ServiceHostNames: ctx.ServiceHostNames,
StaticFilesImgResizeEnable: ctx.StaticFilesImgResizeEnable,
}
// When only service host names are set, choose the first item as the primary host.
if srv.ServiceHostPrimary == "" && len(srv.ServiceHostNames) > 0 {
srv.ServiceHostPrimary = srv.ServiceHostNames[0]
log.Printf("\t\tSet Service Primary Host to '%s'.", srv.ServiceHostPrimary)
}
// Set the release tag for the image; it includes the env + service name + commit hash/tag.
srv.ReleaseTag = devdeploy.GitLabCiReleaseTag(deployEnv.Env, srv.ServiceName)
log.Printf("\t\tSet ReleaseTag '%s'.", srv.ReleaseTag)
// The S3 prefix used to upload static files that are served publicly.
if ctx.StaticFilesS3Enable {
srv.StaticFilesS3Prefix = filepath.Join(deployEnv.AwsS3BucketPublicKeyPrefix, srv.ReleaseTag, "static")
}
// Determine the Dockerfile for the service.
if ctx.Dockerfile != "" {
srv.Dockerfile = ctx.Dockerfile
log.Printf("\t\tUsing docker file '%s'.", srv.Dockerfile)
} else {
var err error
srv.Dockerfile, err = devdeploy.FindServiceDockerFile(deployEnv.ProjectRoot, srv.ServiceName)
if err != nil {
return nil, err
}
log.Printf("\t\tFound service docker file '%s'.", srv.Dockerfile)
}
// Set the service directory.
if ctx.ServiceDir == "" {
ctx.ServiceDir = filepath.Dir(srv.Dockerfile)
}
srv.StaticFilesDir = filepath.Join(ctx.ServiceDir, "static")
// Define the ECS Cluster used to host the serverless fargate tasks.
srv.AwsEcsCluster = &devdeploy.AwsEcsCluster{
ClusterName: deployEnv.ProjectName + "-" + deployEnv.Env,
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
// Define the ECS task execution role. This role executes ECS actions such as pulling the image and storing the
// application logs in cloudwatch.
srv.AwsEcsExecutionRole = &devdeploy.AwsIamRole{
RoleName: fmt.Sprintf("ecsExecutionRole%s%s", deployEnv.ProjectNameCamel(), strcase.ToCamel(deployEnv.Env)),
Description: fmt.Sprintf("Provides access to other AWS service resources that are required to run Amazon ECS tasks for %s. ", deployEnv.ProjectName),
AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
AttachRolePolicyArns: []string{"arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"},
}
log.Printf("\t\tSet ECS Execution Role Name to '%s'.", srv.AwsEcsExecutionRole)
// Define the ECS task role. This role is used by the task itself for calling other AWS services.
srv.AwsEcsTaskRole = &devdeploy.AwsIamRole{
RoleName: fmt.Sprintf("ecsTaskRole%s%s", deployEnv.ProjectNameCamel(), strcase.ToCamel(deployEnv.Env)),
Description: fmt.Sprintf("Allows ECS tasks for %s to call AWS services on your behalf.", deployEnv.ProjectName),
AssumeRolePolicyDocument:"{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
log.Printf("\t\tSet ECS Task Role Name to '%s'.", srv.AwsEcsTaskRole)
// AwsEcsTaskPolicy defines the name and policy that will be attached to the task role. The policy document grants
// the permissions required for deployed services to access AWS services. If the policy already exists, the
// statements will be used to add new required actions, but not for removal.
srv.AwsEcsTaskPolicy = &devdeploy.AwsIamPolicy{
PolicyName: fmt.Sprintf("%s%sServices", deployEnv.ProjectNameCamel(), strcase.ToCamel(deployEnv.Env)),
Description: fmt.Sprintf("Defines access for %s services. ", deployEnv.ProjectName),
PolicyDocument: devdeploy.AwsIamPolicyDocument{
Version: "2012-10-17",
Statement: []devdeploy.AwsIamStatementEntry{
{
Sid: "DefaultServiceAccess",
Effect: "Allow",
Action: []string{
"s3:HeadBucket",
"s3:ListObjects",
"s3:PutObject",
"s3:PutObjectAcl",
"cloudfront:ListDistributions",
"ec2:DescribeNetworkInterfaces",
"ec2:DeleteNetworkInterface",
"ecs:ListTasks",
"ecs:DescribeServices",
"ecs:DescribeTasks",
"ec2:DescribeNetworkInterfaces",
"route53:ListHostedZones",
"route53:ListResourceRecordSets",
"route53:ChangeResourceRecordSets",
"ecs:UpdateService",
"ses:SendEmail",
"ses:ListIdentities",
"secretsmanager:ListSecretVersionIds",
"secretsmanager:GetSecretValue",
"secretsmanager:CreateSecret",
"secretsmanager:UpdateSecret",
"secretsmanager:RestoreSecret",
"secretsmanager:DeleteSecret",
},
Resource: "*",
},
{
Sid: "ServiceInvokeLambda",
Effect: "Allow",
Action: []string{
"iam:GetRole",
"lambda:InvokeFunction",
"lambda:ListVersionsByFunction",
"lambda:GetFunction",
"lambda:InvokeAsync",
"lambda:GetFunctionConfiguration",
"iam:PassRole",
"lambda:GetAlias",
"lambda:GetPolicy",
},
Resource: []string{
"arn:aws:iam:::role/*",
"arn:aws:lambda:::function:*",
},
},
{
Sid: "datadoglambda",
Effect: "Allow",
Action: []string{
"cloudwatch:Get*",
"cloudwatch:List*",
"ec2:Describe*",
"support:*",
"tag:GetResources",
"tag:GetTagKeys",
"tag:GetTagValues",
},
Resource: "*",
},
},
},
}
log.Printf("\t\tSet ECS Task Policy Name to '%s'.", srv.AwsEcsTaskPolicy.PolicyName)
// AwsCloudWatchLogGroup defines the name of the cloudwatch log group that will be used to store logs for the ECS tasks.
srv.AwsCloudWatchLogGroup = &devdeploy.AwsCloudWatchLogGroup{
LogGroupName: fmt.Sprintf("logs/env_%s/aws/ecs/cluster_%s/service_%s", deployEnv.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
log.Printf("\t\tSet AWS Log Group Name to '%s'.", srv.AwsCloudWatchLogGroup.LogGroupName)
// AwsSdPrivateDnsNamespace defines the service discovery group.
srv.AwsSdPrivateDnsNamespace = &devdeploy.AwsSdPrivateDnsNamespace{
Name: srv.AwsEcsCluster.ClusterName,
Description: fmt.Sprintf("Private DNS namespace used for services running on the ECS Cluster %s", srv.AwsEcsCluster.ClusterName),
Service: &devdeploy.AwsSdService{
Name: ctx.ServiceName,
Description: fmt.Sprintf("Service %s running on the ECS Cluster %s",ctx.ServiceName, srv.AwsEcsCluster.ClusterName),
DnsRecordTTL: 300,
HealthCheckFailureThreshold: 3,
},
}
log.Printf("\t\tSet AWS Service Discovery Namespace to '%s'.", srv.AwsSdPrivateDnsNamespace.Name)
// If the service is requested to use an elastic load balancer then define.
if ctx.EnableElb {
// AwsElbLoadBalancer defines if the service should use an elastic load balancer.
srv.AwsElbLoadBalancer = &devdeploy.AwsElbLoadBalancer{
Name: fmt.Sprintf("%s-%s-%s", deployEnv.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
IpAddressType: "ipv4",
Scheme: "internet-facing",
Type: "application",
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
log.Printf("\t\tSet ELB Name to '%s'.", srv.AwsElbLoadBalancer.Name)
// Define the target group for service to receive HTTP traffic from the load balancer.
srv.AwsElbLoadBalancer.TargetGroup = &devdeploy.AwsElbTargetGroup{
Name: fmt.Sprintf("%s-http", srv.ServiceName),
Port: 80,
Protocol:"HTTP",
TargetType: "ip",
HealthCheckEnabled: true,
HealthCheckIntervalSeconds: 30,
HealthCheckPath: "/ping",
HealthCheckProtocol: "HTTP",
HealthCheckTimeoutSeconds: 5,
HealthyThresholdCount: 3,
UnhealthyThresholdCount: 3,
Matcher: "200",
}
log.Printf("\t\t\tSet ELB Target Group Name for %s to '%s'.",
srv.AwsElbLoadBalancer.TargetGroup.Protocol,
srv.AwsElbLoadBalancer.TargetGroup.Name)
// Set ECS configs based on specified env.
if deployEnv.Env == "prod" {
srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 300
} else {
// Force non-prod environments to deploy immediately without waiting for connections to drain.
srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 0
}
}
// AwsEcsService defines the details for the ecs service.
srv.AwsEcsService = &devdeploy.AwsEcsService{
ServiceName: ctx.ServiceName,
DesiredCount: int64(ctx.DesiredCount),
EnableECSManagedTags: false,
HealthCheckGracePeriodSeconds: 60,
LaunchType: "FARGATE",
}
// Ensure that when deploying a new service there is always at least one task running.
if srv.AwsEcsService.DesiredCount == 0 {
srv.AwsEcsService.DesiredCount = 1
}
// Set ECS configs based on specified env.
if deployEnv.Env == "prod" {
srv.AwsEcsService.DeploymentMinimumHealthyPercent = 100
srv.AwsEcsService.DeploymentMaximumPercent = 200
} else {
srv.AwsEcsService.DeploymentMinimumHealthyPercent = 100
srv.AwsEcsService.DeploymentMaximumPercent = 200
}
// Read the defined json task definition for the service.
dat, err := devdeploy.EcsReadTaskDefinition(ctx.ServiceDir, deployEnv.Env)
if err != nil {
return srv, err
}
// JSON decode the task definition.
taskDef, err := devdeploy.ParseTaskDefinitionInput(dat)
if err != nil {
return srv, err
}
// AwsEcsTaskDefinition defines the details for registering a new ECS task definition.
srv.AwsEcsTaskDefinition = &devdeploy.AwsEcsTaskDefinition{
RegisterInput: taskDef,
UpdatePlaceholders: func(placeholders map[string]string) error {
// Try to find the Datadog API key, this value is optional.
// If Datadog API key is not specified, then integration with Datadog for observability will not be active.
{
// Load Datadog API key which can be either stored in an environment variable or in AWS Secrets Manager.
// 1. Check env vars for [DEV|STAGE|PROD]_DD_API_KEY and DD_API_KEY
datadogApiKey := devdeploy.GetTargetEnv(deployEnv.Env, "DD_API_KEY")
// 2. Check AWS Secrets Manager for datadog entry prefixed with target environment.
if datadogApiKey == "" {
prefixedSecretId := deployEnv.SecretID("datadog/api-key")
var err error
datadogApiKey, err = devdeploy.GetAwsSecretValue(deployEnv.AwsCredentials, prefixedSecretId)
if err != nil {
if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
return err
}
}
}
// 3. Check AWS Secrets Manager for Datadog entry.
if datadogApiKey == "" {
secretId := "DATADOG"
var err error
datadogApiKey, err = devdeploy.GetAwsSecretValue(deployEnv.AwsCredentials, secretId)
if err != nil {
if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
return err
}
}
}
if datadogApiKey != "" {
log.Printf("\t%s\tAPI Key set.\n", tests.Success)
} else {
log.Printf("\t%s\tAPI Key NOT set.\n", tests.Failed)
}
placeholders["{DATADOG_APIKEY}"] = datadogApiKey
// When the Datadog API key is empty, don't force the container to be essential and have the whole task fail.
if datadogApiKey != "" {
placeholders["{DATADOG_ESSENTIAL}"] = "true"
} else {
placeholders["{DATADOG_ESSENTIAL}"] = "false"
}
}
// Support for resizing static images files to be responsive.
if ctx.StaticFilesImgResizeEnable {
placeholders["{STATIC_FILES_IMG_RESIZE_ENABLED}"] = "true"
} else {
placeholders["{STATIC_FILES_IMG_RESIZE_ENABLED}"] = "false"
}
return nil
},
}
return srv, nil
}
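// examplePingHandler is an illustrative sketch only and belongs in the deployed
// service itself rather than in this deploy tooling: the ELB target group defined
// above health-checks the path "/ping" and expects a 200 response (Matcher "200").
// Assumes "net/http" is imported where this handler lives.
func examplePingHandler(w http.ResponseWriter, r *http.Request) {
// Any 200 response satisfies the target group health check.
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("PONG"))
}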
// FunctionContext defines the flags for deploying a function.
type FunctionContext struct {
EnableVPC bool `validate:"omitempty" example:"false"`
}
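// exampleReplacePlaceholders is an illustrative sketch only: it shows how the
// placeholders set in UpdatePlaceholders above (for example "{DATADOG_APIKEY}",
// "{DATADOG_ESSENTIAL}" and "{STATIC_FILES_IMG_RESIZE_ENABLED}") would be
// substituted into a task-definition JSON document before registration. The helper
// below is an assumption for illustration, not the project's actual implementation;
// it assumes "strings" is imported.
func exampleReplacePlaceholders(taskDefJSON string, placeholders map[string]string) string {
for key, val := range placeholders {
taskDefJSON = strings.Replace(taskDefJSON, key, val, -1)
}
return taskDefJSON
}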

View File

@ -1,212 +0,0 @@
package cicd
import (
"context"
"encoding/json"
"log"
"path/filepath"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"geeks-accelerator/oss/saas-starter-kit/internal/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/lib/pq"
_ "github.com/lib/pq"
"github.com/pkg/errors"
sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
"gopkg.in/go-playground/validator.v9"
)
// MigrateFlags defines the flags used for executing schema migration.
type MigrateFlags struct {
// Required flags.
Env string `validate:"oneof=dev stage prod" example:"dev"`
// Optional flags.
ProjectRoot string `validate:"omitempty" example:"."`
ProjectName string `validate:"omitempty" example:"example-project"`
}
// migrateRequest defines the details needed to execute a schema migration.
type migrateRequest struct {
Env string `validate:"oneof=dev stage prod"`
ProjectRoot string `validate:"required"`
ProjectName string `validate:"required"`
GoModFile string `validate:"required"`
GoModName string `validate:"required"`
AwsCreds awsCredentials `validate:"required,dive,required"`
_awsSession *session.Session
flags MigrateFlags
}
// awsSession returns the current AWS session for the migrateRequest.
func (r *migrateRequest) awsSession() *session.Session {
if r._awsSession == nil {
r._awsSession = r.AwsCreds.Session()
}
return r._awsSession
}
// NewMigrateRequest generates a new request for executing schema migration for a given set of CLI flags.
func NewMigrateRequest(log *log.Logger, flags MigrateFlags) (*migrateRequest, error) {
// Validates specified CLI flags map to struct successfully.
log.Println("Validate flags.")
{
errs := validator.New().Struct(flags)
if errs != nil {
return nil, errs
}
log.Printf("\t%s\tFlags ok.", tests.Success)
}
// Generate a migrate request using CLI flags and AWS credentials.
log.Println("Generate migrate request.")
var req migrateRequest
{
// Define new migrate request.
req = migrateRequest{
Env: flags.Env,
ProjectRoot: flags.ProjectRoot,
ProjectName: flags.ProjectName,
flags: flags,
}
// When project root directory is empty or set to current working path, then search for the project root by locating
// the go.mod file.
log.Println("\tDetermining the project root directory.")
{
if req.ProjectRoot == "" || req.ProjectRoot == "." {
log.Println("\tAttempting to location project root directory from current working directory.")
var err error
req.GoModFile, err = findProjectGoModFile()
if err != nil {
return nil, err
}
req.ProjectRoot = filepath.Dir(req.GoModFile)
} else {
log.Printf("\t\tUsing supplied project root directory '%s'.\n", req.ProjectRoot)
req.GoModFile = filepath.Join(req.ProjectRoot, "go.mod")
}
log.Printf("\t\t\tproject root: %s", req.ProjectRoot)
log.Printf("\t\t\tgo.mod: %s", req.GoModFile)
}
log.Println("\tExtracting go module name from go.mod.")
{
var err error
req.GoModName, err = loadGoModName(req.GoModFile)
if err != nil {
return nil, err
}
log.Printf("\t\t\tmodule name: %s", req.GoModName)
}
log.Println("\tDetermining the project name.")
{
if req.ProjectName != "" {
log.Printf("\t\tUse provided value.")
} else {
req.ProjectName = filepath.Base(req.GoModName)
log.Printf("\t\tSet from go module.")
}
log.Printf("\t\t\tproject name: %s", req.ProjectName)
}
// Verifies AWS credentials specified as environment variables.
log.Println("\tVerify AWS credentials.")
{
var err error
req.AwsCreds, err = GetAwsCredentials(req.Env)
if err != nil {
return nil, err
}
if req.AwsCreds.UseRole {
log.Printf("\t\t\tUsing role")
} else {
log.Printf("\t\t\tAccessKeyID: '%s'", req.AwsCreds.AccessKeyID)
}
log.Printf("\t\t\tRegion: '%s'", req.AwsCreds.Region)
log.Printf("\t%s\tAWS credentials valid.", tests.Success)
}
}
return &req, nil
}
// Migrate is the main entrypoint for migrating the database schema for a given target environment.
func Migrate(log *log.Logger, ctx context.Context, req *migrateRequest) error {
// Load the database details.
var db DB
{
log.Println("Get Database Details from AWS Secret Manager")
dbId := dBInstanceIdentifier(req.ProjectName, req.Env)
// Secret ID used to store the DB username and password across deploys.
dbSecretId := secretID(req.ProjectName, req.Env, dbId)
// Retrieve the current secret value if something is stored.
{
sm := secretsmanager.New(req.awsSession())
res, err := sm.GetSecretValue(&secretsmanager.GetSecretValueInput{
SecretId: aws.String(dbSecretId),
})
if err != nil {
if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
return errors.Wrapf(err, "Failed to get value for secret id %s", dbSecretId)
} else {
// This should only happen when the deploy script first runs and no resources exist in the
// AWS account. Creating a database requires the VPC, so a better strategy is needed for
// defining resources that can be shared between deployment steps.
log.Printf("\t%s\tDatabase credentials not found.", tests.Failed)
return nil
}
} else {
err = json.Unmarshal([]byte(*res.SecretString), &db)
if err != nil {
return errors.Wrap(err, "Failed to json decode db credentials")
}
}
log.Printf("\t%s\tDatabase credentials found.", tests.Success)
}
}
// Start Database and run the migration.
{
log.Println("Proceed with schema migration")
log.Printf("\t\tOpen database connection")
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
// name use RegisterWithServiceName.
sqltrace.Register(db.Driver, &pq.Driver{}, sqltrace.WithServiceName("devops:migrate"))
masterDb, err := sqlxtrace.Open(db.Driver, db.URL())
if err != nil {
return errors.WithStack(err)
}
defer masterDb.Close()
// Start Migrations
log.Printf("\t\tStart migrations.")
if err = schema.Migrate(ctx, masterDb, log, false); err != nil {
return errors.WithStack(err)
}
log.Printf("\t%s\tMigrate complete.", tests.Success)
}
return nil
}

View File

@ -1,85 +0,0 @@
// Package retry contains a simple retry mechanism defined by a slice of delay
// times. There are no maximum retries accounted for here. If retries should be
// limited, use a Timeout context to keep from retrying forever. This should
// probably be made into something more robust.
package retry
import (
"context"
"time"
)
// DefaultPollIntervals is a slice of the delays before re-checking the status on
// an executing query, backing off from a short delay at first. This sequence
// has been selected with Athena queries in mind, which may operate very
// quickly for things like schema manipulation, or which may run for an
// extended period of time, when running an actual data analysis query.
// Long-running queries will exhaust their rapid retries quickly, and fall back
// to checking every few seconds or longer.
var DefaultPollIntervals = []time.Duration{
time.Millisecond,
2 * time.Millisecond,
2 * time.Millisecond,
5 * time.Millisecond,
10 * time.Millisecond,
20 * time.Millisecond,
50 * time.Millisecond,
50 * time.Millisecond,
100 * time.Millisecond,
100 * time.Millisecond,
200 * time.Millisecond,
500 * time.Millisecond,
time.Second,
2 * time.Second,
5 * time.Second,
10 * time.Second,
20 * time.Second,
30 * time.Second,
time.Minute,
}
// delayer keeps track of the current delay between retries.
type delayer struct {
Delays []time.Duration
currentIndex int
}
// Delay returns the current delay duration, and advances the index to the next
// delay defined. If the index has reached the end of the delay slice, then it
// will continue to return the maximum delay defined.
func (d *delayer) Delay() time.Duration {
t := d.Delays[d.currentIndex]
if d.currentIndex < len(d.Delays)-1 {
d.currentIndex++
}
return t
}
// Retry uses a slice of time.Duration interval delays to retry a function
// until it either errors or indicates that it is ready to proceed. If f
// returns true, or an error, the retry loop is broken. Pass a closure as f if
// you need to record a value from the operation that you are performing inside
// f.
func Retry(ctx context.Context, retryIntervals []time.Duration, f func() (bool, error)) (err error) {
if len(retryIntervals) == 0 {
retryIntervals = DefaultPollIntervals
}
d := delayer{Delays: retryIntervals}
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
ok, err := f()
if err != nil {
return err
}
if ok {
return nil
}
time.Sleep(d.Delay())
}
}
return err
}
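// exampleRetryUsage is an illustrative sketch of the closure pattern described in
// the package comment: record a value from inside f and stop retrying once the
// operation reports it is ready. The 30 second deadline and the fake status check
// are assumptions for illustration.
func exampleRetryUsage() error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
var status string
err := Retry(ctx, nil, func() (bool, error) {
// Replace with a real status check; passing nil intervals falls back to DefaultPollIntervals.
status = "ACTIVE"
return status == "ACTIVE", nil
})
if err != nil {
return err
}
// status now holds the last value recorded by the closure.
_ = status
return nil
}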

View File

@ -1,86 +0,0 @@
package retry
import (
"context"
"errors"
"testing"
"time"
)
var errExpectedFailure = errors.New("expected failure for test purposes")
func TestDelayer(t *testing.T) {
delays := []time.Duration{
time.Millisecond,
2 * time.Millisecond,
4 * time.Millisecond,
10 * time.Millisecond,
}
tt := []struct {
desc string
numRetries int
expDelay time.Duration
}{
{"first try", 0, time.Millisecond},
{"second try", 1, 2 * time.Millisecond},
{"len(delays) try", len(delays) - 1, delays[len(delays)-1]},
{"len(delays) + 1 try", len(delays), delays[len(delays)-1]},
{"len(delays) * 2 try", len(delays) * 2, delays[len(delays)-1]},
}
for _, tc := range tt {
t.Run(tc.desc, func(t *testing.T) {
var (
d = delayer{Delays: delays}
delay time.Duration
)
for i := tc.numRetries + 1; i > 0; i-- {
delay = d.Delay()
}
if delay != tc.expDelay {
t.Fatalf(
"expected delay of %s after %d retries, but got %s",
tc.expDelay, tc.numRetries, delay)
}
})
}
}
func TestRetry(t *testing.T) {
delays := []time.Duration{
time.Millisecond,
2 * time.Millisecond,
3 * time.Millisecond,
}
tt := []struct {
desc string
tries int
success bool
err error
}{
{"first try", 1, true, nil},
{"second try error", 2, false, errExpectedFailure},
{"third try success", 3, true, nil},
}
for _, tc := range tt {
t.Run(tc.desc, func(t *testing.T) {
tries := 0
retryFunc := func() (bool, error) {
tries++
if tries == tc.tries {
return tc.success, tc.err
}
t.Logf("try #%d unsuccessful: trying again up to %d times", tries, tc.tries)
return false, nil
}
err := Retry(context.Background(), delays, retryFunc)
if err != tc.err {
t.Fatalf("expected error %s, but got error %s", err, tc.err)
}
if tries != tc.tries {
t.Fatalf("expected %d tries, but tried %d times", tc.tries, tries)
}
})
}
}

View File

@ -1,200 +0,0 @@
package main
import (
"context"
"expvar"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
"log"
"os"
"strings"
"time"
"geeks-accelerator/oss/saas-starter-kit/tools/devops/cmd/cicd"
_ "github.com/lib/pq"
"github.com/urfave/cli"
)
// build is the git version of this program. It is set using build flags in the makefile.
var build = "develop"
// service is the name of the program used for logging, tracing and the
// prefix used for loading env variables
// ie: export DEVOPS_ENV=dev
var service = "DEVOPS"
func main() {
// =========================================================================
// Logging
log := log.New(os.Stdout, service+" : ", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)
// =========================================================================
// Log App Info
// Print the build version for our logs. Also expose it under /debug/vars.
expvar.NewString("build").Set(build)
log.Printf("main : Started : Application Initializing version %q", build)
defer log.Println("main : Completed")
log.Printf("main : Args: %s", strings.Join(os.Args, " "))
// =========================================================================
// Start Truss
var (
buildFlags cicd.ServiceBuildFlags
deployFlags cicd.ServiceDeployFlags
migrateFlags cicd.MigrateFlags
)
app := cli.NewApp()
app.Commands = []cli.Command{
{
Name: "build",
Usage: "-service=web-api -env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &buildFlags.ServiceName},
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &buildFlags.Env},
cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &buildFlags.DockerFile},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &buildFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &buildFlags.ProjectName},
cli.StringFlag{Name: "build_dir", Usage: "build context directory", Destination: &buildFlags.BuildDir},
cli.StringFlag{Name: "private_bucket", Usage: "dev, stage, or prod", Destination: &buildFlags.S3BucketPrivateName},
cli.BoolFlag{Name: "lambda", Usage: "build as lambda function", Destination: &buildFlags.IsLambda},
cli.BoolFlag{Name: "no_cache", Usage: "skip docker cache", Destination: &buildFlags.NoCache},
cli.BoolFlag{Name: "no_push", Usage: "skip docker push after build", Destination: &buildFlags.NoPush},
},
Action: func(c *cli.Context) error {
req, err := cicd.NewServiceBuildRequest(log, buildFlags)
if err != nil {
return err
}
return cicd.ServiceBuild(log, req)
},
},
{
Name: "deploy-service",
Usage: "-service=web-api -env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &deployFlags.ServiceName},
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &deployFlags.Env},
cli.BoolFlag{Name: "enable_https", Usage: "enable HTTPS", Destination: &deployFlags.EnableHTTPS},
cli.StringFlag{Name: "primary_host", Usage: "dev, stage, or prod", Destination: &deployFlags.ServiceHostPrimary},
cli.StringSliceFlag{Name: "host_names", Usage: "dev, stage, or prod", Value: &deployFlags.ServiceHostNames},
cli.StringFlag{Name: "private_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPrivateName},
cli.StringFlag{Name: "public_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPublicName},
cli.BoolFlag{Name: "public_bucket_cloudfront", Usage: "serve static files from Cloudfront", Destination: &deployFlags.S3BucketPublicCloudfront},
cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &deployFlags.DockerFile},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &deployFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &deployFlags.ProjectName},
cli.BoolFlag{Name: "enable_elb", Usage: "enable deployed to use Elastic Load Balancer", Destination: &deployFlags.EnableEcsElb},
cli.BoolTFlag{Name: "lambda_vpc", Usage: "deploy lambda behind VPC", Destination: &deployFlags.EnableLambdaVPC},
cli.BoolFlag{Name: "static_files_s3", Usage: "service static files from S3", Destination: &deployFlags.StaticFilesS3Enable},
cli.BoolFlag{Name: "static_files_img_resize", Usage: "enable response images from service", Destination: &deployFlags.StaticFilesImgResizeEnable},
cli.BoolFlag{Name: "recreate", Usage: "skip docker push after build", Destination: &deployFlags.RecreateService},
},
Action: func(c *cli.Context) error {
if len(deployFlags.ServiceHostNames.Value()) == 1 {
var hostNames []string
for _, inpVal := range deployFlags.ServiceHostNames.Value() {
pts := strings.Split(inpVal, ",")
for _, h := range pts {
h = strings.TrimSpace(h)
if h != "" {
hostNames = append(hostNames, h)
}
}
}
deployFlags.ServiceHostNames = hostNames
}
req, err := cicd.NewServiceDeployRequest(log, deployFlags)
if err != nil {
return err
}
// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: req.Env,
}
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
return cicd.ServiceDeploy(log, ctx, req)
},
},
{
Name: "deploy-function",
Usage: "-function=web-api -env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "function", Usage: "name of function", Destination: &deployFlags.ServiceName},
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &deployFlags.Env},
cli.StringFlag{Name: "private_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPrivateName},
cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &deployFlags.DockerFile},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &deployFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &deployFlags.ProjectName},
cli.BoolTFlag{Name: "use_vpc", Usage: "deploy lambda behind VPC", Destination: &deployFlags.EnableLambdaVPC},
cli.BoolFlag{Name: "recreate", Usage: "skip docker push after build", Destination: &deployFlags.RecreateService},
},
Action: func(c *cli.Context) error {
if len(deployFlags.ServiceHostNames.Value()) == 1 {
var hostNames []string
for _, inpVal := range deployFlags.ServiceHostNames.Value() {
pts := strings.Split(inpVal, ",")
for _, h := range pts {
h = strings.TrimSpace(h)
if h != "" {
hostNames = append(hostNames, h)
}
}
}
deployFlags.ServiceHostNames = hostNames
}
req, err := cicd.NewServiceDeployRequest(log, deployFlags)
if err != nil {
return err
}
// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: req.Env,
}
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
return cicd.ServiceDeploy(log, ctx, req)
},
},
{
Name: "migrate",
Usage: "-env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &migrateFlags.Env},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &migrateFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &migrateFlags.ProjectName},
},
Action: func(c *cli.Context) error {
req, err := cicd.NewMigrateRequest(log, migrateFlags)
if err != nil {
return err
}
// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: req.Env,
}
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
return cicd.Migrate(log, ctx, req)
},
},
}
err := app.Run(os.Args)
if err != nil {
log.Fatalf("main : Truss : %+v", err)
}
log.Printf("main : Truss : Completed")
}

View File

@ -1,8 +0,0 @@
SHELL := /bin/bash
install:
go install .
build:
go install .

View File

@ -1,24 +0,0 @@
#!/usr/bin/env bash
doPush=0
if [[ "${CI_REGISTRY_IMAGE}" != "" ]]; then
docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}
releaseImg="${CI_REGISTRY_IMAGE}:devops-${CI_COMMIT_REF_NAME}"
doPush=1
else
releaseImg="devops"
fi
echo "release image: ${releaseImg}"
docker pull ${releaseImg} || true
docker build -f tools/devops/Dockerfile --cache-from ${releaseImg} -t ${releaseImg} .
if [[ $doPush == 1 ]]; then
docker push ${releaseImg}
fi
docker run --rm --entrypoint=cat ${releaseImg} /go/bin/devops > devops
chmod +x devops

View File

@ -1,2 +1,2 @@
schema schema
local.env .local.env

View File

@ -1,10 +0,0 @@
SHELL := /bin/bash
install:
go install .
build:
go install .
run:
go build . && ./schema