From f9273881bdd25f325a77b2cf219eb36195cc871c Mon Sep 17 00:00:00 2001
From: Lee Brown
Date: Wed, 21 Aug 2019 14:31:28 -0800
Subject: [PATCH] devops migration

---
 CONTRIBUTORS                           |   2 +-
 build/cicd/README.md                   | 391 +++++++++++++
 build/cicd/internal/config/config.go   | 498 ++++++++++++++++
 build/cicd/internal/config/function.go | 385 +++++++++++++
 build/cicd/internal/config/schema.go   |  38 ++
 build/cicd/internal/config/service.go  | 756 +++++++++++++++++++++++++
 build/cicd/main.go                     | 249 ++++++++
 7 files changed, 2318 insertions(+), 1 deletion(-)
 create mode 100644 build/cicd/README.md
 create mode 100644 build/cicd/internal/config/config.go
 create mode 100644 build/cicd/internal/config/function.go
 create mode 100644 build/cicd/internal/config/schema.go
 create mode 100644 build/cicd/internal/config/service.go
 create mode 100644 build/cicd/main.go

diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 98d3bb6..a46502d 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -1,5 +1,5 @@
 # This is the official list of people who can contribute
-# (and typically have contributed) code to the gotraining repository.
+# (and typically have contributed) code to the saas-starter-kit repository.
 #
 # Names should be added to this file only after verifying that
 # the individual or the individual's organization has agreed to

diff --git a/build/cicd/README.md b/build/cicd/README.md
new file mode 100644
index 0000000..a8ffb0c
--- /dev/null
+++ b/build/cicd/README.md
@@ -0,0 +1,391 @@

cicd
===

_cicd_ is a simple command line tool that facilitates build and deployment for your project. The goal is to help
developers easily set up a continuous build pipeline using [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) and code
driven deployment.

- [Overview](#overview)
  * [Deployment Environments](#deployment-environments)
  * [Services](#services)
  * [Functions](#functions)
  * [Schema Migrations](#schema-migrations)
- [Installation](#installation)
- [Getting Started](#getting-started)
- [Usage](#usage)
  * [Commands](#commands)
  * [Examples](#examples)
- [Join us on Gopher Slack](#join-us-on-gopher-slack)

## Overview

The command line tool provides the functionality to configure, build and deploy your code. When new code is pushed to
GitLab, this tool enables building, testing and deploying your code to [Amazon AWS](https://aws.amazon.com/).

Deploying your code to production always requires additional tooling and configuration. Instead of patching together a
system of existing tools and configuration files, this tool centralizes configuration for the application and any
additional deployment resources needed.

Configuration is defined with code. AWS resources are created/maintained using the [AWS SDK for Go](https://docs.aws.amazon.com/sdk-for-go/api/).
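As a small taste of what code-driven configuration looks like, here is a minimal sketch using the `devdeploy` types
that appear in `build/cicd/internal/config/config.go` below (illustrative only, not a complete deployment):

```go
package config

import "gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"

// exampleConfig sketches how deployment resources are declared as plain Go
// values instead of external templates. The fields shown match the devdeploy
// usage in config.go; everything else here is illustrative.
func exampleConfig(env string, creds devdeploy.AwsCredentials) *devdeploy.Config {
    cfg := &devdeploy.Config{
        Env:            env,
        AwsCredentials: creds,
        ProjectName:    "saas-starter-kit",
    }

    // Each AWS resource is described declaratively and then created or
    // updated via the AWS SDK for Go during build/deploy.
    cfg.AwsEcrRepository = &devdeploy.AwsEcrRepository{RepositoryName: cfg.ProjectName}
    cfg.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{IsDefault: true}

    return cfg
}
```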
**This tool is used by GitLab CI/CD** and is configured by a file called `.gitlab-ci.yml` placed at the repository's root.

**All code is deployed to Amazon AWS**.

Check out the [full presentation](https://docs.google.com/presentation/d/1sRFQwipziZlxBtN7xuF-ol8vtUqD55l_4GE-4_ns-qM/edit?usp=sharing)
that covers how to set up your [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) pipeline that uses autoscaling GitLab
Runners on AWS.

Support is provided for both services and functions. The build process for both relies on Docker, so neither is
required to be written in Go.

Configuration for build and deploy is provided by
[gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy)

For additional details regarding this tool, refer to
[gitlab.com/geeks-accelerator/oss/devops](https://gitlab.com/geeks-accelerator/oss/devops)

### Deployment Environments

All configuration for the deployment environments is defined in code that is located in the
[internal/config](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd/internal/config)
package. This includes configuration for the following deployment resources:

* [AWS ECR Repository](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEcrRepository)
named `saas-starter-kit`

* [AWS EC2 VPC](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEc2Vpc) defined to use the
default VPC for the AWS Region.

* [AWS EC2 Security Group](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsEc2SecurityGroup)
named `saas-starter-kit-[dev|stage|prod]`
    * The GitLab runner security group named `gitlab-runner` is added to this security group as an ingress rule.

* Private [AWS S3 Bucket](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsS3Bucket)
named `saas-starter-kit-private` used by the `web-app` and `web-api` for large object storage.
    * A lifecycle policy is applied to the key prefix `tmp/` that will expire objects after 1 day for temporary storage
    like exports.
    * Configured to [block all public access](https://aws.amazon.com/blogs/aws/amazon-s3-block-public-access-another-layer-of-protection-for-your-accounts-and-buckets/)

* Public [AWS S3 Bucket](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsS3Bucket)
named `saas-starter-kit-public` used to serve static files, primarily for the `web-app`.
    * CORS rules for GET and POST to support static files served directly from the S3 Bucket or via Cloudfront.
    * Defined key prefix of `public/` used by deployment for uploading static files.
    * AWS CloudFront configured for the `prod` environment for serving static files from the S3 Bucket as origin.

* Redis [AWS Elastic Cache Cluster](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsElasticCacheCluster)
named `saas-starter-kit-[dev|stage|prod]` for ephemeral storage.
    * Configured using Redis version 5.0.4.
    * Deployed as a single node cache cluster using the instance type `cache.t2.micro`, 1 vCPU with 512MB of memory.
    * `maxmemory-policy` parameter set to `allkeys-lru`, which evicts the least recently used (LRU) keys first in
    order to make space for newly added data. This prevents the cache cluster from ever running out of memory.

* Postgres [AWS RDS Database Instance](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsRdsDBInstance)
named `saas-starter-kit-[dev|stage|prod]`.
    * Configured with the default database `shared`, username `god` on port 5432.
    * The password is randomly generated during creation and stored in [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/).
    * Deployed as a single instance using the instance type `db.t2.small`, 1 vCPU with 2GiB of memory.
    * 20GiB of disk space has been allocated.
* [AWS IAM Policy](https://godoc.org/gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy#AwsIamPolicy) named
`saasStarterKitService[Dev|Stage|Prod]` that will be used to grant permissions for AWS ECS tasks and AWS Lambda
functions to access the AWS resources listed above.

* Support for Datadog can be enabled by adding your Datadog API key to [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/)
using the key `saas-starter-kit/[dev|stage|prod]/datadog`


Multiple development environments can easily be configured for more control. This tool supports three target deployment
environments:
* dev
* stage
* prod

`.gitlab-ci.yml` only has prod enabled.

### Services

Services are generally applications that will need to be long running or continuously available. The configured
services are:

* [web-app](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/cmd/web-app) - Publicly accessible
website and web application.
* [web-api](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/cmd/web-api) - Publicly accessible web
API and documentation.


The `Dockerfile` for both services is defined as a [multi-stage build](https://docs.docker.com/develop/develop-images/multistage-build/)
that includes building a base layer, running unit tests and compiling the Go application as a static binary. The final
layer in the multi-stage build uses [alpine:3.9](https://hub.docker.com/_/alpine?tab=description) as its base image and
copies in the compiled binary, resulting in a docker container that is around 50MB excluding any additional static
assets. It's possible to swap out `alpine:3.9` with [busybox](https://willschenk.com/articles/2019/building_a_slimmer_go_docker_container/)
for an even smaller resulting docker image.

A service is built using the defined Dockerfile. The resulting image is pushed to
[Amazon Elastic Container Registry](https://aws.amazon.com/ecr/).

    Amazon Elastic Container Registry (ECR) is a fully-managed Docker container registry that makes it easy for
    developers to store, manage, and deploy Docker container images. Amazon ECR is integrated with Amazon Elastic
    Container Service (ECS) simplifying the development to production workflow.

A service is configured for deployment in [service.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/build/cicd/internal/config/service.go).
Services are deployed to [AWS Fargate](https://aws.amazon.com/fargate/) based on the defined task definition.

    AWS Fargate is a compute engine for Amazon ECS that allows you to run containers without having to manage servers or
    clusters. With AWS Fargate, you no longer have to provision, configure, and scale clusters of virtual machines to
    run containers.

If the Dockerfile is a multi-stage build that contains a stage named `build_base_golang`, additional caching will be
implemented to reduce build times. The build command assumes that this stage runs `go mod download` to pull down all
package dependencies. The build command computes a checksum for the project go.mod and then executes a docker build
that targets the specific stage `build_base_golang`. The built container image is tagged with the go.mod hash and
pushed to the project's [GitLab repository](https://docs.gitlab.com/ee/user/project/repository/), as sketched below.
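A rough sketch of the caching idea (illustrative only; the actual logic lives in the `devdeploy` package):

```go
package main

import (
    "crypto/sha256"
    "fmt"
    "io"
    "log"
    "os"
)

// The go.mod checksum becomes the tag for the build_base_golang stage, so the
// base layer is only rebuilt when dependencies change.
func main() {
    f, err := os.Open("go.mod")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        log.Fatal(err)
    }
    tag := fmt.Sprintf("build-base-%x", h.Sum(nil)[:6])

    // The build then targets just that stage and pushes it so later builds
    // can pull it as a cache source, e.g.:
    //   docker build --target build_base_golang -t <registry>/<project>:TAG .
    fmt.Println("cache image tag:", tag)
}
```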
### Functions

Functions are applications that can be executed in a short period of time. The configured function is:

* `ddlogscollector` - A Python script, deployed as an AWS Lambda function, that ships logs from AWS CloudWatch to
Datadog.

A Python script for
[Datadog Log Collection](https://gitlab.com/geeks-accelerator/oss/devops/tree/master/examples/aws-lambda-python-ddlogs)
deployed as a function is provided by the devops project in
[examples](https://gitlab.com/geeks-accelerator/oss/devops/tree/master/examples).

A function is built using the defined
[Dockerfile](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/examples/aws-lambda-python-ddlogs/Dockerfile).

    The `Dockerfile` should use a [lambci image](https://hub.docker.com/r/lambci/lambda/) as the base image.

    Lambci images provide a sandboxed local environment that replicates the live AWS Lambda environment almost
    identically – including installed software and libraries, file structure and permissions, environment variables,
    context objects and behaviors – even the user and running process are the same.

The build command then uses `docker cp` to extract all files from the resulting container image that are located in
`/var/task`. These files are zipped and uploaded to AWS S3 for deployment.

A function is configured for deployment in [function.go](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/build/cicd/internal/config/function.go).
Functions are deployed to [AWS Lambda](https://aws.amazon.com/lambda/).

    AWS Lambda lets you run code without provisioning or managing servers. You pay only for the compute time you consume
    - there is no charge when your code is not running.


### Schema Migrations

_cicd_ includes a minimalistic database migration script that implements
[github.com/geeks-accelerator/sqlxmigrate](https://godoc.org/github.com/geeks-accelerator/sqlxmigrate). It provides
schema versioning and migration rollback. The schema for the entire project is defined globally and is located inside
internal: [internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema)

The example schema package provides two separate methods for handling schema migration:
* [Migrations](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/internal/schema/migrations.go) -
A list of direct SQL statements for each migration, each with a defined version ID. A database table is created to
persist executed migrations. On each run, the migration logic checks the migration database table to see whether a
migration has already been executed. Thus, schema migrations are only ever executed once. Migrations are defined as
functions to enable complex migrations, so results from one query can be manipulated before being piped to the next
query.

* [Init Schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/internal/schema/init_schema.go) -
If you have a lot of migrations, it can be a pain to run them all. For example, when you are deploying a new instance
of the app into a clean database. To prevent this, use the initSchema function that runs as if no migration had been
run before (on a new clean database).

Another bonus of the globally defined schema is that it enables your testing package to dynamically spin up database
containers on demand and automatically include all the migrations. This allows the testing package to programmatically
execute schema migrations before running any unit tests.


## Installation

Make sure you have a working Go environment. Go version 1.11+ is required as the project uses Go modules. [See
the install instructions for Go](http://golang.org/doc/install.html).
To install _cicd_, simply run:
```
$ go get geeks-accelerator/oss/saas-starter-kit/build/cicd
```

Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can
be easily used:
```
export PATH=$PATH:$GOPATH/bin
```

## Getting Started

_cicd_ requires AWS permissions to be executed locally. For the GitLab CI/CD build pipeline, AWS roles will be used.
The AWS user created below is only necessary for running _cicd_ locally.

1. You will need an existing AWS account or to create a new AWS account.

2. Define a new [AWS IAM Policy](https://console.aws.amazon.com/iam/home?region=us-west-2#/policies$new?step=edit)
called `saas-starter-kit-deploy` with a defined JSON statement instead of using the visual
editor. The statement is rather large as each permission is granted individually. A copy of
the statement is stored in the devops repo at
[configs/aws-aim-deploy-policy.json](https://gitlab.com/geeks-accelerator/oss/devops/blob/master/configs/aws-aim-deploy-policy.json)

3. Create a new [AWS User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users$new?step=details)
called `saas-starter-kit-deploy` with _Programmatic Access_ and _Attach existing policies directly_ with the policy
`saas-starter-kit-deploy` created in step 2.

4. Try running the build for a single service.
```bash
cicd --env=dev build service --name=web-api --release-tag=testv1
```

5. Try running the deploy for a single service.
```bash
cicd --env=dev deploy service --name=web-api --release-tag=testv1
```

## Usage

```bash
$ cicd [global options] command [command options] [arguments...]
```

### Global Options

* Target Environment - __required__

    `--env [dev|stage|prod]`

* AWS Access Key - optional or can be set via env variable `AWS_ACCESS_KEY_ID`

    `--aws-access-key value`

* AWS Secret Key - optional, can be set via env variable `AWS_SECRET_ACCESS_KEY`

    `--aws-secret-key value`

* AWS Region - optional, can be set via env variable `AWS_REGION`

    `--aws-region value`

* AWS Use Role - optional, can be set via env variable `AWS_USE_ROLE`; when enabled an IAM Role is used, otherwise AWS
Access/Secret Keys are required

* Show help

    `--help, -h`

* Print the version

    `--version, -v`

### Commands

* `build service` - Executes a build for a single service

    ```bash
    $ cicd -env [dev|stage|prod] build service -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target service, required
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the build details
    --no-cache                        skip caching for the docker build
    --no-push                         disable pushing release image to remote repository
    ```

* `build function` - Executes a build for a single function

    ```bash
    $ cicd -env [dev|stage|prod] build function -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target function, required
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the build details
    --no-cache                        skip caching for the docker build
    --no-push                         disable pushing release image to remote repository
    ```

* `deploy service` - Executes a deploy for a single service

    ```bash
    $ cicd -env [dev|stage|prod] deploy service -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target service, one of [web-app|web-api]
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the deploy details
    ```

* `deploy function` - Executes a deploy for a single function

    ```bash
    $ cicd -env [dev|stage|prod] deploy function -name NNNNN [command options]
    ```

    Options:
    ```bash
    --name value, -n value            target function, required
    --release-tag value, --tag value  optional tag to override default CI_COMMIT_SHORT_SHA
    --dry-run                         print out the deploy details
    ```

* `schema` - Runs the database migration

    ```bash
    $ cicd -env [dev|stage|prod] schema
    ```

* `help` - Shows a list of commands

    ```bash
    $ cicd help
    ```

    Or for one command:
    ```bash
    $ cicd build help
    ```

### Examples

Build the example service _web-api_
```bash
$ cicd --env=dev build service --name=web-api --release-tag=testv1 --dry-run=false
```

Deploy the example service _web-api_
```bash
$ cicd --env=dev deploy service --name=web-api --release-tag=testv1 --dry-run=false
```


## Join us on Gopher Slack

If you are having problems installing, troubles getting the project running or would like to contribute, join the
channel #saas-starter-kit on [Gopher Slack](http://invite.slack.golangbridge.org/)

diff --git a/build/cicd/internal/config/config.go b/build/cicd/internal/config/config.go
new file mode 100644
index 0000000..87b0419
--- /dev/null
+++ b/build/cicd/internal/config/config.go
@@ -0,0 +1,498 @@
package config

import (
    "context"
    "fmt"
    "log"
    "os"
    "path/filepath"
    "strings"

    "geeks-accelerator/oss/saas-starter-kit/internal/schema"
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/service/cloudfront"
    "github.com/aws/aws-sdk-go/service/rds"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/secretsmanager"
    "github.com/iancoleman/strcase"
    "github.com/jmoiron/sqlx"
    "github.com/pkg/errors"
    "gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

const (
    // ProjectNamePrefix will be appended to the name of the project.
    ProjectNamePrefix = ""

    // GitLabProjectBaseUrl is the base url used to create links to a specific CI/CD job or pipeline by ID.
    GitLabProjectBaseUrl = "https://gitlab.com/geeks-accelerator/oss/saas-starter-kit"
)

// Env defines the target deployment environment.
type Env = string

var (
    EnvDev   Env = "dev"
    EnvStage Env = "stage"
    EnvProd  Env = "prod"
)

// List of env names used by main.go for help.
var EnvNames = []Env{
    EnvDev,
    EnvStage,
    EnvProd,
}

// ConfigContext defines the flags for the build env.
type ConfigContext struct {
    // Env is the target environment used for the deployment.
    Env string `validate:"oneof=dev stage prod"`

    // AwsCredentials defines the credentials used for deployment.
    AwsCredentials devdeploy.AwsCredentials `validate:"required,dive,required"`
}

// NewConfigContext returns the ConfigContext.
func NewConfigContext(targetEnv Env, awsCredentials devdeploy.AwsCredentials) (*ConfigContext, error) {
    ctx := &ConfigContext{
        Env:            targetEnv,
        AwsCredentials: awsCredentials,
    }

    // If AWS Credentials are not set and use role is not enabled, try to load the credentials from env vars.
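    // (The exact env var names are resolved by devdeploy.GetAwsCredentialsFromEnv; per the README's global
    // options these are expected to be AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_REGION.)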
    if !ctx.AwsCredentials.UseRole && ctx.AwsCredentials.AccessKeyID == "" {
        var err error
        ctx.AwsCredentials, err = devdeploy.GetAwsCredentialsFromEnv(ctx.Env)
        if err != nil {
            return nil, err
        }
    } else if ctx.AwsCredentials.Region == "" {
        awsCreds, err := devdeploy.GetAwsCredentialsFromEnv(ctx.Env)
        if err != nil {
            return nil, err
        }
        ctx.AwsCredentials.Region = awsCreds.Region
    }

    return ctx, nil
}

// Config defines the details to setup the target environment for the project to build services and functions.
func (cfgCtx *ConfigContext) Config(log *log.Logger) (*devdeploy.Config, error) {

    // Init a new build target environment for the project.
    cfg := &devdeploy.Config{
        Env:            cfgCtx.Env,
        AwsCredentials: cfgCtx.AwsCredentials,
    }

    // Get the current working directory. This should be somewhere contained within the project.
    workDir, err := os.Getwd()
    if err != nil {
        return cfg, errors.WithMessage(err, "Failed to get current working directory.")
    }

    // Set the project root directory and project name. This is currently set by finding the go.mod file for the
    // project repo. Project name is the directory name.
    modDetails, err := devdeploy.LoadModuleDetails(workDir)
    if err != nil {
        return cfg, err
    }

    // ProjectRoot should be the root directory for the project.
    cfg.ProjectRoot = modDetails.ProjectRoot

    // ProjectName will be used for prefixing AWS resources. This could be changed as needed or manually defined.
    cfg.ProjectName = ProjectNamePrefix + modDetails.ProjectName

    // Set default AWS ECR Repository Name.
    cfg.AwsEcrRepository = &devdeploy.AwsEcrRepository{
        RepositoryName: cfg.ProjectName,
        Tags: []devdeploy.Tag{
            {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
            {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
        },
    }

    // Set the deployment to use the default VPC for the region.
    cfg.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{
        IsDefault: true,
    }

    // Set the security group to use for the deployed services, database and cluster. This will use the VPC ID defined
    // for the deployment.
    cfg.AwsEc2SecurityGroup = &devdeploy.AwsEc2SecurityGroup{
        GroupName:   cfg.ProjectName + "-" + cfg.Env,
        Description: fmt.Sprintf("Security group for %s services running on ECS", cfg.ProjectName),
        Tags: []devdeploy.Tag{
            {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
            {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
        },
    }

    // Set the name of the EC2 Security Group used by the gitlab runner. This is used to ensure the security
    // group defined above has access to the RDS cluster/instance and can thus handle schema migrations.
    cfg.GitlabRunnerEc2SecurityGroupName = "gitlab-runner"

    // Set the s3 buckets used by the deployed services.
    // S3 temp prefix used by services for short term storage. A lifecycle policy will be used for expiration.
    s3BucketTempPrefix := "tmp/"

    // Defines a life cycle policy to expire keys for the temp directory.
    bucketLifecycleTempRule := &s3.LifecycleRule{
        ID:     aws.String("Rule for : " + s3BucketTempPrefix),
        Status: aws.String("Enabled"),
        Filter: &s3.LifecycleRuleFilter{
            Prefix: aws.String(s3BucketTempPrefix),
        },
        Expiration: &s3.LifecycleExpiration{
            // Indicates the lifetime, in days, of the objects that are subject to the rule.
            // The value must be a non-zero positive integer.
            Days: aws.Int64(1),
        },
        // Specifies the days since the initiation of an incomplete multipart upload
        // that Amazon S3 will wait before permanently removing all parts of the upload.
        // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
        // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
        // in the Amazon Simple Storage Service Developer Guide.
        AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
            DaysAfterInitiation: aws.Int64(1),
        },
    }

    // Define the public S3 bucket used to serve static files for all the services.
    cfg.AwsS3BucketPublic = &devdeploy.AwsS3Bucket{
        BucketName:         cfg.ProjectName + "-public",
        IsPublic:           true,
        TempPrefix:         s3BucketTempPrefix,
        LocationConstraint: &cfg.AwsCredentials.Region,
        LifecycleRules:     []*s3.LifecycleRule{bucketLifecycleTempRule},
        CORSRules: []*s3.CORSRule{
            &s3.CORSRule{
                // Headers that are specified in the Access-Control-Request-Headers header.
                // These headers are allowed in a preflight OPTIONS request. In response to
                // any preflight OPTIONS request, Amazon S3 returns any requested headers that
                // are allowed.
                // AllowedHeaders: aws.StringSlice([]string{}),

                // An HTTP method that you allow the origin to execute. Valid values are GET,
                // PUT, HEAD, POST, and DELETE.
                //
                // AllowedMethods is a required field
                AllowedMethods: aws.StringSlice([]string{"GET", "POST"}),

                // One or more origins you want customers to be able to access the bucket from.
                //
                // AllowedOrigins is a required field
                AllowedOrigins: aws.StringSlice([]string{"*"}),

                // One or more headers in the response that you want customers to be able to
                // access from their applications (for example, from a JavaScript XMLHttpRequest
                // object).
                // ExposeHeaders: aws.StringSlice([]string{}),

                // The time in seconds that your browser is to cache the preflight response
                // for the specified resource.
                // MaxAgeSeconds: aws.Int64(),
            },
        },
    }

    // The base s3 key prefix used to upload static files.
    cfg.AwsS3BucketPublicKeyPrefix = "/public"

    // For production, enable Cloudfront CDN for all static files to avoid serving them from the slower S3 option.
    if cfg.Env == EnvProd {
        cfg.AwsS3BucketPublic.CloudFront = &devdeploy.AwsS3BucketCloudFront{
            // S3 key prefix to request your content from a directory in your Amazon S3 bucket.
            OriginPath: cfg.AwsS3BucketPublicKeyPrefix,

            // A complex type that controls whether CloudFront caches the response to requests.
            CachedMethods: []string{"HEAD", "GET"},

            // The distribution's configuration information.
            DistributionConfig: &cloudfront.DistributionConfig{
                Comment:       aws.String(""),
                Enabled:       aws.Bool(true),
                HttpVersion:   aws.String("http2"),
                IsIPV6Enabled: aws.Bool(true),
                DefaultCacheBehavior: &cloudfront.DefaultCacheBehavior{
                    Compress:   aws.Bool(true),
                    DefaultTTL: aws.Int64(1209600),
                    MinTTL:     aws.Int64(604800),
                    MaxTTL:     aws.Int64(31536000),
                    ForwardedValues: &cloudfront.ForwardedValues{
                        QueryString: aws.Bool(true),
                        Cookies: &cloudfront.CookiePreference{
                            Forward: aws.String("none"),
                        },
                    },
                    TrustedSigners: &cloudfront.TrustedSigners{
                        Enabled:  aws.Bool(false),
                        Quantity: aws.Int64(0),
                    },
                    ViewerProtocolPolicy: aws.String("allow-all"),
                },
                ViewerCertificate: &cloudfront.ViewerCertificate{
                    CertificateSource:            aws.String("cloudfront"),
                    MinimumProtocolVersion:       aws.String("TLSv1"),
                    CloudFrontDefaultCertificate: aws.Bool(true),
                },
                PriceClass:      aws.String("PriceClass_All"),
                CallerReference: aws.String("devops-deploy" + cfg.AwsS3BucketPublic.BucketName),
            },
        }
    }

    // Define the private S3 bucket used for long term file storage including but not limited to: log exports,
    // AWS Lambda code, application caching.
    cfg.AwsS3BucketPrivate = &devdeploy.AwsS3Bucket{
        BucketName:         cfg.ProjectName + "-private",
        IsPublic:           false,
        TempPrefix:         s3BucketTempPrefix,
        LocationConstraint: &cfg.AwsCredentials.Region,
        LifecycleRules:     []*s3.LifecycleRule{bucketLifecycleTempRule},
        PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
            // Specifies whether Amazon S3 should block public access control lists (ACLs)
            // for this bucket and objects in this bucket. Setting this element to TRUE
            // causes the following behavior:
            //
            //    * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
            //    public.
            //
            //    * PUT Object calls fail if the request includes a public ACL.
            //
            // Enabling this setting doesn't affect existing policies or ACLs.
            BlockPublicAcls: aws.Bool(true),

            // Specifies whether Amazon S3 should block public bucket policies for this
            // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
            // PUT Bucket policy if the specified bucket policy allows public access.
            //
            // Enabling this setting doesn't affect existing bucket policies.
            BlockPublicPolicy: aws.Bool(true),

            // Specifies whether Amazon S3 should restrict public bucket policies for this
            // bucket. Setting this element to TRUE restricts access to this bucket to only
            // AWS services and authorized users within this account if the bucket has a
            // public policy.
            //
            // Enabling this setting doesn't affect previously stored bucket policies, except
            // that public and cross-account access within any public bucket policy, including
            // non-public delegation to specific accounts, is blocked.
            RestrictPublicBuckets: aws.Bool(true),

            // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
            // objects in this bucket. Setting this element to TRUE causes Amazon S3 to
            // ignore all public ACLs on this bucket and objects in this bucket.
            //
            // Enabling this setting doesn't affect the persistence of any existing ACLs
            // and doesn't prevent new public ACLs from being set.
            IgnorePublicAcls: aws.Bool(true),
        },
    }

    // Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
    cfg.AwsS3BucketPrivate.Policy = func() string {
        policyResource := strings.Trim(filepath.Join(cfg.AwsS3BucketPrivate.BucketName, cfg.AwsS3BucketPrivate.TempPrefix), "/")
        return fmt.Sprintf(`{
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Action": "s3:GetBucketAcl",
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::%s",
                    "Principal": { "Service": "logs.%s.amazonaws.com" }
                },
                {
                    "Action": "s3:PutObject",
                    "Effect": "Allow",
                    "Resource": "arn:aws:s3:::%s/*",
                    "Condition": { "StringEquals": { "s3:x-amz-acl": "bucket-owner-full-control" } },
                    "Principal": { "Service": "logs.%s.amazonaws.com" }
                }
            ]
        }`, cfg.AwsS3BucketPrivate.BucketName, cfg.AwsCredentials.Region, policyResource, cfg.AwsCredentials.Region)
    }()

    // Define the Redis Cache cluster used for ephemeral storage.
    cfg.AwsElasticCacheCluster = &devdeploy.AwsElasticCacheCluster{
        CacheClusterId:          cfg.ProjectName + "-" + cfg.Env,
        CacheNodeType:           "cache.t2.micro",
        CacheSubnetGroupName:    "default",
        Engine:                  "redis",
        EngineVersion:           "5.0.4",
        NumCacheNodes:           1,
        Port:                    6379,
        AutoMinorVersionUpgrade: aws.Bool(true),
        SnapshotRetentionLimit:  aws.Int64(7),
        ParameterNameValues: []devdeploy.AwsElasticCacheParameter{
            devdeploy.AwsElasticCacheParameter{
                ParameterName:  "maxmemory-policy",
                ParameterValue: "allkeys-lru",
            },
        },
    }

    // Define the RDS database instance for transactional data. A random password will be generated for any created
    // instance and stored in AWS Secrets Manager.
    cfg.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
        DBInstanceIdentifier:    cfg.ProjectName + "-" + cfg.Env,
        DBName:                  "shared",
        Engine:                  "postgres",
        MasterUsername:          "god",
        Port:                    5432,
        DBInstanceClass:         "db.t2.small",
        AllocatedStorage:        20,
        PubliclyAccessible:      false,
        BackupRetentionPeriod:   aws.Int64(7),
        AutoMinorVersionUpgrade: true,
        CopyTagsToSnapshot:      aws.Bool(true),
        Tags: []devdeploy.Tag{
            {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
            {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
        },
        AfterCreate: func(res *rds.DBInstance, dbInfo *devdeploy.DBConnInfo) error {
            masterDb, err := sqlx.Open(dbInfo.Driver, dbInfo.URL())
            if err != nil {
                return errors.WithMessage(err, "Failed to connect to db for schema migration.")
            }
            defer masterDb.Close()

            return schema.Migrate(context.Background(), masterDb, log, false)
        },
    }

    // AwsIamPolicy defines the name and policy that will be attached to the task role. The policy document grants
    // the permissions required for deployed services to access AWS services. If the policy already exists, the
    // statements will be used to add new required actions, but not for removal.
    cfg.AwsIamPolicy = &devdeploy.AwsIamPolicy{
        PolicyName:  fmt.Sprintf("%s%sServices", cfg.ProjectNameCamel(), strcase.ToCamel(cfg.Env)),
        Description: fmt.Sprintf("Defines access for %s services.", cfg.ProjectName),
", cfg.ProjectName), + PolicyDocument: devdeploy.AwsIamPolicyDocument{ + Version: "2012-10-17", + Statement: []devdeploy.AwsIamStatementEntry{ + { + Sid: "DefaultServiceAccess", + Effect: "Allow", + Action: []string{ + "s3:HeadBucket", + "s3:ListObjects", + "s3:PutObject", + "s3:PutObjectAcl", + "cloudfront:ListDistributions", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface", + "ecs:ListTasks", + "ecs:DescribeServices", + "ecs:DescribeTasks", + "ec2:DescribeNetworkInterfaces", + "route53:ListHostedZones", + "route53:ListResourceRecordSets", + "route53:ChangeResourceRecordSets", + "ecs:UpdateService", + "ses:SendEmail", + "ses:ListIdentities", + "secretsmanager:ListSecretVersionIds", + "secretsmanager:GetSecretValue", + "secretsmanager:CreateSecret", + "secretsmanager:UpdateSecret", + "secretsmanager:RestoreSecret", + "secretsmanager:DeleteSecret", + }, + Resource: "*", + }, + { + Sid: "ServiceInvokeLambda", + Effect: "Allow", + Action: []string{ + "iam:GetRole", + "lambda:InvokeFunction", + "lambda:ListVersionsByFunction", + "lambda:GetFunction", + "lambda:InvokeAsync", + "lambda:GetFunctionConfiguration", + "iam:PassRole", + "lambda:GetAlias", + "lambda:GetPolicy", + }, + Resource: []string{ + "arn:aws:iam:::role/*", + "arn:aws:lambda:::function:*", + }, + }, + { + Sid: "datadoglambda", + Effect: "Allow", + Action: []string{ + "cloudwatch:Get*", + "cloudwatch:List*", + "ec2:Describe*", + "support:*", + "tag:GetResources", + "tag:GetTagKeys", + "tag:GetTagValues", + }, + Resource: "*", + }, + }, + }, + } + log.Printf("\t\tSet Task Policy Name to '%s'.", cfg.AwsIamPolicy.PolicyName) + + return cfg, nil +} + +// getDatadogApiKey tries to find the datadog api key from env variable or AWS Secrets Manager. +func getDatadogApiKey(cfg *devdeploy.Config) (string, error) { + // Load Datadog API key which can be either stored in an environment variable or in AWS Secrets Manager. + // 1. Check env vars for [DEV|STAGE|PROD]_DD_API_KEY and DD_API_KEY + apiKey := devdeploy.GetTargetEnv(cfg.Env, "DD_API_KEY") + + // 2. Check AWS Secrets Manager for datadog entry prefixed with target environment. + if apiKey == "" { + prefixedSecretId := cfg.SecretID("datadog") + var err error + apiKey, err = devdeploy.GetAwsSecretValue(cfg.AwsCredentials, prefixedSecretId) + if err != nil { + if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException { + return "", err + } + } + } + + // 3. Check AWS Secrets Manager for Datadog entry. + if apiKey == "" { + secretId := "DATADOG" + var err error + apiKey, err = devdeploy.GetAwsSecretValue(cfg.AwsCredentials, secretId) + if err != nil { + if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException { + return "", err + } + } + } + + return apiKey, nil +} + +// getCommitRef returns a string that will be used by go build to replace main.go:build constant. +func getCommitRef() string { + var commitRef string + + // Set the commit ref based on the GitLab CI/CD environment variables. 
    if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
        commitRef = "tag-" + ev
    } else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
        commitRef = "branch-" + ev
    }

    if commitRef != "" {
        if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
            commitRef = commitRef + "@" + ev
        }
    }

    return commitRef
}

diff --git a/build/cicd/internal/config/function.go b/build/cicd/internal/config/function.go
new file mode 100644
index 0000000..c579709
--- /dev/null
+++ b/build/cicd/internal/config/function.go
@@ -0,0 +1,385 @@
package config

import (
    "encoding/json"
    "log"
    "path/filepath"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/pkg/errors"
    "gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

// Function defines the name of a function.
type Function = string

var (
    Function_Ddlogscollector = "ddlogscollector"
    Function_YourNewFunction = "your-new-function"
)

// List of function names used by main.go for help.
var FunctionNames = []Function{
    // Python Datadog Logs Collector
    Function_Ddlogscollector,
    Function_YourNewFunction,
}

// FunctionContext defines the flags for deploying a function.
type FunctionContext struct {
    // Required flags.
    Name              string                       `validate:"required" example:"aws-lambda-go-func"`
    AwsLambdaFunction *devdeploy.AwsLambdaFunction `validate:"required"`
    AwsIamRole        *devdeploy.AwsIamRole        `validate:"required"`
    AwsIamPolicy      *devdeploy.AwsIamPolicy      `validate:"required"`

    // Optional flags.
    FunctionDir        string `validate:"omitempty"`
    BuildDir           string `validate:"omitempty"`
    DockerBuildContext string `validate:"omitempty" example:"."`
    Dockerfile         string `validate:"required" example:"./cmd/web-api/Dockerfile"`
    ReleaseTag         string `validate:"required"`
    EnableVPC          bool   `validate:"omitempty" example:"false"`
}

// NewFunctionContext returns the FunctionContext.
func NewFunctionContext(funcName string, cfg *devdeploy.Config) (*FunctionContext, error) {

    ctx := &FunctionContext{
        Name: funcName,

        FunctionDir: filepath.Join(cfg.ProjectRoot, "examples", funcName),

        DockerBuildContext: ".",

        // Set the release tag for the image to use include env + service name + commit hash/tag.
        ReleaseTag: devdeploy.GitLabCiReleaseTag(cfg.Env, funcName),
    }

    switch funcName {
    case Function_YourNewFunction:
        // No additional settings for function.
    case Function_Ddlogscollector:

        // The Python Datadog Logs Collector is located in the deployments directory rather than under examples.
        ctx.FunctionDir = filepath.Join(cfg.ProjectRoot, "deployments/ddlogscollector")

        // Change the build directory to the function directory instead of project root.
        ctx.BuildDir = ctx.FunctionDir

        // AwsLambdaFunction defines the details needed to create a Lambda function.
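        // Note: DD_API_KEY is intentionally left blank in the Environment map below; it is populated at
        // deploy time by the UpdateEnvironment callback assigned after this switch.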
        ctx.AwsLambdaFunction = &devdeploy.AwsLambdaFunction{
            FunctionName: ctx.Name,
            Description:  "Ship logs from cloudwatch to datadog",

            Handler:    "lambda_function.lambda_handler",
            Runtime:    "python2.7",
            MemorySize: 512,

            Timeout: aws.Int64(300),
            Environment: map[string]string{
                "DD_API_KEY":  "",
                "LAMBDA_FUNC": ctx.Name,
            },
            Tags: []devdeploy.Tag{
                {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
                {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
            },
        }

        ctx.AwsIamRole = &devdeploy.AwsIamRole{
            RoleName:                 "DatadogAWSIntegrationLambdaRole",
            Description:              "Allows Datadog to run Lambda functions to call AWS services on your behalf.",
            AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"lambda.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
            Tags: []devdeploy.Tag{
                {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
                {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
            },
        }

        ctx.AwsIamPolicy = &devdeploy.AwsIamPolicy{
            PolicyName:  "DatadogAWSIntegrationPolicy",
            Description: "Provides Datadog Lambda function the ability to ship AWS service related logs back to Datadog.",
            PolicyDocument: devdeploy.AwsIamPolicyDocument{
                Version: "2012-10-17",
                Statement: []devdeploy.AwsIamStatementEntry{
                    {
                        Action: []string{
                            "apigateway:GET",
                            "autoscaling:Describe*",
                            "budgets:ViewBudget",
                            "cloudfront:GetDistributionConfig",
                            "cloudfront:ListDistributions",
                            "cloudtrail:DescribeTrails",
                            "cloudtrail:GetTrailStatus",
                            "cloudwatch:Describe*",
                            "cloudwatch:Get*",
                            "cloudwatch:List*",
                            "codedeploy:List*",
                            "codedeploy:BatchGet*",
                            "directconnect:Describe*",
                            "dynamodb:List*",
                            "dynamodb:Describe*",
                            "ec2:Describe*",
                            "ecs:Describe*",
                            "ecs:List*",
                            "elasticache:Describe*",
                            "elasticache:List*",
                            "elasticfilesystem:DescribeFileSystems",
                            "elasticfilesystem:DescribeTags",
                            "elasticloadbalancing:Describe*",
                            "elasticloadbalancing:DescribeLoadBalancers",
                            "elasticloadbalancing:DescribeLoadBalancerAttributes",
                            "elasticmapreduce:List*",
                            "elasticmapreduce:Describe*",
                            "es:ListTags",
                            "es:ListDomainNames",
                            "es:DescribeElasticsearchDomains",
                            "health:DescribeEvents",
                            "health:DescribeEventDetails",
                            "health:DescribeAffectedEntities",
                            "kinesis:List*",
                            "kinesis:Describe*",
                            "lambda:AddPermission",
                            "lambda:GetPolicy",
                            "lambda:List*",
                            "lambda:RemovePermission",
                            "logs:Get*",
                            "logs:Describe*",
                            "logs:DescribeLogGroups",
                            "logs:DescribeLogStreams",
                            "logs:FilterLogEvents",
                            "logs:TestMetricFilter",
                            "logs:PutSubscriptionFilter",
                            "logs:DeleteSubscriptionFilter",
                            "logs:DescribeSubscriptionFilters",
                            "rds:Describe*",
                            "rds:List*",
                            "redshift:DescribeClusters",
                            "redshift:DescribeLoggingStatus",
                            "route53:List*",
                            "s3:GetBucketLogging",
                            "s3:GetBucketLocation",
                            "s3:GetBucketNotification",
                            "s3:GetBucketTagging",
"s3:ListAllMyBuckets", + "s3:PutBucketNotification", + "logs:PutSubscriptionFilter", + "logs:DeleteSubscriptionFilter", + "logs:DescribeSubscriptionFilters", + }, + Effect: "Allow", + Resource: "*", + }, + }, + }, + } + default: + return nil, errors.Wrapf(devdeploy.ErrInvalidFunction, + "No function context defined for function '%s'", + funcName) + } + + // Append the datadog api key before execution. + ctx.AwsLambdaFunction.UpdateEnvironment = func(vars map[string]string) error { + datadogApiKey, err := getDatadogApiKey(cfg) + if err != nil { + return err + } + vars["DD_API_KEY"] = datadogApiKey + return nil + } + + // Set the docker file if no custom one has been defined for the service. + if ctx.Dockerfile == "" { + ctx.Dockerfile = filepath.Join(ctx.BuildDir, "Dockerfile") + } + + return ctx, nil +} + +// Build handles defining all the information needed to deploy a service to AWS ECS. +func (ctx *FunctionContext) Build(log *log.Logger, noCache, noPush bool) (*devdeploy.BuildLambda, error) { + + log.Printf("Define build for function '%s'.", ctx.Name) + log.Printf("\tUsing release tag %s.", ctx.ReleaseTag) + + srv := &devdeploy.BuildLambda{ + FuncName: ctx.Name, + ReleaseTag: ctx.ReleaseTag, + BuildDir: ctx.BuildDir, + Dockerfile: ctx.Dockerfile, + DockerBuildContext: ctx.DockerBuildContext, + NoCache: noCache, + NoPush: noPush, + } + + return srv, nil +} + +// Deploy handles defining all the information needed to deploy a service to AWS ECS. +func (ctx *FunctionContext) Deploy(log *log.Logger) (*devdeploy.DeployLambda, error) { + + log.Printf("Define build for function '%s'.", ctx.Name) + log.Printf("\tUsing release tag %s.", ctx.ReleaseTag) + + srv := &devdeploy.DeployLambda{ + FuncName: ctx.Name, + EnableVPC: ctx.EnableVPC, + AwsLambdaFunction: ctx.AwsLambdaFunction, + AwsIamPolicy: ctx.AwsIamPolicy, + AwsIamRole: ctx.AwsIamRole, + } + + return srv, nil +} + +// S3Location returns the s3 bucket and key used to upload the code to. +func (ctx *FunctionContext) S3Location(cfg *devdeploy.Config) (string, string) { + s3Bucket := cfg.AwsS3BucketPrivate.BucketName + s3Key := filepath.Join("src", "aws", "lambda", cfg.Env, ctx.Name, ctx.ReleaseTag+".zip") + + return s3Bucket, s3Key +} + +// BuildFunctionForTargetEnv executes the build commands for a target function. +func BuildFunctionForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, functionName, releaseTag string, dryRun, noCache, noPush bool) error { + + cfgCtx, err := NewConfigContext(targetEnv, awsCredentials) + if err != nil { + return err + } + + cfg, err := cfgCtx.Config(log) + if err != nil { + return err + } + + funcCtx, err := NewFunctionContext(functionName, cfg) + if err != nil { + return err + } + + // Override the release tag if set. + if releaseTag != "" { + funcCtx.ReleaseTag = releaseTag + } + + details, err := funcCtx.Build(log, noCache, noPush) + if err != nil { + return err + } + + // Set the s3 bucket and s3 for uploading the zip file. + details.CodeS3Bucket, details.CodeS3Key = funcCtx.S3Location(cfg) + + // funcPath is used to copy the service specific code in the Dockerfile. + funcPath, err := filepath.Rel(cfg.ProjectRoot, funcCtx.FunctionDir) + if err != nil { + return err + } + + // commitRef is used by main.go:build constant. 
    commitRef := getCommitRef()
    if commitRef == "" {
        commitRef = funcCtx.ReleaseTag
    }

    details.BuildArgs = map[string]string{
        "func_path":  funcPath,
        "commit_ref": commitRef,
    }

    if dryRun {
        cfgJSON, err := json.MarshalIndent(cfg, "", "    ")
        if err != nil {
            log.Fatalf("BuildFunctionForTargetEnv : Marshalling config to JSON : %+v", err)
        }
        log.Printf("BuildFunctionForTargetEnv : config : %v\n", string(cfgJSON))

        detailsJSON, err := json.MarshalIndent(details, "", "    ")
        if err != nil {
            log.Fatalf("BuildFunctionForTargetEnv : Marshalling details to JSON : %+v", err)
        }
        log.Printf("BuildFunctionForTargetEnv : details : %v\n", string(detailsJSON))

        return nil
    }

    return devdeploy.BuildLambdaForTargetEnv(log, cfg, details)
}

// DeployFunctionForTargetEnv executes the deploy commands for a target function.
func DeployFunctionForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, functionName, releaseTag string, dryRun bool) error {

    cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
    if err != nil {
        return err
    }

    cfg, err := cfgCtx.Config(log)
    if err != nil {
        return err
    }

    funcCtx, err := NewFunctionContext(functionName, cfg)
    if err != nil {
        return err
    }

    // Override the release tag if set.
    if releaseTag != "" {
        funcCtx.ReleaseTag = releaseTag
    }

    details, err := funcCtx.Deploy(log)
    if err != nil {
        return err
    }

    // Set the s3 bucket and s3 key for uploading the zip file.
    details.CodeS3Bucket, details.CodeS3Key = funcCtx.S3Location(cfg)

    if dryRun {
        cfgJSON, err := json.MarshalIndent(cfg, "", "    ")
        if err != nil {
            log.Fatalf("DeployFunctionForTargetEnv : Marshalling config to JSON : %+v", err)
        }
        log.Printf("DeployFunctionForTargetEnv : config : %v\n", string(cfgJSON))

        detailsJSON, err := json.MarshalIndent(details, "", "    ")
        if err != nil {
            log.Fatalf("DeployFunctionForTargetEnv : Marshalling details to JSON : %+v", err)
        }
        log.Printf("DeployFunctionForTargetEnv : details : %v\n", string(detailsJSON))

        return nil
    }

    return devdeploy.DeployLambdaToTargetEnv(log, cfg, details)
}

diff --git a/build/cicd/internal/config/schema.go b/build/cicd/internal/config/schema.go
new file mode 100644
index 0000000..bebbf79
--- /dev/null
+++ b/build/cicd/internal/config/schema.go
@@ -0,0 +1,38 @@
package config

import (
    "context"
    "log"

    "geeks-accelerator/oss/saas-starter-kit/internal/schema"
    "github.com/jmoiron/sqlx"
    "github.com/pkg/errors"
    "gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
)

// RunSchemaMigrationsForTargetEnv runs the database schema migrations for a target environment.
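// The full deployment environment is set up first so the database connection details (including the
// master password stored in AWS Secrets Manager) are available before connecting.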
func RunSchemaMigrationsForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, isUnittest bool) error {

    cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
    if err != nil {
        return err
    }

    cfg, err := cfgCtx.Config(log)
    if err != nil {
        return err
    }

    err = devdeploy.SetupDeploymentEnv(log, cfg)
    if err != nil {
        return err
    }

    masterDb, err := sqlx.Open(cfg.DBConnInfo.Driver, cfg.DBConnInfo.URL())
    if err != nil {
        return errors.WithMessage(err, "Failed to connect to db for schema migration.")
    }
    defer masterDb.Close()

    return schema.Migrate(context.Background(), masterDb, log, isUnittest)
}

diff --git a/build/cicd/internal/config/service.go b/build/cicd/internal/config/service.go
new file mode 100644
index 0000000..99777e8
--- /dev/null
+++ b/build/cicd/internal/config/service.go
@@ -0,0 +1,756 @@
package config

import (
    "encoding/json"
    "fmt"
    "log"
    "os"
    "path/filepath"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/ecs"
    "github.com/iancoleman/strcase"
    "github.com/pkg/errors"
    "gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
    "gopkg.in/go-playground/validator.v9"
)

// Service defines the name of a service.
type Service = string

var (
    ServiceWebApi = "web-api"
    ServiceWebApp = "web-app"
)

// List of service names used by main.go for help.
var ServiceNames = []Service{
    ServiceWebApi,
    ServiceWebApp,
}

// ServiceConfig defines the settings for a service.
type ServiceConfig struct {
    // Required flags.
    Name               string `validate:"required" example:"web-api"`
    ServiceHostPrimary string `validate:"required" example:"example-project.com"`
    DesiredCount       int    `validate:"required" example:"2"`
    ServiceDir         string `validate:"required"`
    Dockerfile         string `validate:"required" example:"./cmd/web-api/Dockerfile"`
    ReleaseTag         string `validate:"required"`

    // Optional flags.
    ServiceHostNames    []string `validate:"omitempty" example:"subdomain.example-project.com"`
    EnableHTTPS         bool     `validate:"omitempty" example:"false"`
    EnableElb           bool     `validate:"omitempty" example:"false"`
    StaticFilesS3Enable bool     `validate:"omitempty" example:"false"`
    BuildDir            string   `validate:"omitempty"`
    DockerBuildContext  string   `validate:"omitempty" example:"."`
}

// ServiceContext includes the config and task definition for building and deploying a service.
type ServiceContext struct {
    ServiceConfig

    // AwsEcsTaskDefinition defines the ECS task definition based on the service configs.
    AwsEcsTaskDefinition func(cfg *devdeploy.Config, srv *devdeploy.DeployService) (*ecs.RegisterTaskDefinitionInput, error)
}

// NewServiceConfig returns the ServiceConfig for a service that is configured for the target deployment env.
func NewServiceConfig(serviceName string, cfg *devdeploy.Config) (ServiceConfig, error) {

    // =========================================================================
    // New service context.
    srv := ServiceConfig{
        Name:               serviceName,
        DesiredCount:       1,
        DockerBuildContext: ".",
        ServiceDir:         filepath.Join(cfg.ProjectRoot, "cmd", serviceName),

        // Set the release tag for the image to use include env + service name + commit hash/tag.
        ReleaseTag: devdeploy.GitLabCiReleaseTag(cfg.Env, serviceName),
    }

    // =========================================================================
    // Context settings based on target env.
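    // (HTTPS and serving static files from S3 are enabled for stage and prod only;
    // dev is left on plain HTTP with S3 static files disabled.)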
    if cfg.Env == EnvStage || cfg.Env == EnvProd {
        srv.EnableHTTPS = true
        srv.StaticFilesS3Enable = true
    } else {
        srv.EnableHTTPS = false
        srv.StaticFilesS3Enable = false
    }

    // =========================================================================
    // Service dependent settings.
    switch serviceName {
    case ServiceWebApp:

        // Set the hostnames for the service.
        if cfg.Env == EnvProd {
            srv.ServiceHostPrimary = "example.saasstartupkit.com"

            // Any hostname listed here that doesn't match the primary hostname will be updated in Route 53 but the
            // service itself will redirect any requests back to the primary hostname.
            srv.ServiceHostNames = []string{
                fmt.Sprintf("%s.example.saasstartupkit.com", cfg.Env),
            }
        } else {
            srv.ServiceHostPrimary = fmt.Sprintf("%s.example.saasstartupkit.com", cfg.Env)
        }

    case ServiceWebApi:

        // Set the hostnames for the service.
        if cfg.Env == EnvProd {
            srv.ServiceHostPrimary = "api.example.saasstartupkit.com"
        } else {
            srv.ServiceHostPrimary = fmt.Sprintf("api.%s.example.saasstartupkit.com", cfg.Env)
        }

    default:
        return ServiceConfig{}, errors.Wrapf(devdeploy.ErrInvalidService,
            "No service config defined for service '%s'",
            serviceName)
    }

    // Set the docker file if no custom one has been defined for the service.
    if srv.Dockerfile == "" {
        srv.Dockerfile = filepath.Join(srv.ServiceDir, "Dockerfile")
    }

    // Ensure the service config is valid.
    errs := validator.New().Struct(srv)
    if errs != nil {
        return srv, errs
    }

    return srv, nil
}

// BaseUrl returns the base url for a specific service.
func (c ServiceConfig) BaseUrl() string {
    var scheme string
    if c.EnableHTTPS {
        scheme = "https"
    } else {
        scheme = "http"
    }
    return fmt.Sprintf("%s://%s/", scheme, c.ServiceHostPrimary)
}

// NewServiceContext returns the ServiceContext for a service that is configured for the target deployment env.
func NewServiceContext(serviceName Service, cfg *devdeploy.Config) (*ServiceContext, error) {

    // =========================================================================
    // Shared details that could be applied to all task definitions.

    // Load the web-app config so the web-api can reference its hostname.
    webAppCfg, err := NewServiceConfig(ServiceWebApp, cfg)
    if err != nil {
        return nil, err
    }

    // Load the web-api config so the web-app can reference its hostname.
    webApiCfg, err := NewServiceConfig(ServiceWebApi, cfg)
    if err != nil {
        return nil, err
    }

    // Define a base set of environment variables that can be assigned to individual container definitions.
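    // These cover the ECS cluster/service names, the AWS region, CI commit/job/pipeline
    // metadata, and the base URLs of both services so each service can reference the other.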
    baseEnvVals := func(cfg *devdeploy.Config, srv *devdeploy.DeployService) []*ecs.KeyValuePair {

        var ciJobURL string
        if id := os.Getenv("CI_JOB_ID"); id != "" {
            ciJobURL = strings.TrimRight(GitLabProjectBaseUrl, "/") + "/-/jobs/" + id
        }

        var ciPipelineURL string
        if id := os.Getenv("CI_PIPELINE_ID"); id != "" {
            ciPipelineURL = strings.TrimRight(GitLabProjectBaseUrl, "/") + "/pipelines/" + id
        }

        return []*ecs.KeyValuePair{
            ecsKeyValuePair(devdeploy.ENV_KEY_ECS_CLUSTER, srv.AwsEcsCluster.ClusterName),
            ecsKeyValuePair(devdeploy.ENV_KEY_ECS_SERVICE, srv.AwsEcsService.ServiceName),
            ecsKeyValuePair("AWS_REGION", cfg.AwsCredentials.Region),
            ecsKeyValuePair("AWS_USE_ROLE", "true"),
            ecsKeyValuePair("AWSLOGS_GROUP", srv.AwsCloudWatchLogGroup.LogGroupName),
            ecsKeyValuePair("ECS_ENABLE_CONTAINER_METADATA", "true"),
            ecsKeyValuePair("CI_COMMIT_REF_NAME", os.Getenv("CI_COMMIT_REF_NAME")),
            ecsKeyValuePair("CI_COMMIT_SHORT_SHA", os.Getenv("CI_COMMIT_SHORT_SHA")),
            ecsKeyValuePair("CI_COMMIT_SHA", os.Getenv("CI_COMMIT_SHA")),
            ecsKeyValuePair("CI_COMMIT_TAG", os.Getenv("CI_COMMIT_TAG")),
            ecsKeyValuePair("CI_JOB_ID", os.Getenv("CI_JOB_ID")),
            ecsKeyValuePair("CI_PIPELINE_ID", os.Getenv("CI_PIPELINE_ID")),
            ecsKeyValuePair("CI_JOB_URL", ciJobURL),
            ecsKeyValuePair("CI_PIPELINE_URL", ciPipelineURL),
            ecsKeyValuePair("WEB_APP_BASE_URL", webAppCfg.BaseUrl()),
            ecsKeyValuePair("WEB_API_BASE_URL", webApiCfg.BaseUrl()),
        }
    }

    // =========================================================================
    // Service dependent settings.

    var ctx *ServiceContext
    switch serviceName {

    // Define the ServiceContext for the web-app that will be used for build and deploy.
    case ServiceWebApp:

        ctx = &ServiceContext{
            ServiceConfig: webAppCfg,
        }

        // Define the service task definition with a function to enable use of config and deploy details.
        ctx.AwsEcsTaskDefinition = func(cfg *devdeploy.Config, srv *devdeploy.DeployService) (*ecs.RegisterTaskDefinitionInput, error) {

            // Define a container definition for the specific service.
            container1 := &ecs.ContainerDefinition{
                Name:      aws.String(ctx.Name),
                Image:     aws.String(srv.ReleaseImage),
                Essential: aws.Bool(true),
                LogConfiguration: &ecs.LogConfiguration{
                    LogDriver: aws.String("awslogs"),
                    Options: map[string]*string{
                        "awslogs-group":         aws.String(srv.AwsCloudWatchLogGroup.LogGroupName),
                        "awslogs-region":        aws.String(cfg.AwsCredentials.Region),
                        "awslogs-stream-prefix": aws.String("ecs"),
                    },
                },
                PortMappings: []*ecs.PortMapping{
                    &ecs.PortMapping{
                        HostPort:      aws.Int64(80),
                        Protocol:      aws.String("tcp"),
                        ContainerPort: aws.Int64(80),
                    },
                },
                Cpu:               aws.Int64(128),
                MemoryReservation: aws.Int64(128),
                Environment:       baseEnvVals(cfg, srv),
                HealthCheck: &ecs.HealthCheck{
                    Retries: aws.Int64(3),
                    Command: aws.StringSlice([]string{
                        "CMD-SHELL",
                        "curl -f http://localhost/ping || exit 1",
                    }),
                    Timeout:     aws.Int64(5),
                    Interval:    aws.Int64(60),
                    StartPeriod: aws.Int64(60),
                },
                Ulimits: []*ecs.Ulimit{
                    &ecs.Ulimit{
                        Name:      aws.String("nofile"),
                        SoftLimit: aws.Int64(987654),
                        HardLimit: aws.Int64(999999),
                    },
                },
            }

            // If the service has HTTPS enabled without the use of an AWS Elastic Load Balancer, traffic on
            // port 443 needs to be enabled so SSL traffic gets terminated on the deployed tasks.
+			if ctx.EnableHTTPS && !ctx.EnableElb {
+				container1.PortMappings = append(container1.PortMappings, &ecs.PortMapping{
+					HostPort:      aws.Int64(443),
+					Protocol:      aws.String("tcp"),
+					ContainerPort: aws.Int64(443),
+				})
+			}
+
+			// Append env vars for the service task.
+			container1.Environment = append(container1.Environment,
+				ecsKeyValuePair("SERVICE_NAME", srv.ServiceName),
+				ecsKeyValuePair("PROJECT_NAME", cfg.ProjectName),
+
+				// Use placeholders for these environment variables that will be replaced by devdeploy.DeployServiceToTargetEnv.
+				ecsKeyValuePair("WEB_APP_SERVICE_HOST", "{HTTP_HOST}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_HTTPS_HOST", "{HTTPS_HOST}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_ENABLE_HTTPS", "{HTTPS_ENABLED}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_BASE_URL", "{APP_BASE_URL}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_HOST_NAMES", "{HOST_NAMES}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_STATICFILES_S3_ENABLED", "{STATIC_FILES_S3_ENABLED}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_STATICFILES_S3_PREFIX", "{STATIC_FILES_S3_PREFIX}"),
+				ecsKeyValuePair("WEB_APP_SERVICE_STATICFILES_CLOUDFRONT_ENABLED", "{STATIC_FILES_CLOUDFRONT_ENABLED}"),
+				ecsKeyValuePair("WEB_APP_REDIS_HOST", "{CACHE_HOST}"),
+				ecsKeyValuePair("WEB_APP_DB_HOST", "{DB_HOST}"),
+				ecsKeyValuePair("WEB_APP_DB_USERNAME", "{DB_USER}"),
+				ecsKeyValuePair("WEB_APP_DB_PASSWORD", "{DB_PASS}"),
+				ecsKeyValuePair("WEB_APP_DB_DATABASE", "{DB_DATABASE}"),
+				ecsKeyValuePair("WEB_APP_DB_DRIVER", "{DB_DRIVER}"),
+				ecsKeyValuePair("WEB_APP_DB_DISABLE_TLS", "{DB_DISABLE_TLS}"),
+				ecsKeyValuePair("WEB_APP_AWS_S3_BUCKET_PRIVATE", "{AWS_S3_BUCKET_PRIVATE}"),
+				ecsKeyValuePair("WEB_APP_AWS_S3_BUCKET_PUBLIC", "{AWS_S3_BUCKET_PUBLIC}"),
+				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_UPDATE_TASK_IPS, "{ROUTE53_UPDATE_TASK_IPS}"),
+				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_ZONES, "{ROUTE53_ZONES}"),
+			)
+
+			// Define the full task definition for the service.
+			def := &ecs.RegisterTaskDefinitionInput{
+				Family:           aws.String(srv.ServiceName),
+				ExecutionRoleArn: aws.String(srv.AwsEcsExecutionRole.Arn()),
+				TaskRoleArn:      aws.String(srv.AwsEcsTaskRole.Arn()),
+				NetworkMode:      aws.String("awsvpc"),
+				ContainerDefinitions: []*ecs.ContainerDefinition{
+					// Include the single container definition for the service. Additional definitions could be
+					// added here, like one for Datadog.
+					container1,
+				},
+				RequiresCompatibilities: aws.StringSlice([]string{"FARGATE"}),
+			}
+
+			return def, nil
+		}
+
+	// Define the ServiceContext for the web-api that will be used for build and deploy.
+	case ServiceWebApi:
+
+		ctx = &ServiceContext{
+			ServiceConfig: webApiCfg,
+		}
+
+		// Define the service task definition with a function to enable use of config and deploy details.
+		ctx.AwsEcsTaskDefinition = func(cfg *devdeploy.Config, srv *devdeploy.DeployService) (*ecs.RegisterTaskDefinitionInput, error) {
+
+			// Define a container definition for the specific service.
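+			// The web-api definition mirrors the web-app one above: the awslogs driver, the /ping health check
+			// and the raised nofile ulimits are identical; only the service name and the WEB_API_* env var
+			// prefix differ.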
+			container1 := &ecs.ContainerDefinition{
+				Name:      aws.String(ctx.Name),
+				Image:     aws.String(srv.ReleaseImage),
+				Essential: aws.Bool(true),
+				LogConfiguration: &ecs.LogConfiguration{
+					LogDriver: aws.String("awslogs"),
+					Options: map[string]*string{
+						"awslogs-group":         aws.String(srv.AwsCloudWatchLogGroup.LogGroupName),
+						"awslogs-region":        aws.String(cfg.AwsCredentials.Region),
+						"awslogs-stream-prefix": aws.String("ecs"),
+					},
+				},
+				PortMappings: []*ecs.PortMapping{
+					&ecs.PortMapping{
+						HostPort:      aws.Int64(80),
+						Protocol:      aws.String("tcp"),
+						ContainerPort: aws.Int64(80),
+					},
+				},
+				Cpu:               aws.Int64(128),
+				MemoryReservation: aws.Int64(128),
+				Environment:       baseEnvVals(cfg, srv),
+				HealthCheck: &ecs.HealthCheck{
+					Retries: aws.Int64(3),
+					Command: aws.StringSlice([]string{
+						"CMD-SHELL",
+						"curl -f http://localhost/ping || exit 1",
+					}),
+					Timeout:     aws.Int64(5),
+					Interval:    aws.Int64(60),
+					StartPeriod: aws.Int64(60),
+				},
+				Ulimits: []*ecs.Ulimit{
+					&ecs.Ulimit{
+						Name:      aws.String("nofile"),
+						SoftLimit: aws.Int64(987654),
+						HardLimit: aws.Int64(999999),
+					},
+				},
+			}
+
+			// If the service has HTTPS enabled without the use of an AWS Elastic Load Balancer, then traffic for
+			// port 443 needs to be enabled so SSL can be terminated on the deployed tasks.
+			if ctx.EnableHTTPS && !ctx.EnableElb {
+				container1.PortMappings = append(container1.PortMappings, &ecs.PortMapping{
+					HostPort:      aws.Int64(443),
+					Protocol:      aws.String("tcp"),
+					ContainerPort: aws.Int64(443),
+				})
+			}
+
+			// Append env vars for the service task.
+			container1.Environment = append(container1.Environment,
+				ecsKeyValuePair("SERVICE_NAME", srv.ServiceName),
+				ecsKeyValuePair("PROJECT_NAME", cfg.ProjectName),
+
+				// Use placeholders for these environment variables that will be replaced by devdeploy.DeployServiceToTargetEnv.
+				ecsKeyValuePair("WEB_API_SERVICE_HOST", "{HTTP_HOST}"),
+				ecsKeyValuePair("WEB_API_SERVICE_HTTPS_HOST", "{HTTPS_HOST}"),
+				ecsKeyValuePair("WEB_API_SERVICE_ENABLE_HTTPS", "{HTTPS_ENABLED}"),
+				ecsKeyValuePair("WEB_API_SERVICE_BASE_URL", "{APP_BASE_URL}"),
+				ecsKeyValuePair("WEB_API_SERVICE_HOST_NAMES", "{HOST_NAMES}"),
+				ecsKeyValuePair("WEB_API_SERVICE_STATICFILES_S3_ENABLED", "{STATIC_FILES_S3_ENABLED}"),
+				ecsKeyValuePair("WEB_API_SERVICE_STATICFILES_S3_PREFIX", "{STATIC_FILES_S3_PREFIX}"),
+				ecsKeyValuePair("WEB_API_SERVICE_STATICFILES_CLOUDFRONT_ENABLED", "{STATIC_FILES_CLOUDFRONT_ENABLED}"),
+				ecsKeyValuePair("WEB_API_REDIS_HOST", "{CACHE_HOST}"),
+				ecsKeyValuePair("WEB_API_DB_HOST", "{DB_HOST}"),
+				ecsKeyValuePair("WEB_API_DB_USERNAME", "{DB_USER}"),
+				ecsKeyValuePair("WEB_API_DB_PASSWORD", "{DB_PASS}"),
+				ecsKeyValuePair("WEB_API_DB_DATABASE", "{DB_DATABASE}"),
+				ecsKeyValuePair("WEB_API_DB_DRIVER", "{DB_DRIVER}"),
+				ecsKeyValuePair("WEB_API_DB_DISABLE_TLS", "{DB_DISABLE_TLS}"),
+				ecsKeyValuePair("WEB_API_AWS_S3_BUCKET_PRIVATE", "{AWS_S3_BUCKET_PRIVATE}"),
+				ecsKeyValuePair("WEB_API_AWS_S3_BUCKET_PUBLIC", "{AWS_S3_BUCKET_PUBLIC}"),
+				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_UPDATE_TASK_IPS, "{ROUTE53_UPDATE_TASK_IPS}"),
+				ecsKeyValuePair(devdeploy.ENV_KEY_ROUTE53_ZONES, "{ROUTE53_ZONES}"),
+			)
+
+			// Define the full task definition for the service.
+			def := &ecs.RegisterTaskDefinitionInput{
+				Family:           aws.String(srv.ServiceName),
+				ExecutionRoleArn: aws.String(srv.AwsEcsExecutionRole.Arn()),
+				TaskRoleArn:      aws.String(srv.AwsEcsTaskRole.Arn()),
+				NetworkMode:      aws.String("awsvpc"),
+				ContainerDefinitions: []*ecs.ContainerDefinition{
+					// Include the single container definition for the service. Additional definitions could be
+					// added here, like one for Datadog.
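+					// A Datadog agent sidecar, for example, would be appended alongside container1; the
+					// {DATADOG_APIKEY} and {DATADOG_ESSENTIAL} placeholders resolved in Deploy below assume
+					// such an additional container definition.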
+					container1,
+				},
+				RequiresCompatibilities: aws.StringSlice([]string{"FARGATE"}),
+			}
+
+			return def, nil
+		}
+
+	default:
+		return nil, errors.Wrapf(devdeploy.ErrInvalidService,
+			"No service context defined for service '%s'",
+			serviceName)
+	}
+
+	return ctx, nil
+}
+
+// Build defines all the information needed to build a service with Docker and push the release image to AWS ECR.
+func (ctx *ServiceContext) Build(log *log.Logger, noCache, noPush bool) (*devdeploy.BuildService, error) {
+
+	log.Printf("Define build for service '%s'.", ctx.Name)
+	log.Printf("\tUsing release tag %s.", ctx.ReleaseTag)
+
+	srv := &devdeploy.BuildService{
+		ServiceName:        ctx.Name,
+		ReleaseTag:         ctx.ReleaseTag,
+		BuildDir:           ctx.BuildDir,
+		Dockerfile:         ctx.Dockerfile,
+		DockerBuildContext: ctx.DockerBuildContext,
+		NoCache:            noCache,
+		NoPush:             noPush,
+	}
+
+	return srv, nil
+}
+
+// Deploy defines all the information needed to deploy a service to AWS ECS.
+func (ctx *ServiceContext) Deploy(log *log.Logger, cfg *devdeploy.Config) (*devdeploy.DeployService, error) {
+
+	log.Printf("Define deploy for service '%s'.", ctx.Name)
+	log.Printf("\tUsing release tag %s.", ctx.ReleaseTag)
+
+	// Start to define all the information for the service from the service context.
+	srv := &devdeploy.DeployService{
+		ServiceName:        ctx.Name,
+		ReleaseTag:         ctx.ReleaseTag,
+		EnableHTTPS:        ctx.EnableHTTPS,
+		ServiceHostPrimary: ctx.ServiceHostPrimary,
+		ServiceHostNames:   ctx.ServiceHostNames,
+	}
+
+	// When only service host names are set, choose the first item as the primary host.
+	if srv.ServiceHostPrimary == "" && len(srv.ServiceHostNames) > 0 {
+		srv.ServiceHostPrimary = srv.ServiceHostNames[0]
+		log.Printf("\t\tSet Service Primary Host to '%s'.", srv.ServiceHostPrimary)
+	}
+
+	// The S3 prefix used to upload static files served to the public.
+	if ctx.StaticFilesS3Enable {
+		srv.StaticFilesS3Prefix = filepath.Join(cfg.AwsS3BucketPublicKeyPrefix, srv.ReleaseTag, "static")
+	}
+
+	// Determine the Dockerfile for the service.
+	if ctx.Dockerfile != "" {
+		srv.Dockerfile = ctx.Dockerfile
+		log.Printf("\t\tUsing Dockerfile '%s'.", srv.Dockerfile)
+	} else {
+		var err error
+		srv.Dockerfile, err = devdeploy.FindServiceDockerFile(cfg.ProjectRoot, srv.ServiceName)
+		if err != nil {
+			return nil, err
+		}
+		log.Printf("\t\tFound service Dockerfile '%s'.", srv.Dockerfile)
+	}
+
+	// Set the service directory.
+	if ctx.ServiceDir == "" {
+		ctx.ServiceDir = filepath.Dir(srv.Dockerfile)
+	}
+	srv.StaticFilesDir = filepath.Join(ctx.ServiceDir, "static")
+
+	// Define the ECS Cluster used to host the serverless Fargate tasks.
+	srv.AwsEcsCluster = &devdeploy.AwsEcsCluster{
+		ClusterName: cfg.ProjectName + "-" + cfg.Env,
+		Tags: []devdeploy.Tag{
+			{Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName},
+			{Key: devdeploy.AwsTagNameEnv, Value: cfg.Env},
+		},
+	}
+
+	// Define the ECS task execution role. This role executes ECS actions such as pulling the image and storing the
+	// application logs in CloudWatch.
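+	// The execution role is assumed by the ECS agent itself (e.g. to pull the release image from ECR and write
+	// to CloudWatch Logs), while the task role defined below is what the application code assumes when calling
+	// other AWS services.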
", cfg.ProjectName), + AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}", + Tags: []devdeploy.Tag{ + {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName}, + {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env}, + }, + AttachRolePolicyArns: []string{"arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"}, + } + log.Printf("\t\tSet ECS Execution Role Name to '%s'.", srv.AwsEcsExecutionRole.RoleName) + + // Define the ECS task role. This role is used by the task itself for calling other AWS services. + srv.AwsEcsTaskRole = &devdeploy.AwsIamRole{ + RoleName: fmt.Sprintf("ecsTaskRole%s%s", cfg.ProjectNameCamel(), strcase.ToCamel(cfg.Env)), + Description: fmt.Sprintf("Allows ECS tasks for %s to call AWS services on your behalf.", cfg.ProjectName), + AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}", + Tags: []devdeploy.Tag{ + {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName}, + {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env}, + }, + } + log.Printf("\t\tSet ECS Task Role Name to '%s'.", srv.AwsEcsTaskRole.RoleName) + + // AwsCloudWatchLogGroup defines the name of the cloudwatch log group that will be used to store logs for the ECS tasks. + srv.AwsCloudWatchLogGroup = &devdeploy.AwsCloudWatchLogGroup{ + LogGroupName: fmt.Sprintf("logs/env_%s/aws/ecs/cluster_%s/service_%s", cfg.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName), + Tags: []devdeploy.Tag{ + {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName}, + {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env}, + }, + } + log.Printf("\t\tSet AWS Log Group Name to '%s'.", srv.AwsCloudWatchLogGroup.LogGroupName) + + // AwsSdPrivateDnsNamespace defines the service discovery group. + srv.AwsSdPrivateDnsNamespace = &devdeploy.AwsSdPrivateDnsNamespace{ + Name: srv.AwsEcsCluster.ClusterName, + Description: fmt.Sprintf("Private DNS namespace used for services running on the ECS Cluster %s", srv.AwsEcsCluster.ClusterName), + Service: &devdeploy.AwsSdService{ + Name: ctx.Name, + Description: fmt.Sprintf("Service %s running on the ECS Cluster %s", ctx.Name, srv.AwsEcsCluster.ClusterName), + DnsRecordTTL: 300, + HealthCheckFailureThreshold: 3, + }, + } + log.Printf("\t\tSet AWS Service Discovery Namespace to '%s'.", srv.AwsSdPrivateDnsNamespace.Name) + + // If the service is requested to use an elastic load balancer then define. + if ctx.EnableElb { + // AwsElbLoadBalancer defines if the service should use an elastic load balancer. + srv.AwsElbLoadBalancer = &devdeploy.AwsElbLoadBalancer{ + Name: fmt.Sprintf("%s-%s-%s", cfg.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName), + IpAddressType: "ipv4", + Scheme: "internet-facing", + Type: "application", + Tags: []devdeploy.Tag{ + {Key: devdeploy.AwsTagNameProject, Value: cfg.ProjectName}, + {Key: devdeploy.AwsTagNameEnv, Value: cfg.Env}, + }, + } + log.Printf("\t\tSet ELB Name to '%s'.", srv.AwsElbLoadBalancer.Name) + + // Define the target group for service to receive HTTP traffic from the load balancer. 
+		srv.AwsElbLoadBalancer.TargetGroup = &devdeploy.AwsElbTargetGroup{
+			Name:                       fmt.Sprintf("%s-http", srv.ServiceName),
+			Port:                       80,
+			Protocol:                   "HTTP",
+			TargetType:                 "ip",
+			HealthCheckEnabled:         true,
+			HealthCheckIntervalSeconds: 30,
+			HealthCheckPath:            "/ping",
+			HealthCheckProtocol:        "HTTP",
+			HealthCheckTimeoutSeconds:  5,
+			HealthyThresholdCount:      3,
+			UnhealthyThresholdCount:    3,
+			Matcher:                    "200",
+		}
+		log.Printf("\t\t\tSet ELB Target Group Name for %s to '%s'.",
+			srv.AwsElbLoadBalancer.TargetGroup.Protocol,
+			srv.AwsElbLoadBalancer.TargetGroup.Name)
+
+		// Set ECS configs based on the specified env.
+		if cfg.Env == "prod" {
+			srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 300
+		} else {
+			// Force non-prod environments to deploy immediately without waiting for connections to drain.
+			srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 0
+		}
+	}
+
+	// AwsEcsService defines the details for the ECS service.
+	srv.AwsEcsService = &devdeploy.AwsEcsService{
+		ServiceName:                   ctx.Name,
+		DesiredCount:                  int64(ctx.DesiredCount),
+		EnableECSManagedTags:          false,
+		HealthCheckGracePeriodSeconds: 60,
+		LaunchType:                    "FARGATE",
+	}
+
+	// Ensure when deploying a new service there is always at least one task running.
+	if srv.AwsEcsService.DesiredCount == 0 {
+		srv.AwsEcsService.DesiredCount = 1
+	}
+
+	// Set ECS configs based on the specified env. Currently all envs share the same deployment percentages:
+	// keep 100% of the desired count healthy and allow bursting to 200% during a rolling deploy.
+	srv.AwsEcsService.DeploymentMinimumHealthyPercent = 100
+	srv.AwsEcsService.DeploymentMaximumPercent = 200
+
+	// AwsEcsTaskDefinition defines the details for registering a new ECS task definition.
+	taskDef, err := ctx.AwsEcsTaskDefinition(cfg, srv)
+	if err != nil {
+		return nil, err
+	}
+
+	srv.AwsEcsTaskDefinition = &devdeploy.AwsEcsTaskDefinition{
+		RegisterInput: taskDef,
+		UpdatePlaceholders: func(placeholders map[string]string) error {
+
+			// Try to find the Datadog API key; this value is optional.
+			// If the Datadog API key is not specified, integration with Datadog for observability will not be active.
+			{
+				datadogApiKey, err := getDatadogApiKey(cfg)
+				if err != nil {
+					return err
+				}
+
+				if datadogApiKey != "" {
+					log.Println("DATADOG API Key set.")
+				} else {
+					log.Println("DATADOG API Key NOT set.")
+				}
+
+				placeholders["{DATADOG_APIKEY}"] = datadogApiKey
+
+				// When the Datadog API key is empty, don't force the container to be essential and have the
+				// whole task fail.
+				if datadogApiKey != "" {
+					placeholders["{DATADOG_ESSENTIAL}"] = "true"
+				} else {
+					placeholders["{DATADOG_ESSENTIAL}"] = "false"
+				}
+			}
+
+			return nil
+		},
+	}
+
+	log.Printf("\t\tDeploying task to '%s'.", ctx.ServiceHostPrimary)
+
+	return srv, nil
+}
+
+// BuildServiceForTargetEnv executes the build commands for a target service.
+func BuildServiceForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, serviceName, releaseTag string, dryRun, noCache, noPush bool) error {
+
+	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
+	if err != nil {
+		return err
+	}
+
+	cfg, err := cfgCtx.Config(log)
+	if err != nil {
+		return err
+	}
+
+	srvCtx, err := NewServiceContext(serviceName, cfg)
+	if err != nil {
+		return err
+	}
+
+	// Override the release tag if set.
+	if releaseTag != "" {
+		srvCtx.ReleaseTag = releaseTag
+	}
+
+	details, err := srvCtx.Build(log, noCache, noPush)
+	if err != nil {
+		return err
+	}
+
+	// servicePath is used to copy the service-specific code in the Dockerfile.
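+	// For example, assuming the web-api Dockerfile lives under cmd/web-api, servicePath would resolve to
+	// "cmd/web-api" relative to the project root, which the Dockerfile can consume via its service_path build arg.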
+	servicePath, err := filepath.Rel(cfg.ProjectRoot, srvCtx.ServiceDir)
+	if err != nil {
+		return err
+	}
+
+	// commitRef is used to set the build constant in main.go via the commit_ref build arg.
+	commitRef := getCommitRef()
+	if commitRef == "" {
+		commitRef = srvCtx.ReleaseTag
+	}
+
+	details.BuildArgs = map[string]string{
+		"service_path": servicePath,
+		"commit_ref":   commitRef,
+	}
+
+	if dryRun {
+		cfgJSON, err := json.MarshalIndent(cfg, "", "    ")
+		if err != nil {
+			log.Fatalf("BuildServiceForTargetEnv : Marshalling config to JSON : %+v", err)
+		}
+		log.Printf("BuildServiceForTargetEnv : config : %v\n", string(cfgJSON))
+
+		detailsJSON, err := json.MarshalIndent(details, "", "    ")
+		if err != nil {
+			log.Fatalf("BuildServiceForTargetEnv : Marshalling details to JSON : %+v", err)
+		}
+		log.Printf("BuildServiceForTargetEnv : details : %v\n", string(detailsJSON))
+
+		return nil
+	}
+
+	return devdeploy.BuildServiceForTargetEnv(log, cfg, details)
+}
+
+// DeployServiceForTargetEnv executes the deploy commands for a target service.
+func DeployServiceForTargetEnv(log *log.Logger, awsCredentials devdeploy.AwsCredentials, targetEnv Env, serviceName, releaseTag string, dryRun bool) error {
+
+	cfgCtx, err := NewConfigContext(targetEnv, awsCredentials)
+	if err != nil {
+		return err
+	}
+
+	cfg, err := cfgCtx.Config(log)
+	if err != nil {
+		return err
+	}
+
+	srvCtx, err := NewServiceContext(serviceName, cfg)
+	if err != nil {
+		return err
+	}
+
+	// Override the release tag if set.
+	if releaseTag != "" {
+		srvCtx.ReleaseTag = releaseTag
+	}
+
+	details, err := srvCtx.Deploy(log, cfg)
+	if err != nil {
+		return err
+	}
+
+	return devdeploy.DeployServiceToTargetEnv(log, cfg, details)
+}
+
+// ecsKeyValuePair returns an *ecs.KeyValuePair for the given name and value.
+func ecsKeyValuePair(name, value string) *ecs.KeyValuePair {
+	return &ecs.KeyValuePair{
+		Name:  aws.String(name),
+		Value: aws.String(value),
+	}
+}
diff --git a/build/cicd/main.go b/build/cicd/main.go
new file mode 100644
index 0000000..20aaa0b
--- /dev/null
+++ b/build/cicd/main.go
@@ -0,0 +1,249 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	"geeks-accelerator/oss/saas-starter-kit/build/cicd/internal/config"
+	"github.com/urfave/cli"
+	"gitlab.com/geeks-accelerator/oss/devops/pkg/devdeploy"
+)
+
+// service is the name of the program used for logging, tracing, etc.
+var service = "CICD"
+
+func main() {
+
+	// =========================================================================
+	// Logging
+	log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile)
+	log.SetPrefix(service + " : ")
+	log := log.New(os.Stdout, log.Prefix(), log.Flags())
+
+	// =========================================================================
+	// New CLI application.
+	app := cli.NewApp()
+	app.Name = "cicd"
+	app.Usage = "Provides build and deploy for GitLab to Amazon AWS"
+	app.Version = "1.0"
+	app.Author = "Lee Brown"
+	app.Email = "lee@geeksinthewoods.com"
+
+	// Define global CLI flags.
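+	// AWS credentials can be supplied via these flags, via the matching AWS_* environment variables, or by
+	// setting --aws-use-role to assume an IAM role (e.g. on a GitLab runner running in AWS).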
+	var awsCredentials devdeploy.AwsCredentials
+	app.Flags = []cli.Flag{
+		cli.StringFlag{
+			Name: "env",
+			Usage: fmt.Sprintf("target environment, one of [%s]",
+				strings.Join(config.EnvNames, ", ")),
+			Required: true,
+		},
+		cli.StringFlag{
+			Name:        "aws-access-key",
+			Usage:       "AWS Access Key",
+			EnvVar:      "AWS_ACCESS_KEY_ID",
+			Destination: &awsCredentials.AccessKeyID,
+		},
+		cli.StringFlag{
+			Name:        "aws-secret-key",
+			Usage:       "AWS Secret Key",
+			EnvVar:      "AWS_SECRET_ACCESS_KEY",
+			Destination: &awsCredentials.SecretAccessKey,
+		},
+		cli.StringFlag{
+			Name:        "aws-region",
+			Usage:       "AWS Region",
+			EnvVar:      "AWS_REGION",
+			Destination: &awsCredentials.Region,
+		},
+		cli.BoolFlag{
+			Name:        "aws-use-role",
+			Usage:       "Use an IAM role; otherwise AWS access/secret keys are required",
+			EnvVar:      "AWS_USE_ROLE",
+			Destination: &awsCredentials.UseRole,
+		},
+	}
+
+	app.Commands = []cli.Command{
+		// Build command for services and functions.
+		{
+			Name:    "build",
+			Aliases: []string{"b"},
+			Usage:   "build a service or function",
+			Subcommands: []cli.Command{
+				{
+					Name:  "service",
+					Usage: "build a service",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name: "name, n",
+							Usage: fmt.Sprintf("target service, one of [%s]",
+								strings.Join(config.ServiceNames, ", ")),
+							Required: true,
+						},
+						cli.StringFlag{
+							Name:  "release-tag, tag",
+							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
+						},
+						cli.BoolFlag{
+							Name:  "dry-run",
+							Usage: "print out the build details",
+						},
+						cli.BoolFlag{
+							Name:  "no-cache",
+							Usage: "skip caching for the docker build",
+						},
+						cli.BoolFlag{
+							Name:  "no-push",
+							Usage: "disable pushing release image to remote repository",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						targetEnv := c.GlobalString("env")
+						serviceName := c.String("name")
+						releaseTag := c.String("release-tag")
+						dryRun := c.Bool("dry-run")
+						noCache := c.Bool("no-cache")
+						noPush := c.Bool("no-push")
+
+						return config.BuildServiceForTargetEnv(log, awsCredentials, targetEnv, serviceName, releaseTag, dryRun, noCache, noPush)
+					},
+				},
+				{
+					Name:  "function",
+					Usage: "build a function",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name: "name, n",
+							Usage: fmt.Sprintf("target function, one of [%s]",
+								strings.Join(config.FunctionNames, ", ")),
+							Required: true,
+						},
+						cli.StringFlag{
+							Name:  "release-tag, tag",
+							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
+						},
+						cli.BoolFlag{
+							Name:  "dry-run",
+							Usage: "print out the build details",
+						},
+						cli.BoolFlag{
+							Name:  "no-cache",
+							Usage: "skip caching for the docker build",
+						},
+						cli.BoolFlag{
+							Name:  "no-push",
+							Usage: "disable pushing release image to remote repository",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						targetEnv := c.GlobalString("env")
+						funcName := c.String("name")
+						releaseTag := c.String("release-tag")
+						dryRun := c.Bool("dry-run")
+						noCache := c.Bool("no-cache")
+						noPush := c.Bool("no-push")
+
+						return config.BuildFunctionForTargetEnv(log, awsCredentials, targetEnv, funcName, releaseTag, dryRun, noCache, noPush)
+					},
+				},
+			},
+		},
+
+		// Deploy command for services and functions.
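+		// For example, from a GitLab CI/CD job (a sketch; the built cicd binary is assumed to be on the PATH):
+		//
+		//	cicd --env=dev deploy service --name=web-api --dry-run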
+		{
+			Name:    "deploy",
+			Aliases: []string{"d"},
+			Usage:   "deploy a service or function",
+			Subcommands: []cli.Command{
+				{
+					Name:  "service",
+					Usage: "deploy a service",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name: "name, n",
+							Usage: fmt.Sprintf("target service, one of [%s]",
+								strings.Join(config.ServiceNames, ", ")),
+							Required: true,
+						},
+						cli.StringFlag{
+							Name:  "release-tag, tag",
+							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
+						},
+						cli.BoolFlag{
+							Name:  "dry-run",
+							Usage: "print out the deploy details",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						targetEnv := c.GlobalString("env")
+						serviceName := c.String("name")
+						releaseTag := c.String("release-tag")
+						dryRun := c.Bool("dry-run")
+
+						return config.DeployServiceForTargetEnv(log, awsCredentials, targetEnv, serviceName, releaseTag, dryRun)
+					},
+				},
+				{
+					Name:  "function",
+					Usage: "deploy a function",
+					Flags: []cli.Flag{
+						cli.StringFlag{
+							Name: "name, n",
+							Usage: fmt.Sprintf("target function, one of [%s]",
+								strings.Join(config.FunctionNames, ", ")),
+							Required: true,
+						},
+						cli.StringFlag{
+							Name:  "release-tag, tag",
+							Usage: "optional tag to override default CI_COMMIT_SHORT_SHA",
+						},
+						cli.BoolFlag{
+							Name:  "dry-run",
+							Usage: "print out the deploy details",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						targetEnv := c.GlobalString("env")
+						funcName := c.String("name")
+						releaseTag := c.String("release-tag")
+						dryRun := c.Bool("dry-run")
+
+						return config.DeployFunctionForTargetEnv(log, awsCredentials, targetEnv, funcName, releaseTag, dryRun)
+					},
+				},
+			},
+		},
+
+		// Schema command used to run database schema migrations.
+		{
+			Name:    "schema",
+			Aliases: []string{"s"},
+			Usage:   "manage the database schema",
+			Subcommands: []cli.Command{
+				{
+					Name:  "migrate",
+					Usage: "run the schema migrations",
+					Flags: []cli.Flag{
+						cli.BoolFlag{
+							Name:  "unittest",
+							Usage: "run the schema migrations for unit testing",
+						},
+					},
+					Action: func(c *cli.Context) error {
+						targetEnv := c.GlobalString("env")
+						isUnittest := c.Bool("unittest")
+
+						return config.RunSchemaMigrationsForTargetEnv(log, awsCredentials, targetEnv, isUnittest)
+					},
+				},
+			},
+		},
+	}
+
+	if err := app.Run(os.Args); err != nil {
+		log.Fatalf("%+v", err)
+	}
+}