
Merge branch 'prod' into jsign/aws2

This commit is contained in:
Lee Brown
2019-08-22 11:16:51 -08:00
64 changed files with 2926 additions and 6718 deletions


@ -1 +0,0 @@
devops


@ -1,28 +0,0 @@
FROM golang:1.12.6-alpine3.9 AS builder
LABEL maintainer="lee@geeksinthewoods.com"
RUN apk --update --no-cache add \
git
# Change dir to project base.
WORKDIR $GOPATH/src/gitlab.com/geeks-accelerator/oss/saas-starter-kit
# Enable go modules.
ENV GO111MODULE="on"
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY internal ./internal
COPY tools/devops ./tools/devops
WORKDIR ./tools/devops
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix nocgo -o /go/bin/devops .
FROM busybox
COPY --from=builder /go/bin/devops /go/bin/devops
ENTRYPOINT ["/go/bin/devops"]


@ -1,244 +0,0 @@
# SaaS Starter Kit
Copyright 2019, Geeks Accelerator
twins@geeksaccelerator.com
## Description
_Devops_ handles creating AWS resources and deploying your services with minimal additional configuration. You can
customize any of the configuration in the code. While AWS is already a core part of the saas-starter-kit, keeping
the deployment in Go limits the scope of additional technologies required to get your project successfully up and
running. If you understand Go, then you will be a master at devops with this tool.
The project includes a Postgres database which adds an additional resource dependency when deploying the
project. It is important to know that the tasks running schema migration for the Postgres database cannot run on shared
GitLab Runners since they will be outside the deployment AWS VPC. There are two options here:
1. Enable the AWS RDS database to be publicly available (not recommended).
2. Run your own GitLab runners inside the same AWS VPC and grant access for them to communicate with the database.
This project has opted to implement option 2, and thus setting up the deployment pipeline requires a few additional steps.
Note that using shared runners hosted by GitLab also requires AWS credentials to be entered into GitLab for configuration.
Hosting your own GitLab runners uses AWS roles instead of hardcoding the access key ID and secret access key in GitLab and
in other configuration files. And since this project is open-source, we wanted to avoid sharing our AWS credentials.
If you don't have an AWS account, sign up for one now and then proceed with the deployment setup.
We assume that if you are deploying the SaaS Starter Kit, you are starting from scratch with no existing dependencies.
This, however, excludes any domain names that you would like to use for resolving your services publicly. To use any
pre-purchased domain names, make sure they are added to Route 53 in the AWS account. Or you can let the deploy script
create a new zone in Route 53 and update the DNS for the domain name when you're ready to make the transition. It is
required to host the DNS on Route 53 so DNS entries can be managed by this deploy tool. It is possible to use a
[subdomain that uses Route 53 as the DNS service without migrating the parent domain](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html).
## Getting Started
You can run both the `build` and `deploy` commands locally after setting up the initial
AWS permissions.
1. You will need an existing AWS account, or you can create a new one.
2. Define a new [AWS IAM Policy](https://console.aws.amazon.com/iam/home?region=us-west-2#/policies$new?step=edit)
called `saas-starter-kit-deploy` with a defined JSON statement instead of using the visual
editor. The statement is rather large as each permission is granted individually. A copy of
the statement is stored in the repo at
[resources/saas-starter-kit-deploy-policy.json](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/resources/saas-starter-kit-deploy-policy.json)
3. Create a new [AWS User](https://console.aws.amazon.com/iam/home?region=us-west-2#/users$new?step=details)
called `saas-starter-kit-deploy` with _Programmatic Access_ and _Attach existing policies directly_ with the policy
created in step 2, `saas-starter-kit-deploy`.
4. Try running the deploy
```bash
go run main.go deploy -service=web-api -env=dev
```
Note: The user created here is only for development purposes and is not needed for the build
pipeline using GitLab CI / CD.
## Setup GitLab CI / CD
Below outlines the basic steps to set up an [Autoscaling GitLab Runner on AWS](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/).
1. Define an [AWS IAM Role](https://console.aws.amazon.com/iam/home?region=us-west-2#/roles$new?step=type) that will be
attached to the GitLab Runner instances. The role will need permission to scale (EC2), update the cache (via S3) and
perform the project-specific deployment commands.
```
Trusted Entity: AWS Service
Service that will use this role: EC2
Attach permissions policies: AmazonEC2FullAccess, AmazonS3FullAccess, saas-starter-kit-deploy
Role Name: SaasStarterKitEc2RoleForGitLabRunner
Role Description: Allows GitLab runners hosted on EC2 instances to call AWS services on your behalf.
```
2. Launch a new [AWS EC2 Instance](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#LaunchInstanceWizard).
`GitLab Runner` will be installed on this instance and will serve as the bastion that spawns new instances. This
instance will be a dedicated host since we need it always up and running; thus standard costs apply.
Note: Since this machine will not run any jobs itself, it does not need to be very powerful. A t2.micro instance will be sufficient.
```
Amazon Machine Image (AMI): Amazon Linux AMI 2018.03.0 (HVM), SSD Volume Type - ami-0f2176987ee50226e
Instance Type: t2.micro
```
3. Configure Instance Details.
Note: Do not forget to select the IAM Role _SaasStarterKitEc2RoleForGitLabRunner_
```
Number of instances: 1
Network: default VPC
Subnet: no Preference
Auto-assign Public IP: Use subnet setting (Enable)
Placement Group: not checked/disabled
Capacity Reservation: Open
IAM Role: SaasStarterKitEc2RoleForGitLabRunner
Shutdown behavior: Stop
Enable termination protection: checked/enabled
Monitoring: not checked/disabled
Tenancy: Shared
Elastic Inference: not checked/disabled
T2/T3 Unlimited: not checked/disabled
Advanced Details: none
```
4. Add Storage. Increase the volume size for the root device to 30 GiB.
```
Volume | Device | Size (GiB) | Volume Type
Root | /dev/xvda | 30 | General Purpose SSD (gp2)
```
5. Add Tags.
```
Name: gitlab-runner
```
6. Configure Security Group. Create a new security group with the following details:
```
Name: gitlab-runner
Description: GitLab runners for running CI/CD.
Rules:
Type | Protocol | Port Range | Source | Description
SSH | TCP | 22 | My IP | SSH access for setup.
```
7. Review and Launch instance. Select an existing key pair or create a new one. This will be used to SSH into the
instance for additional configuration.
8. Update the security group to reference itself. The instances need to be able to communicate with each other.
Navigate to edit the security group and add the following two rules, where `SECURITY_GROUP_ID` is replaced with the
ID of the security group created in step 6.
```
Rules:
Type | Protocol | Port Range | Source | Description
Custom TCP | TCP | 2376 | SECURITY_GROUP_ID | Gitlab runner for Docker Machine to communicate with Docker daemon.
SSH | TCP | 22 | SECURITY_GROUP_ID | SSH access for setup.
```
9. SSH into the newly created instance.
```bash
ssh -i ~/saas-starter-kit-uswest2-gitlabrunner.pem ec2-user@ec2-52-36-105-172.us-west-2.compute.amazonaws.com
```
Note: If you get the error `Permissions 0666 are too open`, then you will need to run `chmod 400 FILENAME`.
10. Install GitLab Runner from the [official GitLab repository](https://docs.gitlab.com/runner/install/linux-repository.html).
```bash
curl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh | sudo bash
sudo yum install gitlab-runner
```
11. [Install Docker Community Edition](https://docs.docker.com/install/).
```bash
sudo yum install docker
```
12. [Install Docker Machine](https://docs.docker.com/machine/install-machine/).
```bash
base=https://github.com/docker/machine/releases/download/v0.16.0 &&
curl -L $base/docker-machine-$(uname -s)-$(uname -m) >/tmp/docker-machine &&
sudo install /tmp/docker-machine /usr/sbin/docker-machine
```
13. [Register the runner](https://docs.gitlab.com/runner/register/index.html).
```bash
sudo gitlab-runner register
```
Notes:
* When asked for gitlab-ci tags, enter `master,dev,dev-*`
* This limits the pipeline to commits on the master or dev branches, including any branch named with the
prefix `dev-` (see the example job after this list).
* When asked for the executor type, enter `docker+machine`
* When asked for the default Docker image, enter `geeksaccelerator/docker-library:golang1.12-docker`
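A minimal `.gitlab-ci.yml` job sketch that pairs with these runner tags. The job name, stage, and
`only` rules here are illustrative assumptions, not taken from this repo's pipeline config:
```yaml
deploy:web-app:
  stage: deploy
  tags:
    - dev            # must match one of the tags entered during registration
  only:
    - master
    - dev
    - /^dev-.*$/
  script:
    - go run main.go deploy -service=web-app -env=dev
```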
14. [Configuring the GitLab Runner](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws/#configuring-the-gitlab-runner).
```bash
sudo vim /etc/gitlab-runner/config.toml
```
Update the `[runners.docker]` configuration section in `config.toml` to match the example below, replacing the
placeholder `XXXXX` with the relevant value.
```toml
[runners.docker]
tls_verify = false
image = "geeksaccelerator/docker-library:golang1.12-docker"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = true
volumes = ["/cache"]
shm_size = 0
[runners.cache]
Type = "s3"
Shared = true
[runners.cache.s3]
ServerAddress = "s3.us-west-2.amazonaws.com"
BucketName = "XXXXX"
BucketLocation = "us-west-2"
[runners.machine]
IdleCount = 0
IdleTime = 1800
MachineDriver = "amazonec2"
MachineName = "gitlab-runner-machine-%s"
MachineOptions = [
"amazonec2-iam-instance-profile=SaasStarterKitEc2RoleForGitLabRunner",
"amazonec2-region=us-west-2",
"amazonec2-vpc-id=XXXXX",
"amazonec2-subnet-id=XXXXX",
"amazonec2-zone=d",
"amazonec2-use-private-address=true",
"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true",
"amazonec2-security-group=gitlab-runner",
"amazonec2-instance-type=t2.large"
]
```
You will need to use the same VPC subnet and availability zone as the instance launched in step 2. We are using AWS
region `us-west-2`. The _ServerAddress_ for S3 will need to be updated if the region is changed. For `us-east-1` the
_ServerAddress_ is `s3.amazonaws.com`. Under MachineOptions you can add anything that the [AWS Docker Machine](https://docs.docker.com/machine/drivers/aws/#options)
driver supports.
Below are some example values for the placeholders to ensure the format of your values is correct.
```
BucketName = saas-starter-kit-usw
amazonec2-vpc-id=vpc-5f43f027
amazonec2-subnet-id=subnet-693d3110
amazonec2-zone=a
```
Once complete, restart the runner.
```bash
sudo gitlab-runner restart
```
## Examples
```bash
go run main.go deploy -service=web-app -env=dev -enable_https=true -primary_host=example.saasstartupkit.com -host_names=example.saasstartupkit.com,dev.example.saasstartupkit.com -private_bucket=saas-starter-kit-private -public_bucket=saas-starter-kit-public -public_bucket_cloudfront=true -static_files_s3=true -static_files_img_resize=1 -recreate_service=0
```
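The `build` command accepts the same core flags. A sketch, assuming it is invoked like `deploy`
(the exact flag set for `build` is an assumption here):
```bash
go run main.go build -service=web-api -env=dev
```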


@ -1,370 +0,0 @@
package cicd
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"path/filepath"
"sort"
"strconv"
"strings"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/pkg/errors"
"gopkg.in/go-playground/validator.v9"
)
const (
defaultAwsRegistryMaxImages = 1000
awsTagNameProject = "project"
awsTagNameEnv = "env"
awsTagNameName = "Name"
)
// awsCredentials defines AWS credentials used for deployment. Roles cannot be used when deploying
// using the GitLab CI/CD pipeline with shared runners.
type awsCredentials struct {
AccessKeyID string `validate:"required_without=UseRole"`
SecretAccessKey string `validate:"required_without=UseRole"`
Region string `validate:"required_without=UseRole"`
UseRole bool
}
// Session returns a new AWS Session used to access AWS services.
func (creds awsCredentials) Session() *session.Session {
if creds.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
sess := session.Must(session.NewSession())
if creds.Region != "" {
sess.Config.WithRegion(creds.Region)
}
return sess
}
return session.New(
&aws.Config{
Region: aws.String(creds.Region),
Credentials: credentials.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, ""),
})
}
// IamPolicyDocument defines an AWS IAM policy used for defining access for IAM roles, users, and groups.
type IamPolicyDocument struct {
Version string `json:"Version"`
Statement []IamStatementEntry `json:"Statement"`
}
// IamStatementEntry defines a single statement for an IAM policy.
type IamStatementEntry struct {
Sid string `json:"Sid"`
Effect string `json:"Effect"`
Action []string `json:"Action"`
Resource interface{} `json:"Resource"`
}
// S3Bucket defines the details needed to create a bucket that includes additional configuration.
type S3Bucket struct {
Name string `validate:"omitempty"`
Input *s3.CreateBucketInput
LifecycleRules []*s3.LifecycleRule
CORSRules []*s3.CORSRule
PublicAccessBlock *s3.PublicAccessBlockConfiguration
Policy string
}
// DB mimics the general connection info needed for services and is used to define placeholders.
type DB struct {
Host string
User string
Pass string
Database string
Driver string
DisableTLS bool
}
// URL returns the URL to connect to a database.
func (db DB) URL() string {
// Query parameters.
q := url.Values{}
// Handle SSL Mode
if db.DisableTLS {
q.Set("sslmode", "disable")
} else {
q.Set("sslmode", "require")
}
// Construct url.
dbUrl := url.URL{
Scheme: db.Driver,
User: url.UserPassword(db.User, db.Pass),
Host: db.Host,
Path: db.Database,
RawQuery: q.Encode(),
}
return dbUrl.String()
}
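// Example (illustrative values, not from this repo): with Driver "postgres", User "admin",
// Pass "secret", Host "db.example.com:5432", Database "shared" and DisableTLS true, URL()
// returns "postgres://admin:secret@db.example.com:5432/shared?sslmode=disable".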
// GetAwsCredentials loads the AWS Access Keys from env variables unless a role is used.
func GetAwsCredentials(targetEnv string) (awsCredentials, error) {
var creds awsCredentials
creds.Region = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_REGION"))
if v := getTargetEnv(targetEnv, "AWS_USE_ROLE"); v != "" {
creds.UseRole, _ = strconv.ParseBool(v)
sess, err := session.NewSession()
if err != nil {
return creds, errors.Wrap(err, "Failed to load AWS credentials from instance")
}
if sess.Config != nil && sess.Config.Region != nil && *sess.Config.Region != "" {
creds.Region = *sess.Config.Region
} else {
sm := ec2metadata.New(sess)
creds.Region, err = sm.Region()
if err != nil {
return creds, errors.Wrap(err, "Failed to get region from AWS session")
}
}
return creds, nil
}
creds.AccessKeyID = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_ACCESS_KEY_ID"))
creds.SecretAccessKey = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_SECRET_ACCESS_KEY"))
errs := validator.New().Struct(creds)
if errs != nil {
return creds, errs
}
//os.Setenv("AWS_DEFAULT_REGION", creds.Region)
return creds, nil
}
// GetAwsSecretValue returns the string value for a secret stored in AWS Secrets Manager.
func GetAwsSecretValue(creds awsCredentials, secretId string) (string, error) {
svc := secretsmanager.New(creds.Session())
res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
SecretId: aws.String(secretId),
})
if err != nil {
return "", errors.Wrapf(err, "failed to get value for secret id %s", secretId)
}
return aws.StringValue(res.SecretString), nil
}
// EcrPurgeImages deletes the oldest images from an ECR repository so the image count stays under the
// configured repository max, since ECR limits the number of images per repository and would otherwise
// require manual deletion.
func EcrPurgeImages(req *serviceBuildRequest) ([]*ecr.ImageIdentifier, error) {
svc := ecr.New(req.awsSession())
var (
ts []int
tsImgIds = map[int][]*ecr.ImageIdentifier{}
)
// Describe all the image IDs to determine oldest.
err := svc.DescribeImagesPages(&ecr.DescribeImagesInput{
RepositoryName: aws.String(req.EcrRepositoryName),
}, func(res *ecr.DescribeImagesOutput, lastPage bool) bool {
for _, img := range res.ImageDetails {
imgTs := int(img.ImagePushedAt.Unix())
if _, ok := tsImgIds[imgTs]; !ok {
tsImgIds[imgTs] = []*ecr.ImageIdentifier{}
ts = append(ts, imgTs)
}
if img.ImageTags != nil {
tsImgIds[imgTs] = append(tsImgIds[imgTs], &ecr.ImageIdentifier{
ImageTag: img.ImageTags[0],
})
} else if img.ImageDigest != nil {
tsImgIds[imgTs] = append(tsImgIds[imgTs], &ecr.ImageIdentifier{
ImageDigest: img.ImageDigest,
})
}
}
return !lastPage
})
if err != nil {
return nil, errors.Wrapf(err, "failed to describe images for repository '%s'", req.EcrRepositoryName)
}
// Sort the image timestamps in reverse order.
sort.Sort(sort.Reverse(sort.IntSlice(ts)))
// Loop over all the timestamps, skip the newest images until count exceeds limit.
var imgCnt int
var delIds []*ecr.ImageIdentifier
for _, imgTs := range ts {
for _, imgId := range tsImgIds[imgTs] {
imgCnt = imgCnt + 1
if imgCnt <= req.EcrRepositoryMaxImages {
continue
}
delIds = append(delIds, imgId)
}
}
// If there are image IDs to delete, delete them.
if len(delIds) > 0 {
//log.Printf("\t\tECR has %d images for repository '%s' which exceeds limit of %d", imgCnt, creds.EcrRepositoryName, creds.EcrRepositoryMaxImages)
//for _, imgId := range delIds {
// log.Printf("\t\t\tDelete %s", *imgId.ImageTag)
//}
_, err = svc.BatchDeleteImage(&ecr.BatchDeleteImageInput{
ImageIds: delIds,
RepositoryName: aws.String(req.EcrRepositoryName),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to delete %d images for repository '%s'", len(delIds), req.EcrRepositoryName)
}
}
return delIds, nil
}
// SyncPublicS3Files copies the local files from the static directory to s3 with public-read enabled.
func SyncPublicS3Files(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
uploader := s3manager.NewUploader(awsSession)
di := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
if err := uploader.UploadWithIterator(aws.BackgroundContext(), di); err != nil {
return err
}
return nil
}
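// Example usage (illustrative; the bucket name and paths are placeholders):
// err := SyncPublicS3Files(sess, "my-public-bucket", "public/static", "./cmd/web-app/static")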
// EcsReadTaskDefinition reads the ECS task definition file for the target environment and returns its raw bytes.
func EcsReadTaskDefinition(serviceDir, targetEnv string) ([]byte, error) {
checkPaths := []string{
filepath.Join(serviceDir, fmt.Sprintf("ecs-task-definition-%s.json", targetEnv)),
filepath.Join(serviceDir, "ecs-task-definition.json"),
}
var defFile string
for _, tf := range checkPaths {
ok, _ := exists(tf)
if ok {
defFile = tf
break
}
}
if defFile == "" {
return nil, errors.Errorf("failed to locate task definition - checked %s", strings.Join(checkPaths, ", "))
}
dat, err := ioutil.ReadFile(defFile)
if err != nil {
return nil, errors.WithMessagef(err, "failed to read file %s", defFile)
}
return dat, nil
}
// LambdaReadFuncDefinition reads the Lambda function definition file for the target environment and returns its raw bytes.
func LambdaReadFuncDefinition(serviceDir, targetEnv string) ([]byte, error) {
checkPaths := []string{
filepath.Join(serviceDir, fmt.Sprintf("lambda-func-definition-%s.json", targetEnv)),
filepath.Join(serviceDir, "lambda-func-definition.json"),
}
var defFile string
for _, tf := range checkPaths {
ok, _ := exists(tf)
if ok {
defFile = tf
break
}
}
if defFile == "" {
return nil, errors.Errorf("failed to locate function definition - checked %s", strings.Join(checkPaths, ", "))
}
dat, err := ioutil.ReadFile(defFile)
if err != nil {
return nil, errors.WithMessagef(err, "failed to read file %s", defFile)
}
return dat, nil
}
// LambdaS3KeyFromReleaseImage generates an S3 key from a release image.
func LambdaS3KeyFromReleaseImage(releaseImage string) string {
it := filepath.Base(releaseImage)
it = strings.Replace(it, ":", "/", -1)
return filepath.Join("src/aws/lambda/", it+".zip")
}
// parseTaskDefinitionInput json decodes raw bytes into an ecs.RegisterTaskDefinitionInput.
func parseTaskDefinitionInput(dat []byte) (*ecs.RegisterTaskDefinitionInput, error) {
dat = convertKeys(dat)
var taskDef *ecs.RegisterTaskDefinitionInput
if err := json.Unmarshal(dat, &taskDef); err != nil {
return nil, errors.WithMessagef(err, "failed to json decode task definition - %s", string(dat))
}
return taskDef, nil
}
// convertKeys fixes json keys so they can be unmarshaled into aws types, since no AWS structs have json tags.
func convertKeys(j json.RawMessage) json.RawMessage {
m := make(map[string]json.RawMessage)
if err := json.Unmarshal([]byte(j), &m); err != nil {
// Not a JSON object
return j
}
for k, v := range m {
fixed := fixKey(k)
delete(m, k)
m[fixed] = convertKeys(v)
}
b, err := json.Marshal(m)
if err != nil {
return j
}
return json.RawMessage(b)
}
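// For example (illustrative): {"family":"x","cpu":"256"} becomes {"FAMILY":"x","CPU":"256"};
// values that are not JSON objects (such as arrays) are returned unchanged.
// fixKey upper-cases a key with strings.ToTitle; encoding/json matches struct fields
// case-insensitively, so e.g. "FAMILY" still unmarshals into Family.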
func fixKey(key string) string {
return strings.ToTitle(key)
}
// jsonEncodeStringValue json encodes string values to be used in the ECS task definition.
func jsonEncodeStringValue(str string) string {
dat, _ := json.Marshal(str)
return strings.Trim(string(dat), "\"")
}


@ -1,212 +0,0 @@
package cicd
import (
"context"
"encoding/json"
"log"
"path/filepath"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"geeks-accelerator/oss/saas-starter-kit/internal/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/lib/pq"
_ "github.com/lib/pq"
"github.com/pkg/errors"
sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
"gopkg.in/go-playground/validator.v9"
)
// MigrateFlags defines the flags used for executing schema migration.
type MigrateFlags struct {
// Required flags.
Env string `validate:"oneof=dev stage prod" example:"dev"`
// Optional flags.
ProjectRoot string `validate:"omitempty" example:"."`
ProjectName string `validate:"omitempty" example:"example-project"`
}
// migrateRequest defines the details needed to execute a schema migration.
type migrateRequest struct {
Env string `validate:"oneof=dev stage prod"`
ProjectRoot string `validate:"required"`
ProjectName string `validate:"required"`
GoModFile string `validate:"required"`
GoModName string `validate:"required"`
AwsCreds awsCredentials `validate:"required,dive,required"`
_awsSession *session.Session
flags MigrateFlags
}
// awsSession returns the current AWS session for the migrateRequest.
func (r *migrateRequest) awsSession() *session.Session {
if r._awsSession == nil {
r._awsSession = r.AwsCreds.Session()
}
return r._awsSession
}
// NewMigrateRequest generates a new request for executing schema migration for a given set of CLI flags.
func NewMigrateRequest(log *log.Logger, flags MigrateFlags) (*migrateRequest, error) {
// Validates specified CLI flags map to struct successfully.
log.Println("Validate flags.")
{
errs := validator.New().Struct(flags)
if errs != nil {
return nil, errs
}
log.Printf("\t%s\tFlags ok.", tests.Success)
}
// Generate a migrate request using CLI flags and AWS credentials.
log.Println("Generate migrate request.")
var req migrateRequest
{
// Define new migrate request.
req = migrateRequest{
Env: flags.Env,
ProjectRoot: flags.ProjectRoot,
ProjectName: flags.ProjectName,
flags: flags,
}
// When project root directory is empty or set to current working path, then search for the project root by locating
// the go.mod file.
log.Println("\tDetermining the project root directory.")
{
if req.ProjectRoot == "" || req.ProjectRoot == "." {
log.Println("\tAttempting to location project root directory from current working directory.")
var err error
req.GoModFile, err = findProjectGoModFile()
if err != nil {
return nil, err
}
req.ProjectRoot = filepath.Dir(req.GoModFile)
} else {
log.Printf("\t\tUsing supplied project root directory '%s'.\n", req.ProjectRoot)
req.GoModFile = filepath.Join(req.ProjectRoot, "go.mod")
}
log.Printf("\t\t\tproject root: %s", req.ProjectRoot)
log.Printf("\t\t\tgo.mod: %s", req.GoModFile)
}
log.Println("\tExtracting go module name from go.mod.")
{
var err error
req.GoModName, err = loadGoModName(req.GoModFile)
if err != nil {
return nil, err
}
log.Printf("\t\t\tmodule name: %s", req.GoModName)
}
log.Println("\tDetermining the project name.")
{
if req.ProjectName != "" {
log.Printf("\t\tUse provided value.")
} else {
req.ProjectName = filepath.Base(req.GoModName)
log.Printf("\t\tSet from go module.")
}
log.Printf("\t\t\tproject name: %s", req.ProjectName)
}
// Verifies AWS credentials specified as environment variables.
log.Println("\tVerify AWS credentials.")
{
var err error
req.AwsCreds, err = GetAwsCredentials(req.Env)
if err != nil {
return nil, err
}
if req.AwsCreds.UseRole {
log.Printf("\t\t\tUsing role")
} else {
log.Printf("\t\t\tAccessKeyID: '%s'", req.AwsCreds.AccessKeyID)
}
log.Printf("\t\t\tRegion: '%s'", req.AwsCreds.Region)
log.Printf("\t%s\tAWS credentials valid.", tests.Success)
}
}
return &req, nil
}
// Migrate is the main entrypoint for migrating the database schema for a given target environment.
func Migrate(log *log.Logger, ctx context.Context, req *migrateRequest) error {
// Load the database details.
var db DB
{
log.Println("Get Database Details from AWS Secret Manager")
dbId := dBInstanceIdentifier(req.ProjectName, req.Env)
// Secret ID used to store the DB username and password across deploys.
dbSecretId := secretID(req.ProjectName, req.Env, dbId)
// Retrieve the current secret value if something is stored.
{
sm := secretsmanager.New(req.awsSession())
res, err := sm.GetSecretValue(&secretsmanager.GetSecretValueInput{
SecretId: aws.String(dbSecretId),
})
if err != nil {
if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
return errors.Wrapf(err, "Failed to get value for secret id %s", dbSecretId)
} else {
// This should only happen when the deploy script first runs and no resources exist in the
// AWS account. Creating a database requires the VPC, so a better strategy is needed for
// defining resources that can be shared between deployment steps.
log.Printf("\t%s\tDatabase credentials not found.", tests.Failed)
return nil
}
} else {
err = json.Unmarshal([]byte(*res.SecretString), &db)
if err != nil {
return errors.Wrap(err, "Failed to json decode db credentials")
}
}
log.Printf("\t%s\tDatabase credentials found.", tests.Success)
}
}
// Start Database and run the migration.
{
log.Println("Proceed with schema migration")
log.Printf("\t\tOpen database connection")
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
// name use RegisterWithServiceName.
sqltrace.Register(db.Driver, &pq.Driver{}, sqltrace.WithServiceName("devops:migrate"))
masterDb, err := sqlxtrace.Open(db.Driver, db.URL())
if err != nil {
return errors.WithStack(err)
}
defer masterDb.Close()
// Start Migrations
log.Printf("\t\tStart migrations.")
if err = schema.Migrate(ctx, masterDb, log, false); err != nil {
return errors.WithStack(err)
}
log.Printf("\t%s\tMigrate complete.", tests.Success)
}
return nil
}


@ -1,119 +0,0 @@
package cicd
import (
"bytes"
"mime"
"os"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pkg/errors"
)
// DirectoryIterator represents an iterator of a specified directory
type DirectoryIterator struct {
dir string
filePaths []string
bucket string
keyPrefix string
acl string
next struct {
path string
f *os.File
}
err error
}
// NewDirectoryIterator builds a new DirectoryIterator
func NewDirectoryIterator(bucket, keyPrefix, dir, acl string) s3manager.BatchUploadIterator {
var paths []string
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
paths = append(paths, path)
}
return nil
})
return &DirectoryIterator{
dir: dir,
filePaths: paths,
bucket: bucket,
keyPrefix: keyPrefix,
acl: acl,
}
}
// Next returns whether next file exists or not
func (di *DirectoryIterator) Next() bool {
if len(di.filePaths) == 0 {
di.next.f = nil
return false
}
f, err := os.Open(di.filePaths[0])
di.err = err
di.next.f = f
di.next.path = di.filePaths[0]
di.filePaths = di.filePaths[1:]
return di.Err() == nil
}
// Err returns error of DirectoryIterator
func (di *DirectoryIterator) Err() error {
return errors.WithStack(di.err)
}
// UploadObject uploads a file
func (di *DirectoryIterator) UploadObject() s3manager.BatchUploadObject {
f := di.next.f
var acl *string
if di.acl != "" {
acl = aws.String(di.acl)
}
buffer, contentType, rerr := readFile(f)
nextPath, _ := filepath.Rel(di.dir, di.next.path)
return s3manager.BatchUploadObject{
Object: &s3manager.UploadInput{
Bucket: aws.String(di.bucket),
Key: aws.String(filepath.Join(di.keyPrefix, nextPath)),
Body: bytes.NewReader(buffer),
ContentType: aws.String(contentType),
ACL: acl,
},
After: func() error {
if rerr != nil {
return rerr
}
return f.Close()
},
}
}
func readFile(f *os.File) ([]byte, string, error) {
// Get file size and read the file content into a buffer.
fileInfo, err := f.Stat()
if err != nil {
return nil, "", err
}
buffer := make([]byte, fileInfo.Size())
if _, err := io.ReadFull(f, buffer); err != nil {
return nil, "", err
}
ext := filepath.Ext(f.Name())
contentType := mime.TypeByExtension(ext)
//f.Seek(0, io.SeekStart)
//ctBuf := make([]byte, 512)
//f.Read(ctBuf)
//contentType = http.DetectContentType(ctBuf)
return buffer, contentType, nil
}


@ -1,314 +0,0 @@
package cicd
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/iancoleman/strcase"
"github.com/pkg/errors"
)
// serviceRequest defines the details shared by service build and deploy requests.
type serviceRequest struct {
ServiceName string `validate:"required"`
ServiceDir string `validate:"required"`
Env string `validate:"oneof=dev stage prod"`
ProjectRoot string `validate:"required"`
ProjectName string `validate:"required"`
DockerFile string `validate:"required"`
GoModFile string `validate:"required"`
GoModName string `validate:"required"`
AwsCreds awsCredentials `validate:"required,dive,required"`
_awsSession *session.Session
ReleaseImage string
}
// ProjectNameCamel returns the camel-cased version of the project name.
func (r *serviceRequest) ProjectNameCamel() string {
s := strings.Replace(r.ProjectName, "_", " ", -1)
s = strings.Replace(s, "-", " ", -1)
s = strcase.ToCamel(s)
return s
}
// awsSession returns the current AWS session for the serviceRequest.
func (r *serviceRequest) awsSession() *session.Session {
if r._awsSession == nil {
r._awsSession = r.AwsCreds.Session()
}
return r._awsSession
}
// init sets the basic details needed for both build and deploy for serviceRequest.
func (req *serviceRequest) init(log *log.Logger) error {
// When project root directory is empty or set to current working path, then search for the project root by locating
// the go.mod file.
log.Println("\tDetermining the project root directory.")
{
if req.ProjectRoot == "" || req.ProjectRoot == "." {
log.Println("\tAttempting to location project root directory from current working directory.")
var err error
req.GoModFile, err = findProjectGoModFile()
if err != nil {
return err
}
req.ProjectRoot = filepath.Dir(req.GoModFile)
} else {
log.Printf("\t\tUsing supplied project root directory '%s'.\n", req.ProjectRoot)
req.GoModFile = filepath.Join(req.ProjectRoot, "go.mod")
}
log.Printf("\t\t\tproject root: %s", req.ProjectRoot)
log.Printf("\t\t\tgo.mod: %s", req.GoModFile)
}
log.Println("\tExtracting go module name from go.mod.")
{
var err error
req.GoModName, err = loadGoModName(req.GoModFile)
if err != nil {
return err
}
log.Printf("\t\t\tmodule name: %s", req.GoModName)
}
log.Println("\tDetermining the project name.")
{
if req.ProjectName != "" {
log.Printf("\t\tUse provided value.")
} else {
req.ProjectName = filepath.Base(req.GoModName)
log.Printf("\t\tSet from go module.")
}
log.Printf("\t\t\tproject name: %s", req.ProjectName)
}
log.Println("\tAttempting to locate service directory from project root directory.")
{
if req.DockerFile != "" {
req.DockerFile = req.DockerFile
log.Printf("\t\tUse provided value.")
} else {
log.Printf("\t\tFind from project root looking for Dockerfile.")
var err error
req.DockerFile, err = findServiceDockerFile(req.ProjectRoot, req.ServiceName)
if err != nil {
return err
}
}
req.ServiceDir = filepath.Dir(req.DockerFile)
log.Printf("\t\t\tservice directory: %s", req.ServiceDir)
log.Printf("\t\t\tdockerfile: %s", req.DockerFile)
}
// Verifies AWS credentials specified as environment variables.
log.Println("\tVerify AWS credentials.")
{
var err error
req.AwsCreds, err = GetAwsCredentials(req.Env)
if err != nil {
return err
}
if req.AwsCreds.UseRole {
log.Printf("\t\t\tUsing role")
} else {
log.Printf("\t\t\tAccessKeyID: '%s'", req.AwsCreds.AccessKeyID)
}
log.Printf("\t\t\tRegion: '%s'", req.AwsCreds.Region)
log.Printf("\t%s\tAWS credentials valid.", tests.Success)
}
return nil
}
// ecrRepositoryName returns the name used for the AWS ECR Repository.
func ecrRepositoryName(projectName string) string {
return projectName
}
// releaseTag returns the tag used for a release image, which always includes the environment and
// service name. If a CI commit SHA or ref name env var is set, it is appended.
func releaseTag(env, serviceName string) string {
tag1 := env + "-" + serviceName
// Generate tags for the release image.
var releaseTag string
if v := os.Getenv("BUILDINFO_CI_COMMIT_SHA"); v != "" {
tag2 := tag1 + "-" + v[0:8]
releaseTag = tag2
} else if v := os.Getenv("CI_COMMIT_SHA"); v != "" {
tag2 := tag1 + "-" + v[0:8]
releaseTag = tag2
} else if v := os.Getenv("BUILDINFO_CI_COMMIT_REF_NAME"); v != "" {
tag2 := tag1 + "-" + v
releaseTag = tag2
} else if v := os.Getenv("CI_COMMIT_REF_NAME"); v != "" {
tag2 := tag1 + "-" + v
releaseTag = tag2
} else {
releaseTag = tag1
}
return releaseTag
}
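// For example (illustrative): with CI_COMMIT_SHA set to a sha beginning "1ecfd275", env "dev"
// and service "web-api" produce the tag "dev-web-api-1ecfd275".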
// releaseImage returns the full name used for tagging a release image: the repository URI plus the
// release tag.
func releaseImage(env, serviceName, repositoryUri string) string {
return repositoryUri + ":" + releaseTag(env, serviceName)
}
// dBInstanceIdentifier returns the identifier used for the RDS database instance.
func dBInstanceIdentifier(projectName, env string) string {
return projectName + "-" + env
}
// secretID returns the secret name with a standard prefix.
func secretID(projectName, env, secretName string) string {
return filepath.Join(projectName, env, secretName)
}
// findProjectGoModFile finds the project root directory from the current working directory.
func findProjectGoModFile() (string, error) {
var err error
projectRoot, err := os.Getwd()
if err != nil {
return "", errors.WithMessage(err, "failed to get current working directory")
}
// Try to find the project root by looking for the go.mod file in a parent directory.
var goModFile string
testDir := projectRoot
for i := 0; i < 3; i++ {
if goModFile != "" {
testDir = filepath.Join(testDir, "../")
}
goModFile = filepath.Join(testDir, "go.mod")
ok, _ := exists(goModFile)
if ok {
projectRoot = testDir
break
}
}
// Verify the go.mod file was found.
ok, err := exists(goModFile)
if err != nil {
return "", errors.WithMessagef(err, "failed to load go.mod for project using project root %s")
} else if !ok {
return "", errors.Errorf("failed to locate project go.mod in project root %s", projectRoot)
}
return goModFile, nil
}
// findServiceDockerFile finds the service directory.
func findServiceDockerFile(projectRoot, targetService string) (string, error) {
checkDirs := []string{
filepath.Join(projectRoot, "cmd", targetService),
filepath.Join(projectRoot, "tools", targetService),
}
var dockerFile string
for _, cd := range checkDirs {
// Check to see if directory contains Dockerfile.
tf := filepath.Join(cd, "Dockerfile")
ok, _ := exists(tf)
if ok {
dockerFile = tf
break
}
}
if dockerFile == "" {
return "", errors.Errorf("failed to locate Dockerfile for service %s", targetService)
}
return dockerFile, nil
}
// getTargetEnv checks for an env var that is prefixed with the current target env.
func getTargetEnv(targetEnv, envName string) string {
k := fmt.Sprintf("%s_%s", strings.ToUpper(targetEnv), envName)
if v := os.Getenv(k); v != "" {
// Set the non prefixed env var with the prefixed value.
os.Setenv(envName, v)
return v
}
return os.Getenv(envName)
}
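// For example (illustrative): getTargetEnv("dev", "AWS_REGION") checks DEV_AWS_REGION first,
// copies its value into AWS_REGION when found, and otherwise falls back to AWS_REGION.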
// loadGoModName parses out the module name from go.mod.
func loadGoModName(goModFile string) (string, error) {
ok, err := exists(goModFile)
if err != nil {
return "", errors.WithMessage(err, "Failed to load go.mod for project")
} else if !ok {
return "", errors.Errorf("Failed to locate project go.mod at %s", goModFile)
}
b, err := ioutil.ReadFile(goModFile)
if err != nil {
return "", errors.WithMessagef(err, "Failed to read go.mod at %s", goModFile)
}
var name string
lines := strings.Split(string(b), "\n")
for _, l := range lines {
if strings.HasPrefix(l, "module ") {
name = strings.TrimSpace(strings.Split(l, " ")[1])
break
}
}
return name, nil
}
// exists returns a bool as to whether a file path exists.
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
// execCmds executes a set of commands using the current env variables.
func execCmds(log *log.Logger, workDir string, cmds ...[]string) error {
for _, cmdVals := range cmds {
cmd := exec.Command(cmdVals[0], cmdVals[1:]...)
cmd.Dir = workDir
cmd.Env = os.Environ()
cmd.Stderr = log.Writer()
cmd.Stdout = log.Writer()
err := cmd.Run()
if err != nil {
return errors.WithMessagef(err, "failed to execute %s", strings.Join(cmdVals, " "))
}
}
return nil
}


@ -1,449 +0,0 @@
package cicd
import (
"bufio"
"crypto/md5"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"gopkg.in/go-playground/validator.v9"
)
// ServiceBuildFlags defines the flags used for executing a service build.
type ServiceBuildFlags struct {
// Required flags.
ServiceName string `validate:"required" example:"web-api"`
Env string `validate:"oneof=dev stage prod" example:"dev"`
// Optional flags.
ProjectRoot string `validate:"omitempty" example:"."`
ProjectName string `validate:"omitempty" example:"example-project"`
DockerFile string `validate:"omitempty" example:"./cmd/web-api/Dockerfile"`
CommitRef string `validate:"omitempty" example:"master@1ecfd275"`
S3BucketPrivateName string `validate:"omitempty" example:"saas-example-project-private"`
BuildDir string `validate:"omitempty" example:"."`
NoCache bool `validate:"omitempty" example:"false"`
NoPush bool `validate:"omitempty" example:"false"`
IsLambda bool `validate:"omitempty" example:"false"`
}
// serviceBuildRequest defines the details needed to execute a service build.
type serviceBuildRequest struct {
*serviceRequest
EcrRepositoryName string `validate:"required"`
EcrRepository *ecr.CreateRepositoryInput
EcrRepositoryMaxImages int `validate:"omitempty"`
BuildDir string `validate:"omitempty" example:""`
CommitRef string `validate:"omitempty"`
S3BucketPrivateName string `validate:"omitempty"`
NoCache bool `validate:"omitempty"`
NoPush bool `validate:"omitempty"`
IsLambda bool `validate:"omitempty"`
flags ServiceBuildFlags
}
// NewServiceBuildRequest generates a new request for executing build of a single service for a given set of CLI flags.
func NewServiceBuildRequest(log *log.Logger, flags ServiceBuildFlags) (*serviceBuildRequest, error) {
// Validates specified CLI flags map to struct successfully.
log.Println("Validate flags.")
{
errs := validator.New().Struct(flags)
if errs != nil {
return nil, errs
}
log.Printf("\t%s\tFlags ok.", tests.Success)
}
// Generate a deploy request using CLI flags and AWS credentials.
log.Println("Generate deploy request.")
var req serviceBuildRequest
{
// Define new service request.
sr := &serviceRequest{
ServiceName: flags.ServiceName,
Env: flags.Env,
ProjectRoot: flags.ProjectRoot,
ProjectName: flags.ProjectName,
DockerFile: flags.DockerFile,
}
if err := sr.init(log); err != nil {
return nil, err
}
req = serviceBuildRequest{
serviceRequest: sr,
CommitRef: flags.CommitRef,
S3BucketPrivateName: flags.S3BucketPrivateName,
BuildDir: flags.BuildDir,
NoCache: flags.NoCache,
NoPush: flags.NoPush,
IsLambda: flags.IsLambda,
flags: flags,
}
if req.BuildDir == "" {
req.BuildDir = req.ProjectRoot
}
// Set default AWS ECR Repository Name.
req.EcrRepositoryName = ecrRepositoryName(req.ProjectName)
req.EcrRepository = &ecr.CreateRepositoryInput{
RepositoryName: aws.String(req.EcrRepositoryName),
Tags: []*ecr.Tag{
&ecr.Tag{Key: aws.String(awsTagNameProject), Value: aws.String(req.ProjectName)},
&ecr.Tag{Key: aws.String(awsTagNameEnv), Value: aws.String(req.Env)},
},
}
log.Printf("\t\t\tSet ECR Repository Name to '%s'.", req.EcrRepositoryName)
// Set default AWS ECR Registry Max Images.
req.EcrRepositoryMaxImages = defaultAwsRegistryMaxImages
log.Printf("\t\t\tSet ECR Registry Max Images to '%d'.", req.EcrRepositoryMaxImages)
// Get the default commit ref.
if req.CommitRef == "" {
if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
req.CommitRef = "tag-" + ev
} else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
req.CommitRef = "branch-" + ev
}
if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
req.CommitRef = req.CommitRef + "@" + ev
}
}
}
return &req, nil
}
// ServiceBuild is the main entrypoint for building a service for a given target environment.
func ServiceBuild(log *log.Logger, req *serviceBuildRequest) error {
// Load the AWS ECR repository. Try to find by name else create new one.
var dockerLoginCmd []string
{
log.Println("ECR - Get or create repository.")
svc := ecr.New(req.awsSession())
// First try to find ECR repository by name.
var awsRepo *ecr.Repository
descRes, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
RepositoryNames: []*string{aws.String(req.EcrRepositoryName)},
})
if err != nil {
if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != ecr.ErrCodeRepositoryNotFoundException {
return errors.Wrapf(err, "failed to describe repository '%s'", req.EcrRepositoryName)
}
} else if len(descRes.Repositories) > 0 {
awsRepo = descRes.Repositories[0]
}
if awsRepo == nil {
// If no repository was found, create one.
createRes, err := svc.CreateRepository(req.EcrRepository)
if err != nil {
return errors.Wrapf(err, "failed to create repository '%s'", req.EcrRepositoryName)
}
awsRepo = createRes.Repository
log.Printf("\t\tCreated: %s.", *awsRepo.RepositoryArn)
} else {
log.Printf("\t\tFound: %s.", *awsRepo.RepositoryArn)
log.Println("\t\tChecking old ECR images.")
delIds, err := EcrPurgeImages(req)
if err != nil {
return err
}
// Since ECR limits the number of images per repository, old ones need to be deleted to stay under the limit.
// If there are image IDs to delete, delete them.
if len(delIds) > 0 {
log.Printf("\t\tDeleted %d images that exceeded limit of %d", len(delIds), req.EcrRepositoryMaxImages)
for _, imgId := range delIds {
log.Printf("\t\t\t%s", *imgId.ImageTag)
}
}
}
req.ReleaseImage = releaseImage(req.Env, req.ServiceName, *awsRepo.RepositoryUri)
log.Printf("\t\trelease image: %s", req.ReleaseImage)
log.Printf("\t%s\tRelease image valid.", tests.Success)
log.Println("ECR - Retrieve authorization token used for docker login.")
// Get the credentials necessary for logging into the AWS Elastic Container Registry
// made available with the AWS access key and AWS secret access keys.
res, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
if err != nil {
return errors.Wrap(err, "failed to get ecr authorization token")
}
authToken, err := base64.StdEncoding.DecodeString(*res.AuthorizationData[0].AuthorizationToken)
if err != nil {
return errors.Wrap(err, "failed to base64 decode ecr authorization token")
}
pts := strings.Split(string(authToken), ":")
user := pts[0]
pass := pts[1]
dockerLoginCmd = []string{
"docker",
"login",
"-u", user,
"-p", pass,
*res.AuthorizationData[0].ProxyEndpoint,
}
log.Printf("\t%s\tdocker login ok.", tests.Success)
}
// Once we can access the repository in ECR, do the docker build.
{
log.Printf("Starting docker build %s\n", req.ReleaseImage)
var dockerFile string
dockerPath := filepath.Join(req.BuildDir, req.DockerFile)
if _, err := os.Stat(dockerPath); err == nil {
dockerFile = req.DockerFile
} else {
dockerPath = req.DockerFile
dockerFile, err = filepath.Rel(req.BuildDir, dockerPath)
if err != nil {
return errors.Wrapf(err, "Failed parse relative path for %s from %s", dockerPath, req.BuildDir)
}
}
// Name of the first build stage declared in the dockerFile.
var buildStageName string
// When the dockerFile is multistage, caching can be applied. Scan the dockerFile for the first stage.
// FROM golang:1.12.6-alpine3.9 AS build_base
var buildBaseImageTag string
{
file, err := os.Open(dockerPath)
if err != nil {
return errors.WithStack(err)
}
defer file.Close()
// List of lines in the dockerfile for the first stage. This will be used to tag the image to help ensure
// any changes to the lines associated with the first stage force cache to be reset.
var stageLines []string
// Loop through all the lines in the Dockerfile searching for the lines associated with the first build stage.
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
lineLower := strings.ToLower(line)
if strings.HasPrefix(lineLower, "from ") {
if buildStageName != "" {
// Only need to scan all the lines for the first build stage. Break when reach next FROM.
break
} else if !strings.Contains(lineLower, " as ") {
// Caching is only supported if the first FROM has a name.
log.Printf("\t\t\tSkipping stage cache, build stage not detected.\n")
break
}
buildStageName = strings.TrimSpace(strings.Split(lineLower, " as ")[1])
stageLines = append(stageLines, line)
} else if buildStageName != "" {
stageLines = append(stageLines, line)
}
}
if err := scanner.Err(); err != nil {
return errors.WithStack(err)
}
// If we have detected a build stage, then generate the appropriate tag.
if buildStageName != "" {
log.Printf("\t\tFound build stage %s for caching.\n", buildStageName)
// Generate a checksum for the lines associated with the build stage.
buildBaseHashPts := []string{
fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(stageLines, "\n")))),
}
switch buildStageName {
case "build_base_golang":
// Compute the checksum for the go.mod file.
goSumPath := filepath.Join(req.ProjectRoot, "go.sum")
goSumDat, err := ioutil.ReadFile(goSumPath)
if err != nil {
return errors.Wrapf(err, "Failed parse relative path for %s from %s", req.DockerFile, req.ProjectRoot)
}
buildBaseHashPts = append(buildBaseHashPts, fmt.Sprintf("%x", md5.Sum(goSumDat)))
}
// Combine all the checksums to be used to tag the target build stage.
buildBaseHash := fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(buildBaseHashPts, "|"))))
// New stage image tag.
buildBaseImageTag = buildStageName + "-" + buildBaseHash[0:8]
}
}
var cmds [][]string
// Enabling caching of the first build stage defined in the dockerFile.
var buildBaseImage string
if !req.NoCache && buildBaseImageTag != "" {
var pushTargetImg bool
if ciReg := os.Getenv("CI_REGISTRY"); ciReg != "" {
cmds = append(cmds, []string{
"docker", "login",
"-u", os.Getenv("CI_REGISTRY_USER"),
"-p", os.Getenv("CI_REGISTRY_PASSWORD"),
ciReg})
buildBaseImage = os.Getenv("CI_REGISTRY_IMAGE") + ":" + buildBaseImageTag
pushTargetImg = true
} else {
buildBaseImage = req.ProjectName + ":" + req.Env + "-" + req.ServiceName + "-" + buildBaseImageTag
}
cmds = append(cmds, []string{"docker", "pull", buildBaseImage})
cmds = append(cmds, []string{
"docker", "build",
"--file=" + dockerFile,
"--cache-from", buildBaseImage,
"--build-arg", "service=" + req.ServiceName,
"--build-arg", "env=" + req.Env,
"-t", buildBaseImage,
"--target", buildStageName,
".",
})
if pushTargetImg {
cmds = append(cmds, []string{"docker", "push", buildBaseImage})
}
}
// The initial build command slice.
buildCmd := []string{
"docker", "build",
"--file=" + dockerFile,
"--build-arg", "service=" + req.ServiceName,
"--build-arg", "env=" + req.Env,
"--build-arg", "commit_ref=" + req.CommitRef,
"--build-arg", "swagInit=1",
"-t", req.ReleaseImage,
}
// Append additional build flags.
if req.NoCache {
buildCmd = append(buildCmd, "--no-cache")
} else if buildBaseImage != "" {
buildCmd = append(buildCmd, "--cache-from", buildBaseImage)
}
// Finally append the build context as the current directory since execCmds runs each command with
// the build directory as its working directory.
buildCmd = append(buildCmd, ".")
cmds = append(cmds, buildCmd)
s3Files := make(map[string]*s3manager.UploadInput)
if !req.NoPush {
if req.IsLambda {
lambdaS3Key := LambdaS3KeyFromReleaseImage(req.ReleaseImage)
tmpDir := os.TempDir()
lambdaZip := filepath.Join(tmpDir, filepath.Base(lambdaS3Key))
containerName := uuid.NewRandom().String()
cmds = append(cmds, []string{"docker", "create", "-ti", "--name", containerName, req.ReleaseImage, "bash"})
cmds = append(cmds, []string{"docker", "cp", containerName + ":/var/task", tmpDir})
cmds = append(cmds, []string{"docker", "rm", containerName})
cmds = append(cmds, []string{"cd", tmpDir + "/task"})
cmds = append(cmds, []string{"zip", "-r", lambdaZip, "."})
s3Files[lambdaZip] = &s3manager.UploadInput{
Bucket: &req.S3BucketPrivateName,
Key: &lambdaS3Key,
}
} else {
cmds = append(cmds, dockerLoginCmd)
cmds = append(cmds, []string{"docker", "push", req.ReleaseImage})
}
}
for _, cmd := range cmds {
var logCmd string
if len(cmd) >= 2 && cmd[1] == "login" {
logCmd = strings.Join(cmd[0:2], " ")
} else {
logCmd = strings.Join(cmd, " ")
}
log.Printf("\t\t%s\n", logCmd)
err := execCmds(log, req.BuildDir, cmd)
if err != nil {
if len(cmd) > 2 && cmd[1] == "pull" {
log.Printf("\t\t\tSkipping pull - %s\n", err.Error())
} else {
return errors.Wrapf(err, "Failed to exec %s", strings.Join(cmd, " "))
}
}
}
if len(s3Files) > 0 {
// Create an uploader with the session and default options
uploader := s3manager.NewUploader(req.awsSession())
// Perform an upload.
for lf, upParams := range s3Files {
f, err := os.Open(lf)
if err != nil {
return errors.Wrapf(err, "Failed open file to %s", lf)
}
upParams.Body = f
_, err = uploader.Upload(upParams)
if err != nil {
return errors.Wrapf(err, "Failed upload file to %s", *upParams.Key)
}
log.Printf("\t\tUploaded %s to s3://%s/%s\n", lf, *upParams.Bucket, *upParams.Key)
}
}
log.Printf("\t%s\tbuild complete.\n", tests.Success)
}
return nil
}

File diff suppressed because it is too large.


@ -1,85 +0,0 @@
// Package retry contains a simple retry mechanism defined by a slice of delay
// times. There are no maximum retries accounted for here. If retries should be
// limited, use a Timeout context to keep from retrying forever. This should
// probably be made into something more robust.
package retry
import (
"context"
"time"
)
// DefaultPollIntervals is a slice of the delays before re-checking the status on
// an executing query, backing off from a short delay at first. This sequence
// has been selected with Athena queries in mind, which may operate very
// quickly for things like schema manipulation, or which may run for an
// extended period of time, when running an actual data analysis query.
// Long-running queries will exhaust their rapid retries quickly, and fall back
// to checking every few seconds or longer.
var DefaultPollIntervals = []time.Duration{
time.Millisecond,
2 * time.Millisecond,
2 * time.Millisecond,
5 * time.Millisecond,
10 * time.Millisecond,
20 * time.Millisecond,
50 * time.Millisecond,
50 * time.Millisecond,
100 * time.Millisecond,
100 * time.Millisecond,
200 * time.Millisecond,
500 * time.Millisecond,
time.Second,
2 * time.Second,
5 * time.Second,
10 * time.Second,
20 * time.Second,
30 * time.Second,
time.Minute,
}
// delayer keeps track of the current delay between retries.
type delayer struct {
Delays []time.Duration
currentIndex int
}
// Delay returns the current delay duration, and advances the index to the next
// delay defined. If the index has reached the end of the delay slice, then it
// will continue to return the maximum delay defined.
func (d *delayer) Delay() time.Duration {
t := d.Delays[d.currentIndex]
if d.currentIndex < len(d.Delays)-1 {
d.currentIndex++
}
return t
}
// Retry uses a slice of time.Duration interval delays to retry a function
// until it either errors or indicates that it is ready to proceed. If f
// returns true, or an error, the retry loop is broken. Pass a closure as f if
// you need to record a value from the operation that you are performing inside
// f.
func Retry(ctx context.Context, retryIntervals []time.Duration, f func() (bool, error)) (err error) {
if len(retryIntervals) == 0 {
retryIntervals = DefaultPollIntervals
}
d := delayer{Delays: retryIntervals}
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
ok, err := f()
if err != nil {
return err
}
if ok {
return nil
}
time.Sleep(d.Delay())
}
}
}
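// Example usage (illustrative; checkStatus is a hypothetical helper):
// err := Retry(ctx, nil, func() (bool, error) {
// status, err := checkStatus()
// if err != nil {
// return false, err
// }
// return status == "done", nil
// })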


@ -1,86 +0,0 @@
package retry
import (
"context"
"errors"
"testing"
"time"
)
var errExpectedFailure = errors.New("expected failure for test purposes")
func TestDelayer(t *testing.T) {
delays := []time.Duration{
time.Millisecond,
2 * time.Millisecond,
4 * time.Millisecond,
10 * time.Millisecond,
}
tt := []struct {
desc string
numRetries int
expDelay time.Duration
}{
{"first try", 0, time.Millisecond},
{"second try", 1, 2 * time.Millisecond},
{"len(delays) try", len(delays) - 1, delays[len(delays)-1]},
{"len(delays) + 1 try", len(delays), delays[len(delays)-1]},
{"len(delays) * 2 try", len(delays) * 2, delays[len(delays)-1]},
}
for _, tc := range tt {
t.Run(tc.desc, func(t *testing.T) {
var (
d = delayer{Delays: delays}
delay time.Duration
)
for i := tc.numRetries + 1; i > 0; i-- {
delay = d.Delay()
}
if delay != tc.expDelay {
t.Fatalf(
"expected delay of %s after %d retries, but got %s",
tc.expDelay, tc.numRetries, delay)
}
})
}
}
func TestRetry(t *testing.T) {
delays := []time.Duration{
time.Millisecond,
2 * time.Millisecond,
3 * time.Millisecond,
}
tt := []struct {
desc string
tries int
success bool
err error
}{
{"first try", 1, true, nil},
{"second try error", 2, false, errExpectedFailure},
{"third try success", 3, true, nil},
}
for _, tc := range tt {
t.Run(tc.desc, func(t *testing.T) {
tries := 0
retryFunc := func() (bool, error) {
tries++
if tries == tc.tries {
return tc.success, tc.err
}
t.Logf("try #%d unsuccessful: trying again up to %d times", tries, tc.tries)
return false, nil
}
err := Retry(context.Background(), delays, retryFunc)
if err != tc.err {
t.Fatalf("expected error %s, but got error %s", tc.err, err)
}
if tries != tc.tries {
t.Fatalf("expected %d tries, but tried %d times", tc.tries, tries)
}
})
}
}


@ -1,8 +0,0 @@
FROM lambci/lambda-base:raw
LABEL maintainer="lee@geeksinthewoods.com"
COPY . /var/task
WORKDIR /var/task


@ -1,18 +0,0 @@
{
"FunctionName": "{LAMBDA_FUNC}",
"MemorySize": 512,
"Role": "",
"Timeout": 300,
"Runtime": "python2.7",
"TracingConfig": {
"Mode": "PassThrough"
},
"Description": "Ship logs from cloudwatch to datadog",
"Handler": "lambda_function.lambda_handler",
"Environment": {
"Variables": {
"DD_API_KEY": "{DATADOG_APIKEY}",
"LAMBDA_FUNC": "{LAMBDA_FUNC}"
}
}
}


@ -1,505 +0,0 @@
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2018 Datadog, Inc.
from __future__ import print_function
import base64
import gzip
import json
import os
import re
import socket
import ssl
import urllib
from io import BytesIO, BufferedReader
import boto3
# Proxy
# Define the proxy endpoint to forward the logs to
DD_SITE = os.getenv("DD_SITE", default="datadoghq.com")
DD_URL = os.getenv("DD_URL", default="lambda-intake.logs." + DD_SITE)
# Define the proxy port to forward the logs to
try:
if "DD_SITE" in os.environ and DD_SITE == "datadoghq.eu":
DD_PORT = int(os.environ.get("DD_PORT", 443))
else:
DD_PORT = int(os.environ.get("DD_PORT", 10516))
except Exception:
DD_PORT = 10516
# Scrubbing sensitive data
# Option to redact all pattern that looks like an ip address / email address
try:
is_ipscrubbing = os.environ["REDACT_IP"]
except Exception:
is_ipscrubbing = False
try:
is_emailscrubbing = os.environ["REDACT_EMAIL"]
except Exception:
is_emailscrubbing = False
# DD_API_KEY: Datadog API Key
DD_API_KEY = "<your_api_key>"
if "DD_KMS_API_KEY" in os.environ:
ENCRYPTED = os.environ["DD_KMS_API_KEY"]
DD_API_KEY = boto3.client("kms").decrypt(
CiphertextBlob=base64.b64decode(ENCRYPTED)
)["Plaintext"]
elif "DD_API_KEY" in os.environ:
DD_API_KEY = os.environ["DD_API_KEY"]
# Strip any trailing and leading whitespace from the API key
DD_API_KEY = DD_API_KEY.strip()
cloudtrail_regex = re.compile(
    r"\d+_CloudTrail_\w{2}-\w{4,9}-\d_\d{8}T\d{4}Z.+\.json\.gz$", re.I
)
ip_regex = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", re.I)
email_regex = re.compile(r"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+", re.I)
DD_SOURCE = "ddsource"
DD_CUSTOM_TAGS = "ddtags"
DD_SERVICE = "service"
DD_HOST = "host"
DD_FORWARDER_VERSION = "1.2.3"
# Pass custom tags as environment variable, ensure comma separated, no trailing comma in envvar!
DD_TAGS = os.environ.get("DD_TAGS", "")
class DatadogConnection(object):
def __init__(self, host, port, ddApiKey):
self.host = host
self.port = port
self.api_key = ddApiKey
self._sock = None
def _connect(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(s)
s.connect((self.host, self.port))
return s
def safe_submit_log(self, log, metadata):
try:
self.send_entry(log, metadata)
except Exception as e:
# retry once
if self._sock:
# make sure we don't keep old connections open
self._sock.close()
self._sock = self._connect()
self.send_entry(log, metadata)
return self
def send_entry(self, log_entry, metadata):
# The log_entry can only be a string or a dict
if isinstance(log_entry, str):
log_entry = {"message": log_entry}
elif not isinstance(log_entry, dict):
raise Exception(
"Cannot send the entry as it must be either a string or a dict. Provided entry: "
+ str(log_entry)
)
# Merge with metadata
log_entry = merge_dicts(log_entry, metadata)
# Send to Datadog
str_entry = json.dumps(log_entry)
# Scrub ip addresses if activated
if is_ipscrubbing:
try:
str_entry = ip_regex.sub("xxx.xxx.xxx.xx", str_entry)
except Exception as e:
print(
"Unexpected exception while scrubbing logs: {} for event {}".format(
str(e), str_entry
)
)
# Scrub email addresses if activated
if is_emailscrubbing:
try:
str_entry = email_regex.sub("xxxxx@xxxxx.com", str_entry)
except Exception as e:
print(
"Unexpected exception while scrubbing logs: {} for event {}".format(
str(e), str_entry
)
)
# For debugging purpose uncomment the following line
# print(str_entry)
prefix = "%s " % self.api_key
return self._sock.send((prefix + str_entry + "\n").encode("UTF-8"))
def __enter__(self):
self._sock = self._connect()
return self
def __exit__(self, ex_type, ex_value, traceback):
if self._sock:
self._sock.close()
if ex_type is not None:
print("DatadogConnection exit: ", ex_type, ex_value, traceback)
def lambda_handler(event, context):
# Check prerequisites
if DD_API_KEY == "<your_api_key>" or DD_API_KEY == "":
raise Exception(
"You must configure your API key before starting this lambda function (see #Parameters section)"
)
# Check if the API key is the correct number of characters
if len(DD_API_KEY) != 32:
raise Exception(
"The API key is not the expected length. Please confirm that your API key is correct"
)
metadata = {"ddsourcecategory": "aws"}
# create socket
with DatadogConnection(DD_URL, DD_PORT, DD_API_KEY) as con:
# Add the context to meta
if "aws" not in metadata:
metadata["aws"] = {}
aws_meta = metadata["aws"]
aws_meta["function_version"] = context.function_version
aws_meta["invoked_function_arn"] = context.invoked_function_arn
# Add custom tags here by adding new value with the following format "key1:value1, key2:value2" - might be subject to modifications
dd_custom_tags_data = {
"forwardername": context.function_name.lower(),
"memorysize": context.memory_limit_in_mb,
"forwarder_version": DD_FORWARDER_VERSION,
}
metadata[DD_CUSTOM_TAGS] = ",".join(
filter(
None,
[
DD_TAGS,
",".join(
[
"{}:{}".format(k, v)
for k, v in dd_custom_tags_data.iteritems()
]
),
],
)
)
try:
logs = generate_logs(event, context, metadata)
for log in logs:
con = con.safe_submit_log(log, metadata)
except Exception as e:
print("Unexpected exception: {} for event {}".format(str(e), event))
def generate_logs(event, context, metadata):
try:
# Route to the corresponding parser
event_type = parse_event_type(event)
if event_type == "s3":
logs = s3_handler(event, context, metadata)
elif event_type == "awslogs":
logs = awslogs_handler(event, context, metadata)
elif event_type == "events":
logs = cwevent_handler(event, metadata)
elif event_type == "sns":
logs = sns_handler(event, metadata)
except Exception as e:
# Logs through the socket the error
err_message = "Error parsing the object. Exception: {} for event {}".format(
str(e), event
)
logs = [err_message]
return logs
# Utility functions
def parse_event_type(event):
if "Records" in event and len(event["Records"]) > 0:
if "s3" in event["Records"][0]:
return "s3"
elif "Sns" in event["Records"][0]:
return "sns"
elif "awslogs" in event:
return "awslogs"
elif "detail" in event:
return "events"
raise Exception("Event type not supported (see #Event supported section)")
# Handle S3 events
def s3_handler(event, context, metadata):
s3 = boto3.client("s3")
# Get the object from the event and show its content type
bucket = event["Records"][0]["s3"]["bucket"]["name"]
key = urllib.unquote_plus(event["Records"][0]["s3"]["object"]["key"]).decode("utf8")
keyMetadata = parse_key_metadata(key)
for k in keyMetadata :
metadata[k] = keyMetadata[k]
source = parse_event_source(event, key)
metadata[DD_SOURCE] = source
##Get the ARN of the service and set it as the hostname
if DD_HOST not in metadata.keys() :
hostname = parse_service_arn(source, key, bucket, context)
if hostname:
metadata[DD_HOST] = hostname
##default service to source value
if DD_SERVICE not in metadata.keys() :
metadata[DD_SERVICE] = source
# Extract the S3 object
response = s3.get_object(Bucket=bucket, Key=key)
body = response["Body"]
data = body.read()
# If the name has a .gz extension, then decompress the data
if key[-3:] == ".gz":
with gzip.GzipFile(fileobj=BytesIO(data)) as decompress_stream:
# Reading line by line avoid a bug where gzip would take a very long time (>5min) for
# file around 60MB gzipped
data = "".join(BufferedReader(decompress_stream))
if is_cloudtrail(str(key)):
cloud_trail = json.loads(data)
for event in cloud_trail["Records"]:
# Create structured object and send it
structured_line = merge_dicts(
event, {"aws": {"s3": {"bucket": bucket, "key": key}}}
)
yield structured_line
else:
# Send lines to Datadog
for line in data.splitlines():
# Create structured object and send it
structured_line = {
"aws": {"s3": {"bucket": bucket, "key": key}},
"message": line,
}
yield structured_line
# Handle CloudWatch logs
def awslogs_handler(event, context, metadata):
# Get logs
with gzip.GzipFile(
fileobj=BytesIO(base64.b64decode(event["awslogs"]["data"]))
) as decompress_stream:
# Reading line by line avoid a bug where gzip would take a very long
# time (>5min) for file around 60MB gzipped
data = "".join(BufferedReader(decompress_stream))
logs = json.loads(str(data))
# Set the source on the logs
source = logs.get("logGroup", "cloudwatch")
metadata[DD_SOURCE] = parse_event_source(event, source)
# Default service to source value
metadata[DD_SERVICE] = metadata[DD_SOURCE]
# Build aws attributes
aws_attributes = {
"aws": {
"awslogs": {
"logGroup": logs["logGroup"],
"logStream": logs["logStream"],
"owner": logs["owner"],
}
}
}
# For Lambda logs we want to extract the function name,
# then rebuild the arn of the monitored lambda using that name.
# Start by splitting the log group to get the function name
if metadata[DD_SOURCE] == "lambda":
log_group_parts = logs["logGroup"].split("/lambda/")
        if len(log_group_parts) > 1:
            function_name = log_group_parts[1].lower()
# Split the arn of the forwarder to extract the prefix
arn_parts = context.invoked_function_arn.split("function:")
if len(arn_parts) > 0:
arn_prefix = arn_parts[0]
# Rebuild the arn by replacing the function name
arn = arn_prefix + "function:" + function_name
# Add the arn as a log attribute
arn_attributes = {"lambda": {"arn": arn}}
aws_attributes = merge_dicts(aws_attributes, arn_attributes)
# Add the function name as tag
metadata[DD_CUSTOM_TAGS] += ",functionname:" + function_name
# Set the arn as the hostname
metadata[DD_HOST] = arn
# Create and send structured logs to Datadog
for log in logs["logEvents"]:
yield merge_dicts(log, aws_attributes)
# Handle Cloudwatch Events
def cwevent_handler(event, metadata):
data = event
# Set the source on the log
source = data.get("source", "cloudwatch")
service = source.split(".")
if len(service) > 1:
metadata[DD_SOURCE] = service[1]
else:
metadata[DD_SOURCE] = "cloudwatch"
##default service to source value
metadata[DD_SERVICE] = metadata[DD_SOURCE]
yield data
# Handle Sns events
def sns_handler(event, metadata):
data = event
# Set the source on the log
metadata[DD_SOURCE] = parse_event_source(event, "sns")
for ev in data["Records"]:
# Create structured object and send it
structured_line = ev
yield structured_line
def merge_dicts(a, b, path=None):
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
raise Exception(
"Conflict while merging metadatas and the log entry at %s"
% ".".join(path + [str(key)])
)
else:
a[key] = b[key]
return a
def is_cloudtrail(key):
match = cloudtrail_regex.search(key)
return bool(match)
def parse_event_source(event, key):
if "elasticloadbalancing" in key:
return "elb"
for source in [
"lambda",
"redshift",
"cloudfront",
"kinesis",
"mariadb",
"mysql",
"apigateway",
"route53",
"vpc",
"rds",
"sns",
"waf",
"docdb",
"ecs",
]:
if source in key:
return source
if "API-Gateway" in key:
return "apigateway"
if is_cloudtrail(str(key)):
return "cloudtrail"
if "awslogs" in event:
return "cloudwatch"
if "Records" in event and len(event["Records"]) > 0:
if "s3" in event["Records"][0]:
return "s3"
return "aws"
def parse_service_arn(source, key, bucket, context):
if source == "elb":
#For ELB logs we parse the filename to extract parameters in order to rebuild the ARN
#1. We extract the region from the filename
#2. We extract the loadbalancer name and replace the "." by "/" to match the ARN format
#3. We extract the id of the loadbalancer
#4. We build the arn
keysplit = key.split("_")
idsplit = key.split("/")
if len(keysplit) > 3:
region = keysplit[2].lower()
name = keysplit[3]
elbname = name.replace(".", "/")
if len(idsplit) > 1:
idvalue = idsplit[1]
return "arn:aws:elasticloadbalancing:" + region + ":" + idvalue + ":loadbalancer/" + elbname
if source == "s3":
#For S3 access logs we use the bucket name to rebuild the arn
if bucket:
return "arn:aws:s3:::" + bucket
if source == "cloudfront":
#For Cloudfront logs we need to get the account and distribution id from the lambda arn and the filename
#1. We extract the cloudfront id from the filename
#2. We extract the AWS account id from the lambda arn
#3. We build the arn
namesplit = key.split("/")
if len(namesplit) > 0:
filename = namesplit[len(namesplit)-1]
#(distribution-ID.YYYY-MM-DD-HH.unique-ID.gz)
filenamesplit = filename.split(".")
if len(filenamesplit) > 3:
distributionID = filenamesplit[len(filenamesplit)-4].lower()
arn = context.invoked_function_arn
arnsplit = arn.split(":")
if len(arnsplit) == 7:
awsaccountID = arnsplit[4].lower()
return "arn:aws:cloudfront::" + awsaccountID+":distribution/" + distributionID
if source == "redshift":
#For redshift logs we leverage the filename to extract the relevant information
#1. We extract the region from the filename
#2. We extract the account-id from the filename
#3. We extract the name of the cluster
#4. We build the arn: arn:aws:redshift:region:account-id:cluster:cluster-name
namesplit = key.split("/")
if len(namesplit) == 8:
region = namesplit[3].lower()
accountID = namesplit[1].lower()
filename = namesplit[7]
filesplit = filename.split("_")
if len(filesplit) == 6:
clustername = filesplit[3]
return "arn:aws:redshift:" + region + ":" + accountID + ":cluster:" + clustername
return
def parse_key_metadata(key):
metadata = {}
keysplit = key.split("/")
    for k in keysplit:
        if "=" in k:
            prt = k.split("=")
            metadata[prt[0]] = prt[1]
        elif "_" in k:
            kn = k.split("_")[0]
            if kn in ["source", "cluster", "service", "env", "region", "host"]:
                metadata[kn] = k.replace(kn + "_", "")
return metadata
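The parsing rules in `parse_key_metadata` are easier to scan outside of Python 2 string handling. A rough Go equivalent of the same logic (hypothetical, purely illustrative): `k=v` path segments become metadata entries, and segments like `env_prod` map a known prefix to its suffix.

```go
package main

import (
	"fmt"
	"strings"
)

// parseKeyMetadata mirrors the Python helper above.
func parseKeyMetadata(key string) map[string]string {
	known := map[string]bool{
		"source": true, "cluster": true, "service": true,
		"env": true, "region": true, "host": true,
	}
	md := map[string]string{}
	for _, part := range strings.Split(key, "/") {
		if i := strings.Index(part, "="); i >= 0 {
			md[part[:i]] = part[i+1:]
		} else if i := strings.Index(part, "_"); i >= 0 && known[part[:i]] {
			md[part[:i]] = part[i+1:]
		}
	}
	return md
}

func main() {
	// Prints: map[env:prod service:web-api]
	fmt.Println(parseKeyMetadata("env_prod/service=web-api/2019/08/22/log.gz"))
}
```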

View File

@ -1,162 +0,0 @@
package main
import (
"context"
"expvar"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
"log"
"os"
"strings"
"time"
"geeks-accelerator/oss/saas-starter-kit/tools/devops/cmd/cicd"
_ "github.com/lib/pq"
"github.com/urfave/cli"
)
// build is the git version of this program. It is set using build flags in the makefile.
var build = "develop"
// service is the name of the program used for logging, tracing and
// the prefix used for loading env variables
// ie: export DEVOPS_ENV=dev
var service = "DEVOPS"
func main() {
// =========================================================================
// Logging
log := log.New(os.Stdout, service+" : ", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)
// =========================================================================
// Log App Info
// Print the build version for our logs. Also expose it under /debug/vars.
expvar.NewString("build").Set(build)
log.Printf("main : Started : Application Initializing version %q", build)
defer log.Println("main : Completed")
log.Printf("main : Args: %s", strings.Join(os.Args, " "))
// =========================================================================
// Start Truss
var (
buildFlags cicd.ServiceBuildFlags
deployFlags cicd.ServiceDeployFlags
migrateFlags cicd.MigrateFlags
)
app := cli.NewApp()
app.Commands = []cli.Command{
{
Name: "build",
Usage: "-service=web-api -env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &buildFlags.ServiceName},
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &buildFlags.Env},
cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &buildFlags.DockerFile},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &buildFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &buildFlags.ProjectName},
cli.StringFlag{Name: "build_dir", Usage: "build context directory", Destination: &buildFlags.BuildDir},
cli.StringFlag{Name: "private_bucket", Usage: "dev, stage, or prod", Destination: &buildFlags.S3BucketPrivateName},
cli.BoolFlag{Name: "lambda", Usage: "build as lambda function", Destination: &buildFlags.IsLambda},
cli.BoolFlag{Name: "no_cache", Usage: "skip docker cache", Destination: &buildFlags.NoCache},
cli.BoolFlag{Name: "no_push", Usage: "skip docker push after build", Destination: &buildFlags.NoPush},
},
Action: func(c *cli.Context) error {
req, err := cicd.NewServiceBuildRequest(log, buildFlags)
if err != nil {
return err
}
return cicd.ServiceBuild(log, req)
},
},
{
Name: "deploy",
Usage: "-service=web-api -env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &deployFlags.ServiceName},
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &deployFlags.Env},
cli.BoolFlag{Name: "enable_https", Usage: "enable HTTPS", Destination: &deployFlags.EnableHTTPS},
cli.StringFlag{Name: "primary_host", Usage: "dev, stage, or prod", Destination: &deployFlags.ServiceHostPrimary},
cli.StringSliceFlag{Name: "host_names", Usage: "dev, stage, or prod", Value: &deployFlags.ServiceHostNames},
cli.StringFlag{Name: "private_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPrivateName},
cli.StringFlag{Name: "public_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPublicName},
cli.BoolFlag{Name: "public_bucket_cloudfront", Usage: "serve static files from Cloudfront", Destination: &deployFlags.S3BucketPublicCloudfront},
cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &deployFlags.DockerFile},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &deployFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &deployFlags.ProjectName},
cli.BoolFlag{Name: "enable_elb", Usage: "enable deployed to use Elastic Load Balancer", Destination: &deployFlags.EnableEcsElb},
cli.BoolTFlag{Name: "lambda_vpc", Usage: "deploy lambda behind VPC", Destination: &deployFlags.EnableLambdaVPC},
cli.BoolFlag{Name: "static_files_s3", Usage: "service static files from S3", Destination: &deployFlags.StaticFilesS3Enable},
cli.BoolFlag{Name: "static_files_img_resize", Usage: "enable response images from service", Destination: &deployFlags.StaticFilesImgResizeEnable},
cli.BoolFlag{Name: "recreate_service", Usage: "skip docker push after build", Destination: &deployFlags.RecreateService},
},
Action: func(c *cli.Context) error {
if len(deployFlags.ServiceHostNames.Value()) == 1 {
var hostNames []string
for _, inpVal := range deployFlags.ServiceHostNames.Value() {
pts := strings.Split(inpVal, ",")
for _, h := range pts {
h = strings.TrimSpace(h)
if h != "" {
hostNames = append(hostNames, h)
}
}
}
deployFlags.ServiceHostNames = hostNames
}
req, err := cicd.NewServiceDeployRequest(log, deployFlags)
if err != nil {
return err
}
// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: req.Env,
}
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
return cicd.ServiceDeploy(log, ctx, req)
},
},
{
Name: "migrate",
Usage: "-env=dev",
Flags: []cli.Flag{
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &migrateFlags.Env},
cli.StringFlag{Name: "root", Usage: "project root directory", Destination: &migrateFlags.ProjectRoot},
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &migrateFlags.ProjectName},
},
Action: func(c *cli.Context) error {
req, err := cicd.NewMigrateRequest(log, migrateFlags)
if err != nil {
return err
}
// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: req.Env,
}
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
return cicd.Migrate(log, ctx, req)
},
},
}
err := app.Run(os.Args)
if err != nil {
log.Fatalf("main : Truss : %+v", err)
}
log.Printf("main : Truss : Completed")
}
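Both the `deploy` and `migrate` actions above build an identical context before handing off to the `cicd` package. As a sketch of a possible cleanup (a hypothetical refactor, not code from the repo), a small helper dropped into this file would keep that construction in one place:

```go
// newRequestContext returns a context carrying the values the cicd
// package reads when processing a request, mirroring the inline
// construction in the deploy and migrate actions above. It relies
// only on imports already present in this file.
func newRequestContext(env string) context.Context {
	v := webcontext.Values{
		Now: time.Now(),
		Env: env,
	}
	return context.WithValue(context.Background(), webcontext.KeyValues, &v)
}
```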

View File

@ -1,8 +0,0 @@
SHELL := /bin/bash
install:
go install .
build:
go install .

View File

@ -1,24 +0,0 @@
#!/usr/bin/env bash
doPush=0
if [[ "${CI_REGISTRY_IMAGE}" != "" ]]; then
docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}
releaseImg="${CI_REGISTRY_IMAGE}:devops-${CI_COMMIT_REF_NAME}"
doPush=1
else
releaseImg="devops"
fi
echo "release image: ${releaseImg}"
docker pull ${releaseImg} || true
docker build -f tools/devops/Dockerfile --cache-from ${releaseImg} -t ${releaseImg} .
if [[ $doPush == 1 ]]; then
docker push ${releaseImg}
fi
docker run --rm --entrypoint=cat ${releaseImg} /go/bin/devops > devops
chmod +x devops
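Two details worth noting in this script: the `docker pull ${releaseImg} || true` ahead of the build is what makes `--cache-from` effective, since the previously pushed image must exist locally before Docker can reuse its layers; and the final `docker run --rm --entrypoint=cat` invocation copies the compiled binary out of the image without ever running the tool itself.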

View File

@ -1,2 +1,2 @@
schema
local.env
.local.env

View File

@ -1,68 +1,103 @@
# SaaS Schema
Copyright 2019, Geeks Accelerator
accelerator@geeksinthewoods.com
## Description
_schema_ is a command line tool for local development that executes database migrations.
<!-- toc -->
- [Overview](#overview)
- [Installation](#installation)
- [Usage](#usage)
* [Commands](#commands)
* [Examples](#examples)
- [Join us on Gopher Slack](#join-us-on-gopher-slack)
<!-- tocstop -->
## Overview
This command line tool executes the database migrations defined in
[internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema). It
should be used to test and deploy schema migrations against your local development database (hosted by docker).
For additional details regarding this tool, refer to
[build/cicd](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/build/cicd#schema-migrations)
## Installation
Make sure you have a working Go environment. Go version 1.12+ is supported. [See
the install instructions for Go](http://golang.org/doc/install.html).
## Usage
```bash
$ go run main.go [global options] command [command options] [arguments...]
```
### Global Options
* Show help
`--help, -h`
* Print the version
`--version, -v`
### Commands
* `migrate` - Executes the database migrations defined in
[internal/schema](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/tree/master/internal/schema) for local
development. Default values are set for all command options that target the Postgres database running via
[docker compose](https://gitlab.com/geeks-accelerator/oss/saas-starter-kit/blob/master/docker-compose.yaml#L11).
Environment variables can be set as an alternative to passing in the command line options.
```bash
$ go run main.go migrate [command options]
```
Options:
```bash
--env value target environment, one of [dev, stage, prod] (default: "dev") [$ENV]
--host value host (default: "127.0.0.1:5433") [$SCHEMA_DB_HOST]
--user value username (default: "postgres") [$SCHEMA_DB_USER]
--pass value password (default: "postgres") [$SCHEMA_DB_PASS]
--database value name of the default database (default: "shared") [$SCHEMA_DB_DATABASE]
--driver value database driver to use for the connection (default: "postgres") [$SCHEMA_DB_DRIVER]
--disable-tls disable TLS for the database connection [$SCHEMA_DB_DISABLE_TLS]
```
* `help` - Shows a list of commands
```bash
$ go run main.go help
```
Or for one command:
```bash
$ go run main.go help migrate
```
### Examples
Execute the database migrations against the local Postgres database.
```bash
$ go run main.go migrate
```
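The same options can be supplied through the environment instead of flags; for example, `SCHEMA_DB_HOST=127.0.0.1:5433 SCHEMA_DB_USER=postgres go run main.go migrate` behaves identically, per the `[$SCHEMA_DB_*]` variables listed under the migrate command above.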
### Execution
Manually execute the binary after building it:
```bash
./schema
Schema : 2019/05/25 08:20:08.152557 main.go:64: main : Started : Application Initializing version "develop"
Schema : 2019/05/25 08:20:08.152814 main.go:75: main : Config : {
"Env": "dev",
"DB": {
"Host": "127.0.0.1:5433",
"User": "postgres",
"Database": "shared",
"Driver": "postgres",
"Timezone": "utc",
"DisableTLS": true
}
}
Schema : 2019/05/25 08:20:08.158270 sqlxmigrate.go:478: HasTable migrations - SELECT 1 FROM migrations
Schema : 2019/05/25 08:20:08.164275 sqlxmigrate.go:413: Migration SCHEMA_INIT - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.166391 sqlxmigrate.go:368: Migration 20190522-01a - checking
Schema : 2019/05/25 08:20:08.166405 sqlxmigrate.go:413: Migration 20190522-01a - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.168066 sqlxmigrate.go:375: Migration 20190522-01a - already ran
Schema : 2019/05/25 08:20:08.168078 sqlxmigrate.go:368: Migration 20190522-01b - checking
Schema : 2019/05/25 08:20:08.168084 sqlxmigrate.go:413: Migration 20190522-01b - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.170297 sqlxmigrate.go:375: Migration 20190522-01b - already ran
Schema : 2019/05/25 08:20:08.170319 sqlxmigrate.go:368: Migration 20190522-01c - checking
Schema : 2019/05/25 08:20:08.170327 sqlxmigrate.go:413: Migration 20190522-01c - SELECT count(0) FROM migrations WHERE id = $1
Schema : 2019/05/25 08:20:08.172044 sqlxmigrate.go:375: Migration 20190522-01c - already ran
Schema : 2019/05/25 08:20:08.172831 main.go:130: main : Migrate : Completed
Schema : 2019/05/25 08:20:08.172935 main.go:131: main : Completed
```
Or alternatively, use the make file:
```bash
make run
```
## Join us on Gopher Slack
If you are having problems installing, troubles getting the project running or would like to contribute, join the
channel #saas-starter-kit on [Gopher Slack](http://invite.slack.golangbridge.org/)

View File

@ -2,26 +2,21 @@ package main
import (
"context"
"encoding/json"
"expvar"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
"fmt"
"log"
"net/url"
"os"
"time"
"strings"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/flag"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/web/webcontext"
"geeks-accelerator/oss/saas-starter-kit/internal/schema"
"github.com/kelseyhightower/envconfig"
"github.com/lib/pq"
_ "github.com/lib/pq"
"github.com/urfave/cli"
sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
)
// build is the git version of this program. It is set using build flags in the makefile.
var build = "develop"
// service is the name of the program used for logging, tracing and
// the prefix used for loading env variables
// ie: export SCHEMA_ENV=dev
@ -38,57 +33,95 @@ type DB struct {
}
func main() {
// =========================================================================
// Logging
log := log.New(os.Stdout, service+" : ", log.LstdFlags|log.Lmicroseconds|log.Lshortfile)
log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile)
log.SetPrefix(service + " : ")
log := log.New(os.Stdout, log.Prefix(), log.Flags())
// =========================================================================
// Configuration
var cfg struct {
Env string `default:"dev" envconfig:"ENV"`
DB struct {
Host string `default:"127.0.0.1:5433" envconfig:"HOST"`
User string `default:"postgres" envconfig:"USER"`
Pass string `default:"postgres" envconfig:"PASS" json:"-"` // don't print
Database string `default:"shared" envconfig:"DATABASE"`
Driver string `default:"postgres" envconfig:"DRIVER"`
Timezone string `default:"utc" envconfig:"TIMEZONE"`
DisableTLS bool `default:"true" envconfig:"DISABLE_TLS"`
}
// New CLI application.
app := cli.NewApp()
app.Name = "schema"
app.Version = "1.0.0"
app.Author = "Lee Brown"
app.Email = "lee@geeksinthewoods.com"
app.Commands = []cli.Command{
{
Name: "migrate",
Aliases: []string{"m"},
Usage: "run schema migration",
Flags: []cli.Flag{
cli.StringFlag{
Name: "env",
Usage: fmt.Sprintf("target environment, one of [%s]",
strings.Join(webcontext.EnvNames, ", ")),
Value: "dev",
EnvVar: "ENV",
},
cli.StringFlag{
Name: "host",
Usage: "host",
Value: "127.0.0.1:5433",
EnvVar: "SCHEMA_DB_HOST",
},
cli.StringFlag{
Name: "user",
Usage: "username",
Value: "postgres",
EnvVar: "SCHEMA_DB_USER",
},
cli.StringFlag{
Name: "pass",
Usage: "password",
Value: "postgres",
EnvVar: "SCHEMA_DB_PASS",
},
cli.StringFlag{
Name: "database",
Usage: "name of the default",
Value: "shared",
EnvVar: "SCHEMA_DB_DATABASE",
},
cli.StringFlag{
Name: "driver",
Usage: "database drive to use for connection",
Value: "postgres",
EnvVar: "SCHEMA_DB_DRIVER",
},
cli.BoolTFlag{
Name: "disable-tls",
Usage: "disable TLS for the database connection",
EnvVar: "SCHEMA_DB_DISABLE_TLS",
},
},
Action: func(c *cli.Context) error {
targetEnv := c.String("env")
var dbInfo = DB{
Host: c.String("host"),
User: c.String("user"),
Pass: c.String("pass"),
Database: c.String("database"),
Driver: c.String("driver"),
DisableTLS: c.Bool("disable-tls"),
}
return runMigrate(log, targetEnv, dbInfo)
},
},
}
// For additional details refer to https://github.com/kelseyhightower/envconfig
if err := envconfig.Process(service, &cfg); err != nil {
log.Fatalf("main : Parsing Config : %v", err)
}
if err := flag.Process(&cfg); err != nil {
if err != flag.ErrHelp {
log.Fatalf("main : Parsing Command Line : %v", err)
}
return // We displayed help.
}
// =========================================================================
// Log App Info
// Print the build version for our logs. Also expose it under /debug/vars.
expvar.NewString("build").Set(build)
log.Printf("main : Started : Application Initializing version %q", build)
defer log.Println("main : Completed")
// Print the config for our logs. It's important that any credentials in the config
// that could expose a security risk are excluded from being json encoded by
// applying the tag `json:"-"` to the struct var.
{
cfgJSON, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
log.Fatalf("main : Marshalling Config to JSON : %v", err)
}
log.Printf("main : Config : %v\n", string(cfgJSON))
err := app.Run(os.Args)
if err != nil {
log.Fatalf("%+v", err)
}
}
// runMigrate executes the schema migration against the provided database connection details.
func runMigrate(log *log.Logger, targetEnv string, dbInfo DB) error {
// =========================================================================
// Start Database
var dbUrl url.URL
@ -97,20 +130,18 @@ func main() {
var q url.Values = make(map[string][]string)
// Handle SSL Mode
if cfg.DB.DisableTLS {
if dbInfo.DisableTLS {
q.Set("sslmode", "disable")
} else {
q.Set("sslmode", "require")
}
q.Set("timezone", cfg.DB.Timezone)
// Construct url.
dbUrl = url.URL{
Scheme: cfg.DB.Driver,
User: url.UserPassword(cfg.DB.User, cfg.DB.Pass),
Host: cfg.DB.Host,
Path: cfg.DB.Database,
Scheme: dbInfo.Driver,
User: url.UserPassword(dbInfo.User, dbInfo.Pass),
Host: dbInfo.Host,
Path: dbInfo.Database,
RawQuery: q.Encode(),
}
}
@ -118,27 +149,23 @@ func main() {
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
// name use RegisterWithServiceName.
sqltrace.Register(cfg.DB.Driver, &pq.Driver{}, sqltrace.WithServiceName(service))
masterDb, err := sqlxtrace.Open(cfg.DB.Driver, dbUrl.String())
sqltrace.Register(dbInfo.Driver, &pq.Driver{}, sqltrace.WithServiceName(service))
masterDb, err := sqlxtrace.Open(dbInfo.Driver, dbUrl.String())
if err != nil {
log.Fatalf("main : Register DB : %s : %v", cfg.DB.Driver, err)
log.Fatalf("main : Register DB : %s : %v", dbInfo.Driver, err)
}
defer masterDb.Close()
// =========================================================================
// Start Migrations
// Set the context with the required values to
// process the request.
v := webcontext.Values{
Now: time.Now(),
Env: cfg.Env,
}
ctx := context.WithValue(context.Background(), webcontext.KeyValues, &v)
ctx := context.Background()
// Execute the migrations
if err = schema.Migrate(ctx, masterDb, log, false); err != nil {
log.Fatalf("main : Migrate : %v", err)
if err = schema.Migrate(ctx, targetEnv, masterDb, log, false); err != nil {
return err
}
log.Printf("main : Migrate : Completed")
return nil
}
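With the defaults above, `runMigrate` ends up handing the driver a DSN of the form `postgres://user:pass@host/db?sslmode=disable`. A standalone sketch of that URL construction; the values here are the local-development defaults shown above, not production settings:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := make(url.Values)
	// Local development runs without TLS, matching the disable-tls default.
	q.Set("sslmode", "disable")

	u := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword("postgres", "postgres"),
		Host:     "127.0.0.1:5433",
		Path:     "shared",
		RawQuery: q.Encode(),
	}

	// Prints: postgres://postgres:postgres@127.0.0.1:5433/shared?sslmode=disable
	fmt.Println(u.String())
}
```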

View File

@ -1,10 +0,0 @@
SHELL := /bin/bash
install:
go install .
build:
go install .
run:
go build . && ./schema