1
0
mirror of https://github.com/raseels-repos/golang-saas-starter-kit.git synced 2025-08-08 22:36:41 +02:00

Merge branch 'master' of gitlab.com:geeks-accelerator/oss/saas-starter-kit

This commit is contained in:
Lucas Brown
2019-07-14 20:13:17 -08:00
12 changed files with 318 additions and 455 deletions

View File

@@ -34,7 +34,7 @@ cache:
.deploy_tmpl: &deploy_tmpl .deploy_tmpl: &deploy_tmpl
<<: *job_tmpl <<: *job_tmpl
script: script:
- 'devops deploy -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV} -enable_https=${ENABLE_HTTPS} -enable_elb=${ENABLE_ELB} -primary_host=${PRIMARY_HOST} -host_names=${HOST_NAMES} -private_bucket=${S3_BUCKET_PRIVATE} -public_bucket=${S3_BUCKET_PUBLIC}' - 'devops deploy -service=${SERVICE} -project=${PROJECT_NAME} -env=${TARGET_ENV} -enable_https=${ENABLE_HTTPS} -enable_elb=${ENABLE_ELB} -primary_host=${PRIMARY_HOST} -host_names=${HOST_NAMES} -private_bucket=${S3_BUCKET_PRIVATE} -public_bucket=${S3_BUCKET_PUBLIC} -static_files_s3={STATIC_FILES_S3_ENABLED} -static_files_cloudfront={STATIC_FILES_CLOUDFRONT_ENABLED} -static_files_img_resize={STATIC_FILES_IMG_RESIZE_ENABLED}'
.migrate_tmpl: &migrate_tmpl .migrate_tmpl: &migrate_tmpl
<<: *job_tmpl <<: *job_tmpl
@@ -54,6 +54,45 @@ db:migrate:dev:
TARGET_ENV: 'dev' TARGET_ENV: 'dev'
AWS_USE_ROLE: 'true' AWS_USE_ROLE: 'true'
webapp:build:dev:
<<: *build_tmpl
stage: build:dev
tags:
- dev
only:
- master
- dev
- dev-web-app
variables:
TARGET_ENV: 'dev'
SERVICE: 'web-app'
AWS_USE_ROLE: 'true'
webapp:deploy:dev:
<<: *deploy_tmpl
stage: deploy:dev
tags:
- dev
only:
- master
- dev
- dev-web-app
dependencies:
- 'webapp:build:dev'
- 'db:migrate:dev'
variables:
TARGET_ENV: 'dev'
SERVICE: 'web-app'
ENABLE_HTTPS: 1
ENABLE_ELB: 0
PRIMARY_HOST: 'eproc.tech'
HOST_NAMES: 'www.eproc.tech, dev.eproc.tech'
S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
STATIC_FILES_S3_ENABLED: 'true'
STATIC_FILES_CLOUDFRONT_ENABLED: 'false'
STATIC_FILES_IMG_RESIZE_ENABLED: 'true'
AWS_USE_ROLE: 'true'
webapi:build:dev: webapi:build:dev:
<<: *build_tmpl <<: *build_tmpl
stage: build:dev stage: build:dev
@@ -84,14 +123,15 @@ webapi:deploy:dev:
SERVICE: 'web-api' SERVICE: 'web-api'
ENABLE_HTTPS: 1 ENABLE_HTTPS: 1
ENABLE_ELB: 0 ENABLE_ELB: 0
PRIMARY_HOST: 'eproc.tech' PRIMARY_HOST: 'api.eproc.tech'
HOST_NAMES: 'www.eproc.tech, api.eproc.tech' HOST_NAMES: 'api.dev.eproc.tech'
S3_BUCKET_PRIVATE: 'saas-starter-kit-private' S3_BUCKET_PRIVATE: 'saas-starter-kit-private'
S3_BUCKET_PUBLIC: 'saas-starter-kit-public' S3_BUCKET_PUBLIC: 'saas-starter-kit-public'
STATIC_FILES_S3_ENABLED: 'false'
STATIC_FILES_CLOUDFRONT_ENABLED: 'false'
STATIC_FILES_IMG_RESIZE_ENABLED: 'false'
AWS_USE_ROLE: 'true' AWS_USE_ROLE: 'true'
#ddlogscollector:deploy:stage: #ddlogscollector:deploy:stage:
# <<: *deploy_stage_tmpl # <<: *deploy_stage_tmpl
# variables: # variables:

View File

@@ -38,6 +38,7 @@ COPY --from=builder /static /static
COPY --from=builder /templates /templates COPY --from=builder /templates /templates
ENV TEMPLATE_DIR=/templates ENV TEMPLATE_DIR=/templates
ENV STATIC_DIR=/static
ARG service ARG service
ENV SERVICE_NAME $service ENV SERVICE_NAME $service

View File

@@ -0,0 +1,161 @@
{
"family": "{SERVICE}",
"executionRoleArn": "",
"taskRoleArn": "",
"networkMode": "awsvpc",
"containerDefinitions": [
{
"name": "{ECS_SERVICE}",
"image": "{RELEASE_IMAGE}",
"essential": true,
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "{AWS_LOGS_GROUP}",
"awslogs-region": "{AWS_REGION}",
"awslogs-stream-prefix": "ecs"
}
},
"portMappings": [
{
"hostPort": 80,
"protocol": "tcp",
"containerPort": 80
}
],
"cpu": 128,
"memoryReservation": 128,
"volumesFrom": [],
"environment": [
{"name": "AWS_REGION", "value": "{AWS_REGION}"},
{"name": "AWS_USE_ROLE", "value": "true"},
{"name": "AWSLOGS_GROUP", "value": "{AWS_LOGS_GROUP}"},
{"name": "ECS_CLUSTER", "value": "{ECS_CLUSTER}"},
{"name": "ECS_SERVICE", "value": "{ECS_SERVICE}"},
{"name": "WEB_APP_HTTP_HOST", "value": "{HTTP_HOST}"},
{"name": "WEB_APP_HTTPS_HOST", "value": "{HTTPS_HOST}"},
{"name": "WEB_APP_SERVICE_PROJECT", "value": "{APP_PROJECT}"},
{"name": "WEB_APP_SERVICE_BASE_URL", "value": "{APP_BASE_URL}"},
{"name": "WEB_APP_SERVICE_HOST_NAMES", "value": "{HOST_NAMES}"},
{"name": "WEB_APP_SERVICE_ENABLE_HTTPS", "value": "{HTTPS_ENABLED}"},
{"name": "WEB_APP_SERVICE_STATICFILES_S3_ENABLED", "value": "{STATIC_FILES_S3_ENABLED}"},
{"name": "WEB_APP_SERVICE_STATICFILES_S3_PREFIX", "value": "{STATIC_FILES_S3_PREFIX}"},
{"name": "WEB_APP_SERVICE_STATICFILES_CLOUDFRONT_ENABLED", "value": "{STATIC_FILES_CLOUDFRONT_ENABLED}"},
{"name": "WEB_APP_SERVICE_STATICFILES_IMG_RESIZE_ENABLED", "value": "{STATIC_FILES_IMG_RESIZE_ENABLED}"},
{"name": "WEB_APP_REDIS_HOST", "value": "{CACHE_HOST}"},
{"name": "WEB_APP_DB_HOST", "value": "{DB_HOST}"},
{"name": "WEB_APP_DB_USER", "value": "{DB_USER}"},
{"name": "WEB_APP_DB_PASS", "value": "{DB_PASS}"},
{"name": "WEB_APP_DB_DATABASE", "value": "{DB_DATABASE}"},
{"name": "WEB_APP_DB_DRIVER", "value": "{DB_DRIVER}"},
{"name": "WEB_APP_DB_DISABLE_TLS", "value": "{DB_DISABLE_TLS}"},
{"name": "WEB_APP_AUTH_USE_AWS_SECRET_MANAGER", "value": "true"},
{"name": "WEB_APP_AUTH_AWS_SECRET_ID", "value": "auth-{ECS_SERVICE}"},
{"name": "WEB_APP_AWS_S3_BUCKET_PRIVATE", "value": "{AWS_S3_BUCKET_PRIVATE}"},
{"name": "WEB_APP_AWS_S3_BUCKET_PUBLIC", "value": "{AWS_S3_BUCKET_PUBLIC}"},
{"name": "BUILDINFO_CI_COMMIT_REF_NAME", "value": "{CI_COMMIT_REF_NAME}"},
{"name": "BUILDINFO_CI_COMMIT_REF_SLUG", "value": "{CI_COMMIT_REF_SLUG}"},
{"name": "BUILDINFO_CI_COMMIT_SHA", "value": "{CI_COMMIT_SHA}"},
{"name": "BUILDINFO_CI_COMMIT_TAG", "value": "{CI_COMMIT_TAG}"},
{"name": "BUILDINFO_CI_COMMIT_TITLE", "value": "{CI_COMMIT_TITLE}"},
{"name": "BUILDINFO_CI_COMMIT_DESCRIPTION", "value": "{CI_COMMIT_DESCRIPTION}"},
{"name": "BUILDINFO_CI_COMMIT_JOB_ID", "value": "{CI_COMMIT_JOB_ID}"},
{"name": "BUILDINFO_CI_COMMIT_JOB_URL", "value": "{CI_COMMIT_JOB_URL}"},
{"name": "BUILDINFO_CI_COMMIT_PIPELINE_ID", "value": "{CI_COMMIT_PIPELINE_ID}"},
{"name": "BUILDINFO_CI_COMMIT_PIPELINE_URL", "value": "{CI_COMMIT_PIPELINE_URL}"},
{"name": "DATADOG_ADDR", "value": "127.0.0.1:8125"},
{"name": "DD_TRACE_AGENT_HOSTNAME", "value": "127.0.0.1"},
{"name": "DD_TRACE_AGENT_PORT", "value": "8126"},
{"name": "DD_SERVICE_NAME", "value": "{ECS_SERVICE}"},
{"name": "DD_ENV", "value": "{ENV}"},
{"name": "ROUTE53_UPDATE_TASK_IPS", "value": "{ROUTE53_UPDATE_TASK_IPS}"},
{"name": "ROUTE53_ZONES", "value": "{ROUTE53_ZONES}"},
{"name": "ECS_ENABLE_CONTAINER_METADATA", "value": "true"}
],
"healthCheck": {
"retries": 3,
"command": [
"CMD-SHELL",
"curl -f http://localhost/ping || exit 1"
],
"timeout": 5,
"interval": 60,
"startPeriod": 60
},
"dockerLabels": {
"com.datadoghq.ad.check_names": "[\"{ECS_SERVICE}\"]",
"com.datadoghq.ad.logs": "[{\"source\": \"docker\", \"service\": \"{ECS_SERVICE}\", \"service_name\": \"{SERVICE}\", \"cluster\": \"{ECS_CLUSTER}\", \"env\": \"{ENV}\"}]",
"com.datadoghq.ad.init_configs": "[{}]",
"com.datadoghq.ad.instances": "[{\"host\": \"%%host%%\", \"port\": 80}]"
},
"ulimits": [
{
"name": "nofile",
"softLimit": 987654,
"hardLimit": 999999
}
]
},
{
"name": "datadog-agent",
"image": "datadog/agent:latest",
"essential": {DATADOG_ESSENTIAL},
"cpu": 128,
"memoryReservation": 128,
"portMappings": [
{
"containerPort": 8125
},
{
"containerPort": 8126
}
],
"environment": [
{
"name": "DD_API_KEY",
"value": "{DATADOG_APIKEY}"
},
{
"name": "DD_LOGS_ENABLED",
"value": "true"
},
{
"name": "DD_APM_ENABLED",
"value": "true"
},
{
"name": "DD_RECEIVER_PORT",
"value": "8126"
},
{
"name": "DD_APM_NON_LOCAL_TRAFFIC",
"value": "true"
},
{
"name": "DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL",
"value": "true"
},
{
"name": "DD_TAGS",
"value": "source:docker service:{ECS_SERVICE} service_name:{SERVICE} cluster:{ECS_CLUSTER} env:{ENV}"
},
{
"name": "DD_DOGSTATSD_ORIGIN_DETECTION",
"value": "true"
},
{
"name": "DD_DOGSTATSD_NON_LOCAL_TRAFFIC",
"value": "true"
},
{
"name": "ECS_FARGATE",
"value": "true"
}
]
}
],
"volumes": [],
"requiresCompatibilities": [
"FARGATE"
]
}

View File

@@ -20,8 +20,8 @@ import (
"syscall" "syscall"
"time" "time"
"geeks-accelerator/oss/saas-starter-kit/internal/mid"
"geeks-accelerator/oss/saas-starter-kit/cmd/web-app/handlers" "geeks-accelerator/oss/saas-starter-kit/cmd/web-app/handlers"
"geeks-accelerator/oss/saas-starter-kit/internal/mid"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/devops" "geeks-accelerator/oss/saas-starter-kit/internal/platform/devops"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/flag" "geeks-accelerator/oss/saas-starter-kit/internal/platform/flag"
img_resize "geeks-accelerator/oss/saas-starter-kit/internal/platform/img-resize" img_resize "geeks-accelerator/oss/saas-starter-kit/internal/platform/img-resize"
@@ -80,10 +80,10 @@ func main() {
HostNames []string `envconfig:"HOST_NAMES" example:"www.eproc.tech"` HostNames []string `envconfig:"HOST_NAMES" example:"www.eproc.tech"`
EnableHTTPS bool `default:"false" envconfig:"ENABLE_HTTPS"` EnableHTTPS bool `default:"false" envconfig:"ENABLE_HTTPS"`
TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"` TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"`
StaticDir string `default:"./static" envconfig:"STATIC_DIR"` StaticFiles struct {
StaticS3 struct { Dir string `default:"./static" envconfig:"STATIC_DIR"`
S3Enabled bool `envconfig:"ENABLED"` S3Enabled bool `envconfig:"S3_ENABLED"`
S3KeyPrefix string `default:"public/web_app/static" envconfig:"KEY_PREFIX"` S3Prefix string `default:"public/web_app/static" envconfig:"S3_PREFIX"`
CloudFrontEnabled bool `envconfig:"CLOUDFRONT_ENABLED"` CloudFrontEnabled bool `envconfig:"CLOUDFRONT_ENABLED"`
ImgResizeEnabled bool `envconfig:"IMG_RESIZE_ENABLED"` ImgResizeEnabled bool `envconfig:"IMG_RESIZE_ENABLED"`
} }
@@ -371,8 +371,8 @@ func main() {
// s3UrlFormatter is a help function used by to convert an s3 key to // s3UrlFormatter is a help function used by to convert an s3 key to
// a publicly available image URL. // a publicly available image URL.
var staticS3UrlFormatter func(string) string var staticS3UrlFormatter func(string) string
if cfg.Service.StaticS3.S3Enabled || cfg.Service.StaticS3.CloudFrontEnabled || cfg.Service.StaticS3.ImgResizeEnabled { if cfg.Service.StaticFiles.S3Enabled || cfg.Service.StaticFiles.CloudFrontEnabled || cfg.Service.StaticFiles.ImgResizeEnabled {
s3UrlFormatter, err := devops.S3UrlFormatter(awsSession, cfg.Aws.S3BucketPublic, cfg.Service.StaticS3.S3KeyPrefix, cfg.Service.StaticS3.CloudFrontEnabled) s3UrlFormatter, err := devops.S3UrlFormatter(awsSession, cfg.Aws.S3BucketPublic, cfg.Service.StaticFiles.S3Prefix, cfg.Service.StaticFiles.CloudFrontEnabled)
if err != nil { if err != nil {
log.Fatalf("main : S3UrlFormatter failed : %+v", err) log.Fatalf("main : S3UrlFormatter failed : %+v", err)
} }
@@ -381,7 +381,7 @@ func main() {
// When the path starts with a forward slash its referencing a local file, // When the path starts with a forward slash its referencing a local file,
// make sure the static file prefix is included // make sure the static file prefix is included
if strings.HasPrefix(p, "/") { if strings.HasPrefix(p, "/") {
p = filepath.Join(cfg.Service.StaticS3.S3KeyPrefix, p) p = filepath.Join(cfg.Service.StaticFiles.S3Prefix, p)
} }
return s3UrlFormatter(p) return s3UrlFormatter(p)
} }
@@ -402,7 +402,7 @@ func main() {
// templates should be updated to use a fully qualified URL for either the public file on S3 // templates should be updated to use a fully qualified URL for either the public file on S3
// on from the cloudfront distribution. // on from the cloudfront distribution.
var staticUrlFormatter func(string) string var staticUrlFormatter func(string) string
if cfg.Service.StaticS3.S3Enabled || cfg.Service.StaticS3.CloudFrontEnabled { if cfg.Service.StaticFiles.S3Enabled || cfg.Service.StaticFiles.CloudFrontEnabled {
staticUrlFormatter = staticS3UrlFormatter staticUrlFormatter = staticS3UrlFormatter
} else { } else {
baseUrl, err := url.Parse(cfg.Service.BaseUrl) baseUrl, err := url.Parse(cfg.Service.BaseUrl)
@@ -510,12 +510,12 @@ func main() {
// Image Formatter - additional functions exposed to templates for resizing images // Image Formatter - additional functions exposed to templates for resizing images
// to support response web applications. // to support response web applications.
imgResizeS3KeyPrefix := filepath.Join(cfg.Service.StaticS3.S3KeyPrefix, "images/responsive") imgResizeS3KeyPrefix := filepath.Join(cfg.Service.StaticFiles.S3Prefix, "images/responsive")
imgSrcAttr := func(ctx context.Context, p string, sizes []int, includeOrig bool) template.HTMLAttr { imgSrcAttr := func(ctx context.Context, p string, sizes []int, includeOrig bool) template.HTMLAttr {
u := staticUrlFormatter(p) u := staticUrlFormatter(p)
var srcAttr string var srcAttr string
if cfg.Service.StaticS3.ImgResizeEnabled { if cfg.Service.StaticFiles.ImgResizeEnabled {
srcAttr, _ = img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.Aws.S3BucketPublic, imgResizeS3KeyPrefix, u, sizes, includeOrig) srcAttr, _ = img_resize.S3ImgSrc(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.Aws.S3BucketPublic, imgResizeS3KeyPrefix, u, sizes, includeOrig)
} else { } else {
srcAttr = fmt.Sprintf("src=\"%s\"", u) srcAttr = fmt.Sprintf("src=\"%s\"", u)
@@ -546,7 +546,7 @@ func main() {
} }
tmplFuncs["S3ImgUrl"] = func(ctx context.Context, p string, size int) string { tmplFuncs["S3ImgUrl"] = func(ctx context.Context, p string, size int) string {
imgUrl := staticUrlFormatter(p) imgUrl := staticUrlFormatter(p)
if cfg.Service.StaticS3.ImgResizeEnabled { if cfg.Service.StaticFiles.ImgResizeEnabled {
imgUrl, _ = img_resize.S3ImgUrl(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.Aws.S3BucketPublic, imgResizeS3KeyPrefix, imgUrl, size) imgUrl, _ = img_resize.S3ImgUrl(ctx, redisClient, staticS3UrlFormatter, awsSession, cfg.Aws.S3BucketPublic, imgResizeS3KeyPrefix, imgUrl, size)
} }
return imgUrl return imgUrl
@@ -637,7 +637,7 @@ func main() {
if cfg.HTTP.Host != "" { if cfg.HTTP.Host != "" {
api := http.Server{ api := http.Server{
Addr: cfg.HTTP.Host, Addr: cfg.HTTP.Host,
Handler: handlers.APP(shutdown, log, cfg.Service.StaticDir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...), Handler: handlers.APP(shutdown, log, cfg.Service.StaticFiles.Dir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...),
ReadTimeout: cfg.HTTP.ReadTimeout, ReadTimeout: cfg.HTTP.ReadTimeout,
WriteTimeout: cfg.HTTP.WriteTimeout, WriteTimeout: cfg.HTTP.WriteTimeout,
MaxHeaderBytes: 1 << 20, MaxHeaderBytes: 1 << 20,
@@ -654,7 +654,7 @@ func main() {
if cfg.HTTPS.Host != "" { if cfg.HTTPS.Host != "" {
api := http.Server{ api := http.Server{
Addr: cfg.HTTPS.Host, Addr: cfg.HTTPS.Host,
Handler: handlers.APP(shutdown, log, cfg.Service.StaticDir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...), Handler: handlers.APP(shutdown, log, cfg.Service.StaticFiles.Dir, cfg.Service.TemplateDir, masterDb, redisClient, renderer, serviceMiddlewares...),
ReadTimeout: cfg.HTTPS.ReadTimeout, ReadTimeout: cfg.HTTPS.ReadTimeout,
WriteTimeout: cfg.HTTPS.WriteTimeout, WriteTimeout: cfg.HTTPS.WriteTimeout,
MaxHeaderBytes: 1 << 20, MaxHeaderBytes: 1 << 20,

View File

@@ -1,270 +0,0 @@
package devops
import (
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/fsnotify/fsnotify"
"github.com/pkg/errors"
)
// SyncCfgInit provides the functionality to keep config files sync'd between running tasks and across deployments.
//
// It performs an initial download of all Secrets Manager entries under secretPrefix into
// watchDir, then registers a fsnotify watcher on watchDir. The returned function is meant
// to be run in a background goroutine: it pushes local file changes up to Secrets Manager
// and, when syncInterval > 0, periodically pulls remote changes back down.
func SyncCfgInit(log *log.Logger, awsSession *session.Session, secretPrefix, watchDir string, syncInterval time.Duration) (func(), error) {
	// Tracks the last-changed time of each downloaded secret so unchanged
	// secrets are not re-downloaded on subsequent sync passes.
	localfiles := make(map[string]time.Time)

	// Do the initial sync before starting file watch to download any existing configs.
	err := SyncCfgDir(log, awsSession, secretPrefix, watchDir, localfiles)
	if err != nil {
		return nil, err
	}

	// Create a new file watcher.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// Return function that should be run in the background via a go routine. It watches for
	// files created/updated locally and syncs them to AWS Secrets Manager.
	f := func() {
		defer watcher.Close()

		// Start the remote-sync ticker BEFORE entering the blocking watch loop. Previously the
		// ticker was started after WatchCfgDir, which blocks until the watcher is closed, so the
		// periodic remote sync never actually ran.
		if syncInterval.Seconds() > 0 {
			ticker := time.NewTicker(syncInterval)
			defer ticker.Stop()

			go func() {
				for range ticker.C {
					log.Println("AWS Secrets Manager : Checking for remote updates")

					// Pull down any secrets changed remotely since the last pass.
					err := SyncCfgDir(log, awsSession, secretPrefix, watchDir, localfiles)
					if err != nil {
						log.Printf("AWS Secrets Manager : Remote sync error - %+v", err)
					}
				}
			}()
		}

		// Block watching the local dir, pushing changes up to Secrets Manager.
		WatchCfgDir(log, awsSession, secretPrefix, watchDir, watcher, localfiles)
	}

	log.Printf("AWS Secrets Manager : Watching config dir %s", watchDir)

	// Note: Out of the box fsnotify can watch a single file, or a single directory.
	if err := watcher.Add(watchDir); err != nil {
		return nil, errors.Wrapf(err, "failed to add file watcher to %s", watchDir)
	}

	return f, nil
}
// SyncCfgDir lists all the Secrets from AWS Secrets Manager for a provided prefix and downloads them locally.
//
// localfiles maps secret ID -> last-changed time of the most recently downloaded version;
// it is both read and updated here so repeat calls only download changed secrets.
func SyncCfgDir(log *log.Logger, awsSession *session.Session, secretPrefix, watchDir string, localfiles map[string]time.Time) error {
	svc := secretsmanager.New(awsSession)

	// Get a list of secrets for the prefix with the time they were last changed.
	secretIDs := make(map[string]time.Time)
	err := svc.ListSecretsPages(&secretsmanager.ListSecretsInput{}, func(res *secretsmanager.ListSecretsOutput, lastPage bool) bool {
		for _, s := range res.SecretList {
			// Skip any secret that does not have a matching prefix.
			if !strings.HasPrefix(*s.Name, secretPrefix) {
				continue
			}

			secretIDs[*s.Name] = s.LastChangedDate.UTC()
		}

		return !lastPage
	})
	if err != nil {
		return errors.Wrap(err, "failed to list secrets")
	}

	for id, curChanged := range secretIDs {
		// Load the secret by ID from Secrets Manager.
		res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
			SecretId: aws.String(id),
		})
		if err != nil {
			return errors.Wrapf(err, "failed to get secret value for id %s", id)
		}

		filename := filepath.Base(id)
		localpath := filepath.Join(watchDir, filename)

		// If the secret already exists locally and was downloaded at or after the current
		// remote last-changed time, there is nothing to update.
		if exists(localpath) {
			if lastChanged, ok := localfiles[id]; ok && curChanged.UTC().Unix() <= lastChanged.UTC().Unix() {
				continue
			}
		}

		// Secrets created from a string (as the watch handler does via SecretString) are
		// returned with SecretBinary nil — previously such secrets were written out as
		// empty files. Prefer SecretBinary but fall back to SecretString.
		dat := res.SecretBinary
		if len(dat) == 0 && res.SecretString != nil {
			dat = []byte(*res.SecretString)
		}

		log.Printf("AWS Secrets Manager : Writing Config %s", filename)
		err = ioutil.WriteFile(localpath, dat, 0644)
		if err != nil {
			return errors.Wrapf(err, "failed to write secret value for id %s to %s", id, localpath)
		}

		// Only mark that the secret was updated when the file was successfully saved locally.
		localfiles[id] = curChanged
	}

	return nil
}
// WatchCfgDir watches for new/updated files locally and uploads them to AWS Secrets Manager.
//
// It blocks until both of the watcher's channels are closed (i.e. the watcher is closed)
// and then returns nil. Individual event failures are logged rather than returned so one
// bad event does not stop the watch loop. The dir and localfiles parameters are currently
// unused but retained for interface compatibility with callers.
func WatchCfgDir(log *log.Logger, awsSession *session.Session, secretPrefix, dir string, watcher *fsnotify.Watcher, localfiles map[string]time.Time) error {
	// The loop only exits via the returns below when the watcher channels are closed;
	// the previously-present trailing `return nil` was unreachable and has been removed.
	for {
		select {
		// Watch for file system events.
		case event, ok := <-watcher.Events:
			if !ok {
				return nil
			}

			if err := handleWatchCfgEvent(log, awsSession, secretPrefix, event); err != nil {
				log.Printf("AWS Secrets Manager : Watcher Error - %+v", err)
			}

		// Watch for errors reported by the watcher itself.
		case err, ok := <-watcher.Errors:
			if !ok {
				return nil
			}

			if err != nil {
				log.Printf("AWS Secrets Manager : Watcher Error - %+v", err)
			}
		}
	}
}
// handleWatchCfgEvent handles a fsnotify event. For new files, secrets are created, for updated files, the secret is
// updated. For deleted files the secret is removed (scheduled for deletion with a recovery window).
func handleWatchCfgEvent(log *log.Logger, awsSession *session.Session, secretPrefix string, event fsnotify.Event) error {
	svc := secretsmanager.New(awsSession)

	fname := filepath.Base(event.Name)
	secretID := filepath.Join(secretPrefix, fname)

	if event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Write == fsnotify.Write {
		dat, err := ioutil.ReadFile(event.Name)
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to read file %s", event.Name)
		}

		// Create the new entry in AWS Secret Manager for the file.
		_, err = svc.CreateSecret(&secretsmanager.CreateSecretInput{
			Name:         aws.String(secretID),
			SecretString: aws.String(string(dat)),
		})
		if err == nil {
			log.Printf("AWS Secrets Manager : Secret %s created for %s", secretID, event.Name)
			return nil
		}

		// Bug fix: the type assertion previously used `!ok`, which skipped this recovery
		// handling for genuine AWS errors and panicked (nil aerr.Code()) for non-AWS errors.
		aerr, ok := err.(awserr.Error)
		if !ok {
			return errors.Wrapf(err, "file watcher failed to create secret %s for %s", secretID, event.Name)
		}

		switch aerr.Code() {
		case secretsmanager.ErrCodeInvalidRequestException:
			// InvalidRequestException: You can't create this secret because a secret with this
			// name is already scheduled for deletion.
			// Restore secret after it was already previously deleted, then update it below.
			_, err = svc.RestoreSecret(&secretsmanager.RestoreSecretInput{
				SecretId: aws.String(secretID),
			})
			if err != nil {
				return errors.Wrapf(err, "file watcher failed to restore secret %s for %s", secretID, event.Name)
			}
		case secretsmanager.ErrCodeResourceExistsException:
			// Secret already exists; fall through to update it with the new value.
		default:
			return errors.Wrapf(err, "file watcher failed to create secret %s for %s", secretID, event.Name)
		}

		// The secret exists (either pre-existing or just restored), so update its value.
		_, err = svc.UpdateSecret(&secretsmanager.UpdateSecretInput{
			SecretId:     aws.String(secretID),
			SecretString: aws.String(string(dat)),
		})
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to update secret %s for %s", secretID, event.Name)
		}

		log.Printf("AWS Secrets Manager : Secret %s updated for %s", secretID, event.Name)

	} else if event.Op&fsnotify.Remove == fsnotify.Remove || event.Op&fsnotify.Rename == fsnotify.Rename {
		// Delay delete to ensure the file is really deleted.
		//delCheck := time.NewTimer(time.Minute)
		//<-delCheck.C

		// Schedule the secret for deletion with a recovery window.
		// NOTE(review): ForceDeleteWithoutRecovery and RecoveryWindowInDays are mutually
		// exclusive per the DeleteSecret API; previously both were set, which the API
		// rejects. Only the recovery window is specified now.
		_, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
			SecretId: aws.String(secretID),

			// (Optional) Specifies the number of days that Secrets Manager waits before
			// it can delete the secret.
			//
			// This value can range from 7 to 30 days.
			RecoveryWindowInDays: aws.Int64(30),
		})
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to delete secret %s for %s", secretID, event.Name)
		}

		log.Printf("AWS Secrets Manager : Secret %s deleted for %s", secretID, event.Name)
	}

	return nil
}
// exists reports whether the named file or directory exists. Stat errors other
// than "not exist" (e.g. permission problems) are treated as the path existing.
func exists(name string) bool {
	_, err := os.Stat(name)
	return !os.IsNotExist(err)
}

View File

@@ -1,20 +0,0 @@
package devops
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// SyncS3StaticFiles copies the local files from the static directory to s3
// with public-read enabled.
func SyncS3StaticFiles(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
	// Walk the static dir and upload each file with a public-read ACL.
	iter := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
	return s3manager.NewUploader(awsSession).UploadWithIterator(aws.BackgroundContext(), iter)
}

View File

@@ -227,134 +227,3 @@ func RegisterEcsServiceTasksRoute53(log *log.Logger, awsSession *session.Session
return nil return nil
} }
/*
res, err := pester.Get("http://169.254.170.2/v2/metadata")
if err != nil {
fmt.Println("http://169.254.170.2/v2/metadata failed", err.Error())
} else {
dat, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
fmt.Println("http://169.254.170.2/v2/metadata, OK", string(dat))
}
http://169.254.170.2/v2/metadata,
{
"Cluster": "arn:aws:ecs:us-west-2:888955683113:cluster/example-project-dev",
"TaskARN": "arn:aws:ecs:us-west-2:888955683113:task/700e38dd-dec5-4201-b711-c04a51feef8a",
"Family": "web-api",
"Revision": "113",
"DesiredStatus": "RUNNING",
"KnownStatus": "RUNNING",
"Containers": [{
"DockerId": "c786dfdf6510b20294832ccbc3d66e6f1f915a4a79ead2588aa760a6365c839a",
"Name": "datadog-agent",
"DockerName": "ecs-web-api-113-datadog-agent-d884dee0c79af1fb6400",
"Image": "datadog/agent:latest",
"ImageID": "sha256:233c75f21f71838a59d478472d021be7006e752da6a70a11f77cf185c1050737",
"Labels": {
"com.amazonaws.ecs.cluster": "arn:aws:ecs:us-west-2:888955683113:cluster/example-project-dev",
"com.amazonaws.ecs.container-name": "datadog-agent",
"com.amazonaws.ecs.task-arn": "arn:aws:ecs:us-west-2:888955683113:task/700e38dd-dec5-4201-b711-c04a51feef8a",
"com.amazonaws.ecs.task-definition-family": "web-api",
"com.amazonaws.ecs.task-definition-version": "113"
},
"DesiredStatus": "RUNNING",
"KnownStatus": "STOPPED",
"ExitCode": 1,
"Limits": {
"CPU": 128,
"Memory": 0
},
"CreatedAt": "2019-07-11T05:36:54.135666318Z",
"StartedAt": "2019-07-11T05:36:54.481305866Z",
"FinishedAt": "2019-07-11T05:36:54.863742829Z",
"Type": "NORMAL",
"Networks": [{
"NetworkMode": "awsvpc",
"IPv4Addresses": ["172.31.62.204"]
}],
"Volumes": [{
"DockerName": "0960558c657c6e79d43e0e55f4ff259a97d78f58d9ad0d738e74495f4ba3cb06",
"Source": "/var/lib/docker/volumes/0960558c657c6e79d43e0e55f4ff259a97d78f58d9ad0d738e74495f4ba3cb06/_data",
"Destination": "/etc/datadog-agent"
}, {
"DockerName": "7a103f880857a1c2947e4a1bfff48efd25d24943a2d6a6e4dd86fa9dab3f10f0",
"Source": "/var/lib/docker/volumes/7a103f880857a1c2947e4a1bfff48efd25d24943a2d6a6e4dd86fa9dab3f10f0/_data",
"Destination": "/tmp"
}, {
"DockerName": "c88c03366eadb5d9da27708919e77ac5f8e0877c3dbb32c80580cb22e5811c00",
"Source": "/var/lib/docker/volumes/c88c03366eadb5d9da27708919e77ac5f8e0877c3dbb32c80580cb22e5811c00/_data",
"Destination": "/var/log/datadog"
}, {
"DockerName": "df97387f6ccc34c023055ef8a34a41e9d1edde4715c1849f1460683d31749539",
"Source": "/var/lib/docker/volumes/df97387f6ccc34c023055ef8a34a41e9d1edde4715c1849f1460683d31749539/_data",
"Destination": "/var/run/s6"
}]
}, {
"DockerId": "ab6bd869e675f64122a33a74da9183b304bbc60b649a15d0d83ebc48eeafdd76",
"Name": "~internal~ecs~pause",
"DockerName": "ecs-web-api-113-internalecspause-aab99b88b9ddadb0c701",
"Image": "fg-proxy:tinyproxy",
"ImageID": "",
"Labels": {
"com.amazonaws.ecs.cluster": "arn:aws:ecs:us-west-2:888955683113:cluster/example-project-dev",
"com.amazonaws.ecs.container-name": "~internal~ecs~pause",
"com.amazonaws.ecs.task-arn": "arn:aws:ecs:us-west-2:888955683113:task/700e38dd-dec5-4201-b711-c04a51feef8a",
"com.amazonaws.ecs.task-definition-family": "web-api",
"com.amazonaws.ecs.task-definition-version": "113"
},
"DesiredStatus": "RESOURCES_PROVISIONED",
"KnownStatus": "RESOURCES_PROVISIONED",
"Limits": {
"CPU": 0,
"Memory": 0
},
"CreatedAt": "2019-07-11T05:36:34.896093577Z",
"StartedAt": "2019-07-11T05:36:35.302359045Z",
"Type": "CNI_PAUSE",
"Networks": [{
"NetworkMode": "awsvpc",
"IPv4Addresses": ["172.31.62.204"]
}]
}, {
"DockerId": "07bce50839fc992393799457811e4a0ac56979b2164c7aec6e66b40162ae3119",
"Name": "web-api-dev",
"DockerName": "ecs-web-api-113-web-api-dev-ceefbfb4dba2a6e05900",
"Image": "888955683113.dkr.ecr.us-west-2.amazonaws.com/example-project:dev-web-api",
"ImageID": "sha256:cf793de01311ac4e5e32c76cb4625f6600ec8017c726e99e28ec2199d4af599b",
"Labels": {
"com.amazonaws.ecs.cluster": "arn:aws:ecs:us-west-2:888955683113:cluster/example-project-dev",
"com.amazonaws.ecs.container-name": "web-api-dev",
"com.amazonaws.ecs.task-arn": "arn:aws:ecs:us-west-2:888955683113:task/700e38dd-dec5-4201-b711-c04a51feef8a",
"com.amazonaws.ecs.task-definition-family": "web-api",
"com.amazonaws.ecs.task-definition-version": "113",
"com.datadoghq.ad.check_names": "[\"web-api-dev\"]",
"com.datadoghq.ad.init_configs": "[{}]",
"com.datadoghq.ad.instances": "[{\"host\": \"%%host%%\", \"port\": 80}]",
"com.datadoghq.ad.logs": "[{\"source\": \"docker\", \"service\": \"web-api-dev\", \"service_name\": \"web-api\", \"cluster\": \"example-project-dev\", \"env\": \"dev\"}]"
},
"DesiredStatus": "RUNNING",
"KnownStatus": "RUNNING",
"Limits": {
"CPU": 128,
"Memory": 0
},
"CreatedAt": "2019-07-11T05:36:42.417547421Z",
"StartedAt": "2019-07-11T05:36:53.88095717Z",
"Type": "NORMAL",
"Networks": [{
"NetworkMode": "awsvpc",
"IPv4Addresses": ["172.31.62.204"]
}],
"Health": {}
}],
"Limits": {
"CPU": 0.5,
"Memory": 2048
},
"PullStartedAt": "2019-07-11T05:36:35.407114703Z",
"PullStoppedAt": "2019-07-11T05:36:54.128398742Z"
}
*/

View File

@@ -3,6 +3,7 @@ package cicd
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"io/ioutil" "io/ioutil"
"net/url" "net/url"
"path/filepath" "path/filepath"
@@ -262,6 +263,18 @@ func EcrPurgeImages(req *serviceBuildRequest) ([]*ecr.ImageIdentifier, error) {
return delIds, nil return delIds, nil
} }
// SyncPublicS3Files copies the local files from the static directory to s3 with public-read enabled.
func SyncPublicS3Files(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
	// Iterate over the static directory, uploading every file with a public-read ACL.
	fileIter := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
	up := s3manager.NewUploader(awsSession)
	return up.UploadWithIterator(aws.BackgroundContext(), fileIter)
}
// EcsReadTaskDefinition reads a task definition file and json decodes it. // EcsReadTaskDefinition reads a task definition file and json decodes it.
func EcsReadTaskDefinition(serviceDir, targetEnv string) ([]byte, error) { func EcsReadTaskDefinition(serviceDir, targetEnv string) ([]byte, error) {
checkPaths := []string{ checkPaths := []string{

View File

@@ -1,4 +1,4 @@
package devops package cicd
import ( import (
"bytes" "bytes"

View File

@@ -142,19 +142,26 @@ func ecrRepositoryName(projectName string) string {
// releaseImage returns the name used for tagging a release image will always include one with environment and // releaseImage returns the name used for tagging a release image will always include one with environment and
// service name. If the env var CI_COMMIT_REF_NAME is set, it will be appended. // service name. If the env var CI_COMMIT_REF_NAME is set, it will be appended.
func releaseImage(env, serviceName, repositoryUri string) string { func releaseTag(env, serviceName string) string {
tag1 := env + "-" + serviceName tag1 := env + "-" + serviceName
// Generate tags for the release image. // Generate tags for the release image.
var releaseImage string var releaseTag string
if v := os.Getenv("CI_COMMIT_REF_NAME"); v != "" { if v := os.Getenv("CI_COMMIT_REF_NAME"); v != "" {
tag2 := tag1 + "-" + v tag2 := tag1 + "-" + v
releaseImage = repositoryUri + ":" + tag2 releaseTag = tag2
} else { } else {
releaseImage = repositoryUri + ":" + tag1 releaseTag = tag1
} }
return releaseImage return releaseTag
}
// releaseImage returns the name used for tagging a release image will always include one with environment and
// service name. If the env var CI_COMMIT_REF_NAME is set, it will be appended.
func releaseImage(env, serviceName, repositoryUri string) string {
return repositoryUri + ":" + releaseTag(env, serviceName)
} }
// dBInstanceIdentifier returns the database name. // dBInstanceIdentifier returns the database name.

View File

@@ -64,7 +64,12 @@ type ServiceDeployFlags struct {
DockerFile string `validate:"omitempty" example:"./cmd/web-api/Dockerfile"` DockerFile string `validate:"omitempty" example:"./cmd/web-api/Dockerfile"`
EnableLambdaVPC bool `validate:"omitempty" example:"false"` EnableLambdaVPC bool `validate:"omitempty" example:"false"`
EnableEcsElb bool `validate:"omitempty" example:"false"` EnableEcsElb bool `validate:"omitempty" example:"false"`
RecreateService bool `validate:"omitempty" example:"false"`
StaticFilesS3Enable bool `validate:"omitempty" example:"false"`
StaticFilesCloudfrontEnable bool `validate:"omitempty" example:"false"`
StaticFilesImgResizeEnable bool `validate:"omitempty" example:"false"`
RecreateService bool `validate:"omitempty" example:"false"`
} }
// serviceDeployRequest defines the details needed to execute a service deployment. // serviceDeployRequest defines the details needed to execute a service deployment.
@@ -105,10 +110,16 @@ type serviceDeployRequest struct {
CloudWatchLogGroupName string `validate:"required"` CloudWatchLogGroupName string `validate:"required"`
CloudWatchLogGroup *cloudwatchlogs.CreateLogGroupInput CloudWatchLogGroup *cloudwatchlogs.CreateLogGroupInput
S3BucketTempPrefix string `validate:"required_with=S3BucketPrivateName S3BucketPublicName"` S3BucketTempPrefix string `validate:"required_with=S3BucketPrivateName S3BucketPublicName"`
S3BucketPrivateName string `validate:"omitempty"` S3BucketPrivateName string `validate:"omitempty"`
S3BucketPublicName string `validate:"omitempty"` S3BucketPublicName string `validate:"omitempty"`
S3Buckets []S3Bucket S3BucketPublicKeyPrefix string `validate:"omitempty"`
S3Buckets []S3Bucket
StaticFilesS3Enable bool `validate:"omitempty"`
StaticFilesS3Prefix string `validate:"omitempty"`
StaticFilesCloudfrontEnable bool `validate:"omitempty"`
StaticFilesImgResizeEnable bool `validate:"omitempty"`
EnableEcsElb bool `validate:"omitempty"` EnableEcsElb bool `validate:"omitempty"`
ElbLoadBalancerName string `validate:"omitempty"` ElbLoadBalancerName string `validate:"omitempty"`
@@ -169,9 +180,14 @@ func NewServiceDeployRequest(log *log.Logger, flags ServiceDeployFlags) (*servic
req = serviceDeployRequest{ req = serviceDeployRequest{
serviceRequest: sr, serviceRequest: sr,
EnableHTTPS: flags.EnableHTTPS, EnableHTTPS: flags.EnableHTTPS,
ServiceHostPrimary: flags.ServiceHostPrimary, ServiceHostPrimary: flags.ServiceHostPrimary,
ServiceHostNames: flags.ServiceHostNames, ServiceHostNames: flags.ServiceHostNames,
StaticFilesS3Enable: flags.StaticFilesS3Enable,
StaticFilesCloudfrontEnable: flags.StaticFilesCloudfrontEnable,
StaticFilesImgResizeEnable: flags.StaticFilesImgResizeEnable,
S3BucketPrivateName: flags.S3BucketPrivateName, S3BucketPrivateName: flags.S3BucketPrivateName,
S3BucketPublicName: flags.S3BucketPublicName, S3BucketPublicName: flags.S3BucketPublicName,
EnableLambdaVPC: flags.EnableLambdaVPC, EnableLambdaVPC: flags.EnableLambdaVPC,
@@ -337,6 +353,16 @@ func NewServiceDeployRequest(log *log.Logger, flags ServiceDeployFlags) (*servic
}) })
} }
// The S3 key prefix used as the origin when cloud front is enabled.
if req.S3BucketPublicKeyPrefix == "" {
req.S3BucketPublicKeyPrefix = "public"
}
// The S3 prefix used to upload static files served to public.
if req.StaticFilesS3Prefix == "" {
req.StaticFilesS3Prefix = filepath.Join(req.S3BucketPublicKeyPrefix, releaseTag(req.Env, req.ServiceName), "static")
}
// Set default AWS ECR Repository Name. // Set default AWS ECR Repository Name.
req.EcrRepositoryName = ecrRepositoryName(req.ProjectName) req.EcrRepositoryName = ecrRepositoryName(req.ProjectName)
log.Printf("\t\t\tSet ECR Repository Name to '%s'.", req.EcrRepositoryName) log.Printf("\t\t\tSet ECR Repository Name to '%s'.", req.EcrRepositoryName)
@@ -988,7 +1014,6 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
log.Printf("\t\t\tUpdated bucket policy") log.Printf("\t\t\tUpdated bucket policy")
} }
} }
log.Printf("\t%s\tS3 buckets configured successfully.\n", tests.Success) log.Printf("\t%s\tS3 buckets configured successfully.\n", tests.Success)
} }
@@ -2307,6 +2332,11 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
"{HOST_PRIMARY}": req.ServiceHostPrimary, "{HOST_PRIMARY}": req.ServiceHostPrimary,
"{HOST_NAMES}": strings.Join(req.ServiceHostNames, ","), "{HOST_NAMES}": strings.Join(req.ServiceHostNames, ","),
"{STATIC_FILES_S3_ENABLED}": "false",
"{STATIC_FILES_S3_PREFIX}": "",
"{STATIC_FILES_CLOUDFRONT_ENABLED}": "false",
"{STATIC_FILES_IMG_RESIZE_ENABLED}": "false",
"{CACHE_HOST}": "", // Not enabled by default "{CACHE_HOST}": "", // Not enabled by default
"{DB_HOST}": "", "{DB_HOST}": "",
@@ -2359,6 +2389,21 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
placeholders["{APP_BASE_URL}"] = fmt.Sprintf("%s://%s/", appSchema, req.ServiceHostPrimary) placeholders["{APP_BASE_URL}"] = fmt.Sprintf("%s://%s/", appSchema, req.ServiceHostPrimary)
} }
// Static files served from S3.
if req.StaticFilesS3Enable {
placeholders["{STATIC_FILES_S3_ENABLED}"] = "true"
}
// Static files served from CloudFront.
if req.StaticFilesCloudfrontEnable {
placeholders["{STATIC_FILES_CLOUDFRONT_ENABLED}"] = "true"
}
// Support for resizing static images files to be responsive.
if req.StaticFilesImgResizeEnable {
placeholders["{STATIC_FILES_IMG_RESIZE_ENABLED}"] = "true"
}
// When db is set, update the placeholders. // When db is set, update the placeholders.
if db != nil { if db != nil {
placeholders["{DB_HOST}"] = db.Host placeholders["{DB_HOST}"] = db.Host
@@ -3157,6 +3202,20 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
} }
} }
// When static files are enabled to be to stored on S3, we need to upload all of them.
if req.StaticFilesS3Enable {
log.Println("\tSync static files to public S3 bucket")
staticDir := filepath.Join(req.ServiceDir, "static")
err := SyncPublicS3Files(req.awsSession(), req.S3BucketPublicName, req.StaticFilesS3Prefix, staticDir)
if err != nil {
return errors.Wrapf(err, "Failed to sync static files from %s to s3://%s/%s '%s'", staticDir, req.S3BucketPublicName, req.StaticFilesS3Prefix)
}
log.Printf("\t%s\tFiles uploaded.\n", tests.Success)
}
// Wait for the updated or created service to enter a stable state. // Wait for the updated or created service to enter a stable state.
{ {
log.Println("\tWaiting for service to enter stable state.") log.Println("\tWaiting for service to enter stable state.")

View File

@@ -82,6 +82,9 @@ func main() {
cli.StringFlag{Name: "project", Usage: "name of project", Destination: &deployFlags.ProjectName}, cli.StringFlag{Name: "project", Usage: "name of project", Destination: &deployFlags.ProjectName},
cli.BoolFlag{Name: "enable_elb", Usage: "enable deployed to use Elastic Load Balancer", Destination: &deployFlags.EnableEcsElb}, cli.BoolFlag{Name: "enable_elb", Usage: "enable deployed to use Elastic Load Balancer", Destination: &deployFlags.EnableEcsElb},
cli.BoolTFlag{Name: "lambda_vpc", Usage: "deploy lambda behind VPC", Destination: &deployFlags.EnableLambdaVPC}, cli.BoolTFlag{Name: "lambda_vpc", Usage: "deploy lambda behind VPC", Destination: &deployFlags.EnableLambdaVPC},
cli.BoolFlag{Name: "static_files_s3", Usage: "service static files from S3", Destination: &deployFlags.StaticFilesS3Enable},
cli.BoolFlag{Name: "static_files_cloudfront", Usage: "serve static files from Cloudfront", Destination: &deployFlags.StaticFilesCloudfrontEnable},
cli.BoolFlag{Name: "static_files_img_resize", Usage: "enable response images from service", Destination: &deployFlags.StaticFilesImgResizeEnable},
cli.BoolFlag{Name: "recreate_service", Usage: "skip docker push after build", Destination: &deployFlags.RecreateService}, cli.BoolFlag{Name: "recreate_service", Usage: "skip docker push after build", Destination: &deployFlags.RecreateService},
}, },
Action: func(c *cli.Context) error { Action: func(c *cli.Context) error {