1
0
mirror of https://github.com/raseels-repos/golang-saas-starter-kit.git synced 2025-06-15 00:15:15 +02:00

Completed s3 config sync

This commit is contained in:
Lee Brown
2019-07-12 23:28:53 -08:00
parent 0efe444c05
commit ba30670f6a
11 changed files with 676 additions and 159 deletions

View File

@ -11,7 +11,7 @@
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "{AWSLOGS_GROUP}",
"awslogs-group": "{AWS_LOGS_GROUP}",
"awslogs-region": "{AWS_REGION}",
"awslogs-stream-prefix": "ecs"
}
@ -29,12 +29,15 @@
"environment": [
{"name": "AWS_REGION", "value": "{AWS_REGION}"},
{"name": "AWS_USE_ROLE", "value": "true"},
{"name": "AWSLOGS_GROUP", "value": "{AWSLOGS_GROUP}"},
{"name": "AWSLOGS_GROUP", "value": "{AWS_LOGS_GROUP}"},
{"name": "ECS_CLUSTER", "value": "{ECS_CLUSTER}"},
{"name": "ECS_SERVICE", "value": "{ECS_SERVICE}"},
{"name": "WEB_API_HTTP_HOST", "value": "{HTTP_HOST}"},
{"name": "WEB_API_HTTPS_HOST", "value": "{HTTPS_HOST}"},
{"name": "WEB_API_APP_PROJECT", "value": "{APP_PROJECT}"},
{"name": "WEB_API_APP_BASE_URL", "value": "{APP_BASE_URL}"},
{"name": "WEB_API_HOST_PRIMARY", "value": "{HOST_PRIMARY}"},
{"name": "WEB_API_HOST_NAMES", "value": "{HOST_NAMES}"},
{"name": "WEB_API_REDIS_HOST", "value": "{CACHE_HOST}"},
{"name": "WEB_API_DB_HOST", "value": "{DB_HOST}"},
{"name": "WEB_API_DB_USER", "value": "{DB_USER}"},
@ -44,6 +47,8 @@
{"name": "WEB_API_DB_DISABLE_TLS", "value": "{DB_DISABLE_TLS}"},
{"name": "WEB_API_AUTH_USE_AWS_SECRET_MANAGER", "value": "true"},
{"name": "WEB_API_AUTH_AWS_SECRET_ID", "value": "auth-{ECS_SERVICE}"},
{"name": "WEB_API_AWS_S3_BUCKET_PRIVATE", "value": "{AWS_S3_BUCKET_PRIVATE}"},
{"name": "WEB_API_AWS_S3_BUCKET_PUBLIC", "value": "{AWS_S3_BUCKET_PUBLIC}"},
{"name": "BUILDINFO_CI_COMMIT_REF_NAME", "value": "{CI_COMMIT_REF_NAME}"},
{"name": "BUILDINFO_CI_COMMIT_REF_SLUG", "value": "{CI_COMMIT_REF_SLUG}"},
{"name": "BUILDINFO_CI_COMMIT_SHA", "value": "{CI_COMMIT_SHA}"},

View File

@ -13,13 +13,14 @@ import (
"net/url"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/mid"
"geeks-accelerator/oss/saas-starter-kit/example-project/cmd/web-api/docs"
"geeks-accelerator/oss/saas-starter-kit/example-project/cmd/web-api/handlers"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/mid"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/auth"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/devops"
"geeks-accelerator/oss/saas-starter-kit/example-project/internal/platform/flag"
@ -86,8 +87,12 @@ func main() {
}
App struct {
Name string `default:"web-api" envconfig:"NAME"`
BaseUrl string `default:"" envconfig:"BASE_URL"`
Project string `default:"" envconfig:"PROJECT"`
BaseUrl string `default:"" envconfig:"BASE_URL" example:"http://example-project.com"`
HostPrimary string `envconfig:"HOST_PRIMARY" example:"example-project.com"`
HostNames []string `envconfig:"HOST_NAMES" example:"subdomain.example-project.com"`
TemplateDir string `default:"./templates" envconfig:"TEMPLATE_DIR"`
ConfigDir string `default:"" envconfig:"CONFIG_DIR"`
DebugHost string `default:"0.0.0.0:4000" envconfig:"DEBUG_HOST"`
ShutdownTimeout time.Duration `default:"5s" envconfig:"SHUTDOWN_TIMEOUT"`
}
@ -115,6 +120,10 @@ func main() {
AccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` // WEB_API_AWS_AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY_ID
SecretAccessKey string `envconfig:"AWS_SECRET_ACCESS_KEY" json:"-"` // don't print
Region string `default:"us-east-1" envconfig:"AWS_REGION"`
S3BucketPrivate string `envconfig:"S3_BUCKET_PRIVATE"`
S3BucketPublic string `envconfig:"S3_BUCKET_PUBLIC"`
SecretsManagerConfigPrefix string `default:"" envconfig:"SECRETS_MANAGER_CONFIG_PREFIX"`
SecretsManagerConfigSyncInterval time.Duration `default:"5m" envconfig:"SECRETS_MANAGER_CONFIG_SYNC_INTERVAL"`
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
@ -123,7 +132,6 @@ func main() {
}
Auth struct {
UseAwsSecretManager bool `default:"false" envconfig:"USE_AWS_SECRET_MANAGER"`
AwsSecretID string `default:"auth-secret-key" envconfig:"AWS_SECRET_ID"`
KeyExpiration time.Duration `default:"3600s" envconfig:"KEY_EXPIRATION"`
}
BuildInfo struct {
@ -142,12 +150,12 @@ func main() {
// For additional details refer to https://github.com/kelseyhightower/envconfig
if err := envconfig.Process(service, &cfg); err != nil {
log.Fatalf("main : Parsing Config : %v", err)
log.Fatalf("main : Parsing Config : %+v", err)
}
if err := flag.Process(&cfg); err != nil {
if err != flag.ErrHelp {
log.Fatalf("main : Parsing Command Line : %v", err)
log.Fatalf("main : Parsing Command Line : %+v", err)
}
return // We displayed help.
}
@ -161,6 +169,19 @@ func main() {
cfg.Aws.SecretAccessKey = ""
}
// Set the default AWS Secrets Manager prefix used for name to store config files that will be persisted across
// deployments and distributed to each instance of the service running.
if cfg.Aws.SecretsManagerConfigPrefix == "" {
var pts []string
if cfg.App.Project != "" {
pts = append(pts, cfg.App.Project)
}
pts = append(pts, cfg.Env, cfg.App.Name)
cfg.Aws.SecretsManagerConfigPrefix = filepath.Join(pts...)
}
// If base URL is empty, set the default value from the HTTP Host
if cfg.App.BaseUrl == "" {
baseUrl := cfg.HTTP.Host
@ -177,6 +198,21 @@ func main() {
cfg.App.BaseUrl = baseUrl
}
// Set the default config directory used to store config files locally that will be sync'd to AWS Secrets Manager
// and distributed to all other running services. This include Let's Encrypt for HTTPS when not using an Elastic
// Load Balancer.
// Note: All files stored in this directory are uploaded to AWS Secrets Manager.
if cfg.App.ConfigDir == "" {
if cfg.App.ConfigDir == "" {
cfg.App.ConfigDir = filepath.Join(os.TempDir(), cfg.App.Name, "cfg")
if err := os.MkdirAll(cfg.App.ConfigDir, os.ModePerm); err != nil {
log.Fatalf("main : Make config directory : %s : %+v", cfg.App.ConfigDir, err)
}
}
}
// =========================================================================
// Log App Info
@ -191,7 +227,7 @@ func main() {
{
cfgJSON, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
log.Fatalf("main : Marshalling Config to JSON : %v", err)
log.Fatalf("main : Marshalling Config to JSON : %+v", err)
}
log.Printf("main : Config : %v\n", string(cfgJSON))
}
@ -207,13 +243,18 @@ func main() {
log.Printf("main : AWS : Using role.\n")
} else {
} else if cfg.Aws.AccessKeyID != "" {
creds := credentials.NewStaticCredentials(cfg.Aws.AccessKeyID, cfg.Aws.SecretAccessKey, "")
awsSession = session.New(&aws.Config{Region: aws.String(cfg.Aws.Region), Credentials: creds})
log.Printf("main : AWS : Using static credentials\n")
}
awsSession = awstrace.WrapSession(awsSession)
// Wrap the AWS session to enable tracing.
if awsSession != nil {
awsSession = awstrace.WrapSession(awsSession)
}
// =========================================================================
// Start Redis
@ -237,12 +278,12 @@ func main() {
if cfg.Redis.MaxmemoryPolicy != "" {
err := redisClient.ConfigSet(evictPolicyConfigKey, cfg.Redis.MaxmemoryPolicy).Err()
if err != nil && !strings.Contains(err.Error(), "unknown command") {
log.Fatalf("main : redis : ConfigSet maxmemory-policy : %v", err)
log.Fatalf("main : redis : ConfigSet maxmemory-policy : %+v", err)
}
} else {
evictPolicy, err := redisClient.ConfigGet(evictPolicyConfigKey).Result()
if err != nil && !strings.Contains(err.Error(), "unknown command") {
log.Fatalf("main : redis : ConfigGet maxmemory-policy : %v", err)
log.Fatalf("main : redis : ConfigGet maxmemory-policy : %+v", err)
} else if evictPolicy != nil && len(evictPolicy) > 0 && evictPolicy[1] != "allkeys-lru" {
log.Printf("main : redis : ConfigGet maxmemory-policy : recommended to be set to allkeys-lru to avoid OOM")
}
@ -281,36 +322,37 @@ func main() {
sqltrace.Register(cfg.DB.Driver, &pq.Driver{}, sqltrace.WithServiceName(service))
masterDb, err := sqlxtrace.Open(cfg.DB.Driver, dbUrl.String())
if err != nil {
log.Fatalf("main : Register DB : %s : %v", cfg.DB.Driver, err)
log.Fatalf("main : Register DB : %s : %+v", cfg.DB.Driver, err)
}
defer masterDb.Close()
// =========================================================================
// Init new Authenticator
var authenticator *auth.Authenticator
if cfg.Auth.UseAwsSecretManager {
authenticator, err = auth.NewAuthenticatorAws(awsSession, cfg.Auth.AwsSecretID, time.Now().UTC(), cfg.Auth.KeyExpiration)
secretName := filepath.Join(cfg.Aws.SecretsManagerConfigPrefix, "authenticator")
authenticator, err = auth.NewAuthenticatorAws(awsSession, secretName, time.Now().UTC(), cfg.Auth.KeyExpiration)
} else {
authenticator, err = auth.NewAuthenticatorFile("", time.Now().UTC(), cfg.Auth.KeyExpiration)
}
if err != nil {
log.Fatalf("main : Constructing authenticator : %v", err)
log.Fatalf("main : Constructing authenticator : %+v", err)
}
// =========================================================================
// Init redirect middleware to ensure all requests go to the primary domain.
baseSiteUrl, err := url.Parse(cfg.App.BaseUrl)
if err != nil {
log.Fatalf("main : Parse App Base URL : %s : %v", cfg.App.BaseUrl, err)
log.Fatalf("main : Parse App Base URL : %s : %+v", cfg.App.BaseUrl, err)
}
var primaryDomain string
if strings.Contains(baseSiteUrl.Host, ":") {
primaryDomain, _, err = net.SplitHostPort(baseSiteUrl.Host)
if err != nil {
log.Fatalf("main : SplitHostPort : %s : %v", baseSiteUrl.Host, err)
log.Fatalf("main : SplitHostPort : %s : %+v", baseSiteUrl.Host, err)
}
} else {
primaryDomain = baseSiteUrl.Host
@ -343,13 +385,33 @@ func main() {
}()
}
// =========================================================================
// ECS Task registration for services that don't use an AWS Elastic Load Balancer.
if awsSession != nil {
syncPrefix := filepath.Join(cfg.Aws.SecretsManagerConfigPrefix, "sync-config")
// Download all config files from Secret Manager.
f, err := devops.SyncCfgInit(log, awsSession, syncPrefix, cfg.App.ConfigDir, cfg.Aws.SecretsManagerConfigSyncInterval)
if err != nil {
log.Fatalf("main : AWS Secrets Manager config download : %+v", err)
}
// Start the watcher worker.
if f != nil {
go f()
}
}
// =========================================================================
// ECS Task registration for services that don't use an AWS Elastic Load Balancer.
err = devops.EcsServiceTaskInit(log, awsSession)
if err != nil {
log.Fatalf("main : Ecs Service Task init : %v", err)
log.Fatalf("main : Ecs Service Task init : %+v", err)
}
// =========================================================================
// Start API Service
@ -359,7 +421,7 @@ func main() {
u, err := url.Parse(cfg.App.BaseUrl)
if err != nil {
log.Fatalf("main : Parse app base url %s : %v", cfg.App.BaseUrl, err)
log.Fatalf("main : Parse app base url %s : %+v", cfg.App.BaseUrl, err)
}
docs.SwaggerInfo.Host = u.Host
@ -395,7 +457,7 @@ func main() {
}()
}
// Start the HTTPS service listening for requests.
// Start the HTTPS service listening for requests with an SSL Cert auto generated with Let's Encrypt.
if cfg.HTTPS.Host != "" {
api := http.Server{
Addr: cfg.HTTPS.Host,
@ -405,22 +467,22 @@ func main() {
MaxHeaderBytes: 1 << 20,
}
// Note: use a sensible value for data directory
// this is where cached certificates are stored
dataDir := "."
hostPolicy := func(ctx context.Context, host string) error {
// Note: change to your real domain
allowedHost := "www.mydomain.com"
if host == allowedHost {
return nil
// Generate a unique list of hostnames.
var hosts []string
if cfg.App.HostPrimary != "" {
hosts = append(hosts, cfg.App.HostPrimary)
}
for _, h := range cfg.App.HostNames {
h = strings.TrimSpace(h)
if h != cfg.App.HostPrimary {
hosts = append(hosts, h)
}
return fmt.Errorf("acme/autocert: only %s host is allowed", allowedHost)
}
m := &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: hostPolicy,
Cache: autocert.DirCache(dataDir),
HostPolicy: autocert.HostWhitelist(hosts...),
Cache: autocert.DirCache(cfg.App.ConfigDir),
}
api.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate}
@ -438,7 +500,7 @@ func main() {
// Blocking main and waiting for shutdown.
select {
case err := <-serverErrors:
log.Fatalf("main : Error starting server: %v", err)
log.Fatalf("main : Error starting server: %+v", err)
case sig := <-shutdown:
log.Printf("main : %v : Start shutdown..", sig)
@ -446,7 +508,7 @@ func main() {
// Ensure the public IP address for the task is removed from Route53.
err = devops.EcsServiceTaskTaskShutdown(log, awsSession)
if err != nil {
log.Fatalf("main : Ecs Service Task shutdown : %v", err)
log.Fatalf("main : Ecs Service Task shutdown : %+v", err)
}
// Create context for Shutdown call.
@ -470,7 +532,7 @@ func main() {
case sig == syscall.SIGSTOP:
log.Fatal("main : Integrity issue caused shutdown")
case err != nil:
log.Fatalf("main : Could not stop server gracefully : %v", err)
log.Fatalf("main : Could not stop server gracefully : %+v", err)
}
}
}

View File

@ -135,12 +135,12 @@ func main() {
// For additional details refer to https://github.com/kelseyhightower/envconfig
if err := envconfig.Process(service, &cfg); err != nil {
log.Fatalf("main : Parsing Config : %v", err)
log.Fatalf("main : Parsing Config : %+v", err)
}
if err := flag.Process(&cfg); err != nil {
if err != flag.ErrHelp {
log.Fatalf("main : Parsing Command Line : %v", err)
log.Fatalf("main : Parsing Command Line : %+v", err)
}
return // We displayed help.
}
@ -184,7 +184,7 @@ func main() {
{
cfgJSON, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
log.Fatalf("main : Marshalling Config to JSON : %v", err)
log.Fatalf("main : Marshalling Config to JSON : %+v", err)
}
log.Printf("main : Config : %v\n", string(cfgJSON))
}
@ -225,12 +225,12 @@ func main() {
if cfg.Redis.MaxmemoryPolicy != "" {
err := redisClient.ConfigSet(evictPolicyConfigKey, cfg.Redis.MaxmemoryPolicy).Err()
if err != nil && !strings.Contains(err.Error(), "unknown command") {
log.Fatalf("main : redis : ConfigSet maxmemory-policy : %v", err)
log.Fatalf("main : redis : ConfigSet maxmemory-policy : %+v", err)
}
} else {
evictPolicy, err := redisClient.ConfigGet(evictPolicyConfigKey).Result()
if err != nil && !strings.Contains(err.Error(), "unknown command") {
log.Fatalf("main : redis : ConfigGet maxmemory-policy : %v", err)
log.Fatalf("main : redis : ConfigGet maxmemory-policy : %+v", err)
} else if evictPolicy != nil && len(evictPolicy) > 0 && evictPolicy[1] != "allkeys-lru" {
log.Printf("main : redis : ConfigGet maxmemory-policy : recommended to be set to allkeys-lru to avoid OOM")
}
@ -269,7 +269,7 @@ func main() {
sqltrace.Register(cfg.DB.Driver, &pq.Driver{}, sqltrace.WithServiceName(service))
masterDb, err := sqlxtrace.Open(cfg.DB.Driver, dbUrl.String())
if err != nil {
log.Fatalf("main : Register DB : %s : %v", cfg.DB.Driver, err)
log.Fatalf("main : Register DB : %s : %+v", cfg.DB.Driver, err)
}
defer masterDb.Close()
@ -281,7 +281,7 @@ func main() {
if cfg.App.StaticS3.S3Enabled || cfg.App.StaticS3.CloudFrontEnabled {
err = devops.SyncS3StaticFiles(awsSession, cfg.App.StaticS3.S3Bucket, cfg.App.StaticS3.S3KeyPrefix, cfg.App.StaticDir)
if err != nil {
log.Fatalf("main : deploy : %v", err)
log.Fatalf("main : deploy : %+v", err)
}
}
return
@ -292,14 +292,14 @@ func main() {
baseSiteUrl, err := url.Parse(cfg.App.BaseUrl)
if err != nil {
log.Fatalf("main : Parse App Base URL : %s : %v", cfg.App.BaseUrl, err)
log.Fatalf("main : Parse App Base URL : %s : %+v", cfg.App.BaseUrl, err)
}
var primaryDomain string
if strings.Contains(baseSiteUrl.Host, ":") {
primaryDomain, _, err = net.SplitHostPort(baseSiteUrl.Host)
if err != nil {
log.Fatalf("main : SplitHostPort : %s : %v", baseSiteUrl.Host, err)
log.Fatalf("main : SplitHostPort : %s : %+v", baseSiteUrl.Host, err)
}
} else {
primaryDomain = baseSiteUrl.Host
@ -318,7 +318,7 @@ func main() {
if cfg.App.StaticS3.S3Enabled || cfg.App.StaticS3.CloudFrontEnabled || cfg.App.StaticS3.ImgResizeEnabled {
s3UrlFormatter, err := devops.S3UrlFormatter(awsSession, cfg.App.StaticS3.S3Bucket, cfg.App.StaticS3.S3KeyPrefix, cfg.App.StaticS3.CloudFrontEnabled)
if err != nil {
log.Fatalf("main : S3UrlFormatter failed : %v", err)
log.Fatalf("main : S3UrlFormatter failed : %+v", err)
}
staticS3UrlFormatter = func(p string) string {
@ -341,7 +341,7 @@ func main() {
} else {
baseUrl, err := url.Parse(cfg.App.BaseUrl)
if err != nil {
log.Fatalf("main : url Parse(%s) : %v", cfg.App.BaseUrl, err)
log.Fatalf("main : url Parse(%s) : %+v", cfg.App.BaseUrl, err)
}
staticUrlFormatter = func(p string) string {
@ -521,7 +521,7 @@ func main() {
// Template Renderer used to generate HTML response for web experience.
renderer, err := template_renderer.NewTemplateRenderer(cfg.App.TemplateDir, enableHotReload, gvd, t, eh)
if err != nil {
log.Fatalf("main : Marshalling Config to JSON : %v", err)
log.Fatalf("main : Marshalling Config to JSON : %+v", err)
}
// =========================================================================
@ -577,7 +577,7 @@ func main() {
// Blocking main and waiting for shutdown.
select {
case err := <-serverErrors:
log.Fatalf("main : Error starting server: %v", err)
log.Fatalf("main : Error starting server: %+v", err)
case sig := <-shutdown:
log.Printf("main : %v : Start shutdown..", sig)
@ -598,7 +598,7 @@ func main() {
case sig == syscall.SIGSTOP:
log.Fatal("main : Integrity issue caused shutdown")
case err != nil:
log.Fatalf("main : Could not stop server gracefully : %v", err)
log.Fatalf("main : Could not stop server gracefully : %+v", err)
}
}
}

View File

@ -16,6 +16,7 @@ require (
github.com/dustin/go-humanize v1.0.0
github.com/fatih/camelcase v1.0.0
github.com/fatih/structtag v1.0.0
github.com/fsnotify/fsnotify v1.4.7
github.com/geeks-accelerator/sqlxmigrate v0.0.0-20190527223850-4a863a2d30db
github.com/go-openapi/spec v0.19.2 // indirect
github.com/go-openapi/swag v0.19.4 // indirect

View File

@ -0,0 +1,271 @@
package devops
import (
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/fsnotify/fsnotify"
"github.com/pkg/errors"
)
// SyncCfgInit provides the functionality to keep config files sync'd between running tasks and across deployments.
// It performs an initial download of secrets from AWS Secrets Manager into watchDir, then returns a function that
// should be run in the background via a go routine to keep local and remote config files in sync.
func SyncCfgInit(log *log.Logger, awsSession *session.Session, secretPrefix, watchDir string, syncInterval time.Duration) (func(), error) {
	localfiles := make(map[string]time.Time)

	// Do the initial sync before starting file watch to download any existing configs.
	err := SyncCfgDir(log, awsSession, secretPrefix, watchDir, localfiles)
	if err != nil {
		return nil, err
	}

	// Create a new file watcher.
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// Return function that should be run in the background via a go routine that will watch for new files created
	// locally and updated in AWS Secrets Manager.
	f := func() {
		defer watcher.Close()

		// Init ticker to sync remote files from Secret Manager locally at the defined interval.
		// This must be started BEFORE the blocking watch loop below; placing it after the watch
		// call would make it unreachable.
		if syncInterval.Seconds() > 0 {
			ticker := time.NewTicker(syncInterval)
			defer ticker.Stop()

			go func() {
				for range ticker.C {
					log.Println("AWS Secrets Manager : Checking for remote updates")

					// Download any secrets that changed remotely since the last sync.
					err := SyncCfgDir(log, awsSession, secretPrefix, watchDir, localfiles)
					if err != nil {
						log.Printf("AWS Secrets Manager : Remote sync error - %+v", err)
					}
				}
			}()
		}

		// Block waiting to sync local file changes to Secret Manager.
		WatchCfgDir(log, awsSession, secretPrefix, watchDir, watcher, localfiles)
	}

	log.Printf("AWS Secrets Manager : Watching config dir %s", watchDir)

	// Note: Out of the box fsnotify can watch a single file, or a single directory.
	if err := watcher.Add(watchDir); err != nil {
		return nil, errors.Wrapf(err, "failed to add file watcher to %s", watchDir)
	}

	return f, nil
}
// SyncCfgDir lists all the Secrets from AWS Secrets Manager for a provided prefix and downloads them locally.
// localfiles tracks the last-changed time of each secret already downloaded so unchanged secrets are skipped.
func SyncCfgDir(log *log.Logger, awsSession *session.Session, secretPrefix, watchDir string, localfiles map[string]time.Time) error {
	svc := secretsmanager.New(awsSession)

	// Get a list of secrets for the prefix with the time they were last changed.
	secretIDs := make(map[string]time.Time)
	err := svc.ListSecretsPages(&secretsmanager.ListSecretsInput{}, func(res *secretsmanager.ListSecretsOutput, lastPage bool) bool {
		for _, s := range res.SecretList {
			// Skip any secret that does not have a matching prefix.
			if !strings.HasPrefix(*s.Name, secretPrefix) {
				continue
			}

			secretIDs[*s.Name] = s.LastChangedDate.UTC()
		}

		return !lastPage
	})
	if err != nil {
		return errors.Wrap(err, "failed to list secrets")
	}

	for id, curChanged := range secretIDs {
		// Load the secret by ID from Secrets Manager.
		res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
			SecretId: aws.String(id),
		})
		if err != nil {
			return errors.Wrapf(err, "failed to get secret value for id %s", id)
		}

		filename := filepath.Base(id)
		localpath := filepath.Join(watchDir, filename)

		// Ensure the secret exists locally.
		if exists(localpath) {
			// If the secret was previously downloaded and current last changed time is less than or equal to the time
			// the secret was last downloaded, then no need to update.
			if lastChanged, ok := localfiles[id]; ok && curChanged.UTC().Unix() <= lastChanged.UTC().Unix() {
				continue
			}
		}

		// Secrets uploaded by the file watcher are stored as SecretString (see handleWatchCfgEvent),
		// so fall back to the string value when no binary value is set.
		dat := res.SecretBinary
		if dat == nil && res.SecretString != nil {
			dat = []byte(*res.SecretString)
		}

		log.Printf("AWS Secrets Manager : Writing Config %s", filename)
		err = ioutil.WriteFile(localpath, dat, 0644)
		if err != nil {
			return errors.Wrapf(err, "failed to write secret value for id %s to %s", id, localpath)
		}

		// Only mark that the secret was updated when the file was successfully saved locally.
		localfiles[id] = curChanged
	}

	return nil
}
// WatchCfgDir watches for new/updated files locally and uploads them to AWS Secrets Manager.
// It blocks until both watcher channels are closed (i.e. the watcher is closed).
func WatchCfgDir(log *log.Logger, awsSession *session.Session, secretPrefix, dir string, watcher *fsnotify.Watcher, localfiles map[string]time.Time) error {
	for {
		select {
		// Watch for file events; the channel is closed when the watcher is closed.
		case event, ok := <-watcher.Events:
			if !ok {
				return nil
			}

			err := handleWatchCfgEvent(log, awsSession, secretPrefix, event)
			if err != nil {
				log.Printf("AWS Secrets Manager : Watcher Error - %+v", err)
			}

		// Watch for errors reported by the underlying watcher.
		case err, ok := <-watcher.Errors:
			if !ok {
				return nil
			}

			if err != nil {
				log.Printf("AWS Secrets Manager : Watcher Error - %+v", err)
			}
		}
	}
}
// handleWatchCfgEvent handles a fsnotify event. For new files, secrets are created, for updated files, the secret is
// updated. For deleted/renamed files the secret is scheduled for deletion with a 30-day recovery window.
func handleWatchCfgEvent(log *log.Logger, awsSession *session.Session, secretPrefix string, event fsnotify.Event) error {
	svc := secretsmanager.New(awsSession)

	fname := filepath.Base(event.Name)
	secretID := filepath.Join(secretPrefix, fname)

	if event.Op&fsnotify.Create == fsnotify.Create || event.Op&fsnotify.Write == fsnotify.Write {
		dat, err := ioutil.ReadFile(event.Name)
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to read file %s", event.Name)
		}

		// Create the new entry in AWS Secret Manager for the file.
		_, err = svc.CreateSecret(&secretsmanager.CreateSecretInput{
			Name:         aws.String(secretID),
			SecretString: aws.String(string(dat)),
		})
		if err != nil {
			// Note: the assertion must be checked with ok (not !ok); otherwise aerr is a nil
			// interface and calling aerr.Code() panics.
			aerr, ok := err.(awserr.Error)
			if !ok {
				return errors.Wrapf(err, "file watcher failed to create secret %s for %s", secretID, event.Name)
			}

			if aerr.Code() == secretsmanager.ErrCodeInvalidRequestException {
				// InvalidRequestException: You can't create this secret because a secret with this
				// name is already scheduled for deletion.

				// Restore secret after it was already previously deleted.
				_, err = svc.RestoreSecret(&secretsmanager.RestoreSecretInput{
					SecretId: aws.String(secretID),
				})
				if err != nil {
					return errors.Wrapf(err, "file watcher failed to restore secret %s for %s", secretID, event.Name)
				}
			} else if aerr.Code() != secretsmanager.ErrCodeResourceExistsException {
				return errors.Wrapf(err, "file watcher failed to create secret %s for %s", secretID, event.Name)
			}

			// The secret already exists (or was just restored), so update its value instead.
			_, err = svc.UpdateSecret(&secretsmanager.UpdateSecretInput{
				SecretId:     aws.String(secretID),
				SecretString: aws.String(string(dat)),
			})
			if err != nil {
				return errors.Wrapf(err, "file watcher failed to update secret %s for %s", secretID, event.Name)
			}

			log.Printf("AWS Secrets Manager : Secret %s updated for %s", secretID, event.Name)
		} else {
			log.Printf("AWS Secrets Manager : Secret %s created for %s", secretID, event.Name)
		}
	} else if event.Op&fsnotify.Remove == fsnotify.Remove || event.Op&fsnotify.Rename == fsnotify.Rename {
		// Schedule deletion of the secret for the removed file. Note: ForceDeleteWithoutRecovery and
		// RecoveryWindowInDays cannot be used together in the same API call, so only the recovery
		// window is specified here; the secret can still be restored during that window.
		_, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
			SecretId: aws.String(secretID),

			// (Optional) Specifies the number of days that Secrets Manager waits before
			// it can delete the secret.
			//
			// This value can range from 7 to 30 days.
			RecoveryWindowInDays: aws.Int64(30),
		})
		if err != nil {
			return errors.Wrapf(err, "file watcher failed to delete secret %s for %s", secretID, event.Name)
		}

		log.Printf("AWS Secrets Manager : Secret %s deleted for %s", secretID, event.Name)
	}

	return nil
}
// Exists reports whether the named file or directory exists.
func exists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}

View File

@ -21,6 +21,10 @@ import (
// EcsServiceTaskInit allows newly spun up ECS Service Tasks to register their public IP with Route 53.
func EcsServiceTaskInit(log *log.Logger, awsSession *session.Session) error {
if awsSession == nil {
return nil
}
ecsClusterName := os.Getenv("ECS_CLUSTER")
ecsServiceName := os.Getenv("ECS_SERVICE")
@ -78,6 +82,10 @@ func EcsServiceTaskTaskShutdown(log *log.Logger, awsSession *session.Session) er
// RegisterEcsServiceTasksRoute53 registers the public IPs for a ECS Service Task with Route 53.
func RegisterEcsServiceTasksRoute53(log *log.Logger, awsSession *session.Session, ecsClusterName, ecsServiceName string, zoneArecNames map[string][]string) error {
if awsSession == nil {
return nil
}
var networkInterfaceIds []string
for a := 0; a <= 3; a++ {
svc := ecs.New(awsSession)

View File

@ -108,6 +108,8 @@
"secretsmanager:ListSecrets",
"secretsmanager:GetSecretValue",
"secretsmanager:UpdateSecret",
"secretsmanager:RestoreSecret",
"secretsmanager:DeleteSecret",
"servicediscovery:ListNamespaces",
"servicediscovery:CreatePrivateDnsNamespace",
"servicediscovery:GetOperation",

View File

@ -0,0 +1,151 @@
package devops
import (
"context"
"log"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/pkg/errors"
"golang.org/x/crypto/acme/autocert"
"github.com/aws/aws-sdk-go/aws/session"
)
// SecretManagerAutocertCache implements the autocert.Cache interface for AWS Secrets Manager that is used by Manager
// to store and retrieve previously obtained certificates and other account data as opaque blobs.
type SecretManagerAutocertCache struct {
	// awsSession is used to construct the Secrets Manager service client for each operation.
	awsSession *session.Session
	// log receives informational messages about cache operations.
	log *log.Logger
	// secretPrefix is joined with the autocert cache key to form the Secrets Manager secret ID.
	secretPrefix string
}
// NewSecretManagerAutocertCache returns a new SecretManagerAutocertCache that stores autocert data in AWS
// Secrets Manager under the provided secret prefix.
func NewSecretManagerAutocertCache(log *log.Logger, awsSession *session.Session, secretPrefix string) (*SecretManagerAutocertCache, error) {
	return &SecretManagerAutocertCache{
		awsSession:   awsSession,
		log:          log,
		secretPrefix: secretPrefix,
	}, nil
}
// Get returns certificate data for the specified key.
// If there's no such key, Get returns autocert.ErrCacheMiss.
func (c *SecretManagerAutocertCache) Get(ctx context.Context, key string) ([]byte, error) {
	svc := secretsmanager.New(c.awsSession)

	secretID := filepath.Join(c.secretPrefix, key)

	// Load the secret by ID from Secrets Manager.
	res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
		SecretId: aws.String(secretID),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == secretsmanager.ErrCodeResourceNotFoundException {
			// No entry for this key; report a cache miss so autocert obtains a new certificate.
			return nil, autocert.ErrCacheMiss
		}
		return nil, errors.Wrapf(err, "failed to get value for secret id %s", secretID)
	}

	c.log.Printf("AWS Secrets Manager : Secret %s found", secretID)

	// Put stores values as SecretString, so fall back to it when no binary value is set.
	dat := res.SecretBinary
	if dat == nil && res.SecretString != nil {
		dat = []byte(*res.SecretString)
	}

	return dat, nil
}
// Put stores the data in the cache under the specified key.
// Underlying implementations may use any data storage format,
// as long as the reverse operation, Get, results in the original data.
func (c *SecretManagerAutocertCache) Put(ctx context.Context, key string, data []byte) error {
	svc := secretsmanager.New(c.awsSession)

	secretID := filepath.Join(c.secretPrefix, key)

	// Create the new entry in AWS Secret Manager for the key.
	_, err := svc.CreateSecret(&secretsmanager.CreateSecretInput{
		Name:         aws.String(secretID),
		SecretString: aws.String(string(data)),
	})
	if err != nil {
		// Note: the assertion must be checked with ok (not !ok); otherwise aerr is a nil
		// interface and calling aerr.Code() panics.
		aerr, ok := err.(awserr.Error)
		if !ok {
			return errors.Wrapf(err, "autocert failed to create secret %s", secretID)
		}

		if aerr.Code() == secretsmanager.ErrCodeInvalidRequestException {
			// InvalidRequestException: You can't create this secret because a secret with this
			// name is already scheduled for deletion.

			// Restore secret after it was already previously deleted.
			_, err = svc.RestoreSecret(&secretsmanager.RestoreSecretInput{
				SecretId: aws.String(secretID),
			})
			if err != nil {
				return errors.Wrapf(err, "autocert failed to restore secret %s", secretID)
			}
		} else if aerr.Code() != secretsmanager.ErrCodeResourceExistsException {
			return errors.Wrapf(err, "autocert failed to create secret %s", secretID)
		}

		// The secret already exists (or was just restored), so update its value instead.
		_, err = svc.UpdateSecret(&secretsmanager.UpdateSecretInput{
			SecretId:     aws.String(secretID),
			SecretString: aws.String(string(data)),
		})
		if err != nil {
			return errors.Wrapf(err, "autocert failed to update secret %s", secretID)
		}

		c.log.Printf("AWS Secrets Manager : Secret %s updated", secretID)
	} else {
		c.log.Printf("AWS Secrets Manager : Secret %s created", secretID)
	}

	return nil
}
// Delete removes certificate data from the cache under the specified key.
// If there's no such key in the cache, Delete returns nil.
func (c *SecretManagerAutocertCache) Delete(ctx context.Context, key string) error {
	svc := secretsmanager.New(c.awsSession)

	secretID := filepath.Join(c.secretPrefix, key)

	// Schedule deletion of the secret for the key. Note: ForceDeleteWithoutRecovery and
	// RecoveryWindowInDays cannot be used together in the same API call, so only the recovery
	// window is specified here; the secret can still be restored during that window.
	_, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
		SecretId: aws.String(secretID),

		// (Optional) Specifies the number of days that Secrets Manager waits before
		// it can delete the secret.
		//
		// This value can range from 7 to 30 days.
		RecoveryWindowInDays: aws.Int64(30),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == secretsmanager.ErrCodeResourceNotFoundException {
			// Per the autocert.Cache contract documented above, deleting a missing key is not an error.
			return nil
		}
		return errors.Wrapf(err, "autocert failed to delete secret %s", secretID)
	}

	c.log.Printf("AWS Secrets Manager : Secret %s deleted", secretID)

	return nil
}

View File

@ -28,8 +28,8 @@ type ServiceDeployFlags struct {
// Optional flags.
EnableHTTPS bool `validate:"omitempty" example:"false"`
ServiceDomainName string `validate:"omitempty" example:"example-project.com"`
ServiceDomainNameAliases cli.StringSlice `validate:"omitempty" example:"subdomain.example-project.com"`
ServiceHostPrimary string `validate:"omitempty" example:"example-project.com"`
ServiceHostNames cli.StringSlice `validate:"omitempty" example:"subdomain.example-project.com"`
S3BucketPrivateName string `validate:"omitempty" example:"saas-example-project-private"`
S3BucketPublicName string `validate:"omitempty" example:"saas-example-project-public"`
@ -57,8 +57,8 @@ type serviceDeployRequest struct {
GoModName string `validate:"required"`
EnableHTTPS bool `validate:"omitempty"`
ServiceDomainName string `validate:"omitempty,required_with=EnableHTTPS,fqdn"`
ServiceDomainNameAliases []string `validate:"omitempty,dive,fqdn"`
ServiceHostPrimary string `validate:"omitempty,required_with=EnableHTTPS,fqdn"`
ServiceHostNames []string `validate:"omitempty,dive,fqdn"`
AwsCreds awsCredentials `validate:"required,dive,required"`

View File

@ -84,8 +84,8 @@ func NewServiceDeployRequest(log *log.Logger, flags ServiceDeployFlags) (*servic
ProjectName: flags.ProjectName,
DockerFile: flags.DockerFile,
EnableHTTPS: flags.EnableHTTPS,
ServiceDomainName: flags.ServiceDomainName,
ServiceDomainNameAliases: flags.ServiceDomainNameAliases,
ServiceHostPrimary: flags.ServiceHostPrimary,
ServiceHostNames: flags.ServiceHostNames,
S3BucketPrivateName: flags.S3BucketPrivateName,
S3BucketPublicName: flags.S3BucketPublicName,
EnableLambdaVPC: flags.EnableLambdaVPC,
@ -163,8 +163,14 @@ func NewServiceDeployRequest(log *log.Logger, flags ServiceDeployFlags) (*servic
log.Printf("\t\t\tdockerfile: %s", req.DockerFile)
}
log.Println("\tSet defaults not defined in env vars.")
log.Println("\tSet defaults.")
{
// When only service host names are set, choose the first item as the primary host.
if req.ServiceHostPrimary == "" && len(req.ServiceHostNames) > 0 {
req.ServiceHostPrimary = req.ServiceHostNames[0]
log.Printf("\t\t\tSet Service Primary Host to '%s'.", req.ServiceHostPrimary)
}
// S3 temp prefix used by services for short term storage. A lifecycle policy will be used for expiration.
req.S3BucketTempPrefix = "tmp/"
@ -191,94 +197,98 @@ func NewServiceDeployRequest(log *log.Logger, flags ServiceDeployFlags) (*servic
}
// Defines the S3 Buckets used for all services.
req.S3Buckets = []S3Bucket{
// The public S3 Bucket used to serve static files and other assets.
S3Bucket{
Name: req.S3BucketPublicName,
Input: &s3.CreateBucketInput{
Bucket: aws.String(req.S3BucketPublicName),
},
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
CORSRules: []*s3.CORSRule{
&s3.CORSRule{
// Headers that are specified in the Access-Control-Request-Headers header.
// These headers are allowed in a preflight OPTIONS request. In response to
// any preflight OPTIONS request, Amazon S3 returns any requested headers that
// are allowed.
// AllowedHeaders: aws.StringSlice([]string{}),
// An HTTP method that you allow the origin to execute. Valid values are GET,
// PUT, HEAD, POST, and DELETE.
//
// AllowedMethods is a required field
AllowedMethods: aws.StringSlice([]string{"GET", "POST"}),
// One or more origins you want customers to be able to access the bucket from.
//
// AllowedOrigins is a required field
AllowedOrigins: aws.StringSlice([]string{"*"}),
// One or more headers in the response that you want customers to be able to
// access from their applications (for example, from a JavaScript XMLHttpRequest
// object).
// ExposeHeaders: aws.StringSlice([]string{}),
// The time in seconds that your browser is to cache the preflight response
// for the specified resource.
// MaxAgeSeconds: aws.Int64(),
// The public S3 Bucket used to serve static files and other assets.
if req.S3BucketPublicName != "" {
req.S3Buckets = append(req.S3Buckets,
S3Bucket{
Name: req.S3BucketPublicName,
Input: &s3.CreateBucketInput{
Bucket: aws.String(req.S3BucketPublicName),
},
},
},
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
CORSRules: []*s3.CORSRule{
&s3.CORSRule{
// Headers that are specified in the Access-Control-Request-Headers header.
// These headers are allowed in a preflight OPTIONS request. In response to
// any preflight OPTIONS request, Amazon S3 returns any requested headers that
// are allowed.
// AllowedHeaders: aws.StringSlice([]string{}),
// The private S3 Bucket used to persist data for services.
S3Bucket{
Name: req.S3BucketPrivateName,
Input: &s3.CreateBucketInput{
Bucket: aws.String(req.S3BucketPrivateName),
},
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
// Specifies whether Amazon S3 should block public access control lists (ACLs)
// for this bucket and objects in this bucket. Setting this element to TRUE
// causes the following behavior:
//
// * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
// public.
//
// * PUT Object calls fail if the request includes a public ACL.
//
// Enabling this setting doesn't affect existing policies or ACLs.
BlockPublicAcls: aws.Bool(true),
// An HTTP method that you allow the origin to execute. Valid values are GET,
// PUT, HEAD, POST, and DELETE.
//
// AllowedMethods is a required field
AllowedMethods: aws.StringSlice([]string{"GET", "POST"}),
// Specifies whether Amazon S3 should block public bucket policies for this
// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
// PUT Bucket policy if the specified bucket policy allows public access.
//
// Enabling this setting doesn't affect existing bucket policies.
BlockPublicPolicy: aws.Bool(true),
// One or more origins you want customers to be able to access the bucket from.
//
// AllowedOrigins is a required field
AllowedOrigins: aws.StringSlice([]string{"*"}),
// Specifies whether Amazon S3 should restrict public bucket policies for this
// bucket. Setting this element to TRUE restricts access to this bucket to only
// AWS services and authorized users within this account if the bucket has a
// public policy.
//
// Enabling this setting doesn't affect previously stored bucket policies, except
// that public and cross-account access within any public bucket policy, including
// non-public delegation to specific accounts, is blocked.
RestrictPublicBuckets: aws.Bool(true),
// One or more headers in the response that you want customers to be able to
// access from their applications (for example, from a JavaScript XMLHttpRequest
// object).
// ExposeHeaders: aws.StringSlice([]string{}),
// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
// objects in this bucket. Setting this element to TRUE causes Amazon S3 to
// ignore all public ACLs on this bucket and objects in this bucket.
//
// Enabling this setting doesn't affect the persistence of any existing ACLs
// and doesn't prevent new public ACLs from being set.
IgnorePublicAcls: aws.Bool(true),
},
Policy: func() string {
// Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
policyResource := strings.Trim(filepath.Join(req.S3BucketPrivateName, req.S3BucketTempPrefix), "/")
return fmt.Sprintf(`{
// The time in seconds that your browser is to cache the preflight response
// for the specified resource.
// MaxAgeSeconds: aws.Int64(),
},
},
})
}
// The private S3 Bucket used to persist data for services.
if req.S3BucketPrivateName != "" {
req.S3Buckets = append(req.S3Buckets,
S3Bucket{
Name: req.S3BucketPrivateName,
Input: &s3.CreateBucketInput{
Bucket: aws.String(req.S3BucketPrivateName),
},
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
// Specifies whether Amazon S3 should block public access control lists (ACLs)
// for this bucket and objects in this bucket. Setting this element to TRUE
// causes the following behavior:
//
// * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
// public.
//
// * PUT Object calls fail if the request includes a public ACL.
//
// Enabling this setting doesn't affect existing policies or ACLs.
BlockPublicAcls: aws.Bool(true),
// Specifies whether Amazon S3 should block public bucket policies for this
// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to
// PUT Bucket policy if the specified bucket policy allows public access.
//
// Enabling this setting doesn't affect existing bucket policies.
BlockPublicPolicy: aws.Bool(true),
// Specifies whether Amazon S3 should restrict public bucket policies for this
// bucket. Setting this element to TRUE restricts access to this bucket to only
// AWS services and authorized users within this account if the bucket has a
// public policy.
//
// Enabling this setting doesn't affect previously stored bucket policies, except
// that public and cross-account access within any public bucket policy, including
// non-public delegation to specific accounts, is blocked.
RestrictPublicBuckets: aws.Bool(true),
// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
// objects in this bucket. Setting this element to TRUE causes Amazon S3 to
// ignore all public ACLs on this bucket and objects in this bucket.
//
// Enabling this setting doesn't affect the persistence of any existing ACLs
// and doesn't prevent new public ACLs from being set.
IgnorePublicAcls: aws.Bool(true),
},
Policy: func() string {
// Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
policyResource := strings.Trim(filepath.Join(req.S3BucketPrivateName, req.S3BucketTempPrefix), "/")
return fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
{
@ -296,8 +306,8 @@ func NewServiceDeployRequest(log *log.Logger, flags ServiceDeployFlags) (*servic
}
]
}`, req.S3BucketPrivateName, req.AwsCreds.Region, policyResource, req.AwsCreds.Region)
}(),
},
}(),
})
}
// Set default AWS ECR Repository Name.
@ -1770,7 +1780,7 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
// Route 53 zone lookup when hostname is set. Supports both top level domains or sub domains.
var zoneArecNames = map[string][]string{}
if req.ServiceDomainName != "" {
if req.ServiceHostPrimary != "" {
log.Println("Route 53 - Get or create hosted zones.")
svc := route53.New(req.awsSession())
@ -1789,8 +1799,11 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
}
// Generate a slice with the primary domain name and include all the alternative domain names.
lookupDomains := []string{req.ServiceDomainName}
for _, dn := range req.ServiceDomainNameAliases {
lookupDomains := []string{}
if req.ServiceHostPrimary != "" {
lookupDomains = append(lookupDomains, req.ServiceHostPrimary)
}
for _, dn := range req.ServiceHostNames {
lookupDomains = append(lookupDomains, dn)
}
@ -2083,7 +2096,7 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
err := svc.ListCertificatesPages(&acm.ListCertificatesInput{},
func(res *acm.ListCertificatesOutput, lastPage bool) bool {
for _, cert := range res.CertificateSummaryList {
if *cert.DomainName == req.ServiceDomainName {
if *cert.DomainName == req.ServiceHostPrimary {
certificateArn = *cert.CertificateArn
return false
}
@ -2091,12 +2104,12 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
return !lastPage
})
if err != nil {
return errors.Wrapf(err, "failed to list certificates for '%s'", req.ServiceDomainName)
return errors.Wrapf(err, "failed to list certificates for '%s'", req.ServiceHostPrimary)
}
if certificateArn == "" {
// Create hash of all the domain names to be used to mark unique requests.
idempotencyToken := req.ServiceDomainName + "|" + strings.Join(req.ServiceDomainNameAliases, "|")
idempotencyToken := req.ServiceHostPrimary + "|" + strings.Join(req.ServiceHostNames, "|")
idempotencyToken = fmt.Sprintf("%x", md5.Sum([]byte(idempotencyToken)))
// If no certificate was found, create one.
@ -2111,7 +2124,7 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
// octets in length.
//
// DomainName is a required field
DomainName: aws.String(req.ServiceDomainName),
DomainName: aws.String(req.ServiceHostPrimary),
// Customer chosen string that can be used to distinguish between calls to RequestCertificate.
// Idempotency tokens time out after one hour. Therefore, if you call RequestCertificate
@ -2138,7 +2151,7 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
// add to an ACM certificate is 100. However, the initial limit is 10 domain
// names. If you need more than 10 names, you must request a limit increase.
// For more information, see Limits (https://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html).
SubjectAlternativeNames: aws.StringSlice(req.ServiceDomainNameAliases),
SubjectAlternativeNames: aws.StringSlice(req.ServiceHostNames),
// The method you want to use if you are requesting a public certificate to
// validate that you own or control domain. You can validate with DNS (https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html)
@ -2147,13 +2160,13 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
ValidationMethod: aws.String("DNS"),
})
if err != nil {
return errors.Wrapf(err, "failed to create certificate '%s'", req.ServiceDomainName)
return errors.Wrapf(err, "failed to create certificate '%s'", req.ServiceHostPrimary)
}
certificateArn = *createRes.CertificateArn
log.Printf("\t\tCreated certificate '%s'", req.ServiceDomainName)
log.Printf("\t\tCreated certificate '%s'", req.ServiceHostPrimary)
} else {
log.Printf("\t\tFound certificate '%s'", req.ServiceDomainName)
log.Printf("\t\tFound certificate '%s'", req.ServiceHostPrimary)
}
descRes, err := svc.DescribeCertificate(&acm.DescribeCertificateInput{
@ -2546,15 +2559,19 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
"{ECS_CLUSTER}": req.EcsClusterName,
"{ECS_SERVICE}": req.EcsServiceName,
"{AWS_REGION}": req.AwsCreds.Region,
"{AWSLOGS_GROUP}": req.CloudWatchLogGroupName,
"{AWS_LOGS_GROUP}": req.CloudWatchLogGroupName,
"{AWS_AWS_S3_BUCKET_PRIVATE}": req.S3BucketPrivateName,
"{S3_BUCKET_PUBLIC}": req.S3BucketPublicName,
"{ENV}": req.Env,
"{DATADOG_APIKEY}": datadogApiKey,
"{DATADOG_ESSENTIAL}": "true",
"{HTTP_HOST}": "0.0.0.0:80",
"{HTTPS_HOST}": "", // Not enabled by default
"{APP_PROJECT}": req.ProjectName,
"{APP_BASE_URL}": "", // Not set by default, requires a hostname to be defined.
//"{DOMAIN_NAME}": req.ServiceDomainName,
//"{DOMAIN_NAME_ALIASES}": strings.Join(req.ServiceDomainNameAliases, ","),
"{HOST_PRIMARY}": req.ServiceHostPrimary,
"{HOST_NAMES}": strings.Join(req.ServiceHostNames, ","),
"{CACHE_HOST}": "", // Not enabled by default
@ -2592,7 +2609,7 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
}
// When a primary host is defined for the service, set the App Base URL. Default to HTTPS if enabled.
if req.ServiceDomainName != "" {
if req.ServiceHostPrimary != "" {
var appSchema string
if req.EnableHTTPS {
appSchema = "https"
@ -2600,7 +2617,7 @@ func ServiceDeploy(log *log.Logger, req *serviceDeployRequest) error {
appSchema = "http"
}
placeholders["{APP_BASE_URL}"] = fmt.Sprintf("%s://%s/", appSchema, req.ServiceDomainName)
placeholders["{APP_BASE_URL}"] = fmt.Sprintf("%s://%s/", appSchema, req.ServiceHostPrimary)
}
// When db is set, update the placeholders.

View File

@ -216,8 +216,8 @@ func main() {
cli.StringFlag{Name: "service", Usage: "name of cmd", Destination: &deployFlags.ServiceName},
cli.StringFlag{Name: "env", Usage: "dev, stage, or prod", Destination: &deployFlags.Env},
cli.BoolFlag{Name: "enable_https", Usage: "enable HTTPS", Destination: &deployFlags.EnableHTTPS},
cli.StringFlag{Name: "domain_name", Usage: "dev, stage, or prod", Destination: &deployFlags.ServiceDomainName},
cli.StringSliceFlag{Name: "domain_name_aliases", Usage: "dev, stage, or prod", Value: &deployFlags.ServiceDomainNameAliases},
cli.StringFlag{Name: "primary_host", Usage: "dev, stage, or prod", Destination: &deployFlags.ServiceHostPrimary},
cli.StringSliceFlag{Name: "host_names", Usage: "dev, stage, or prod", Value: &deployFlags.ServiceHostNames},
cli.StringFlag{Name: "private_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPrivateName},
cli.StringFlag{Name: "public_bucket", Usage: "dev, stage, or prod", Destination: &deployFlags.S3BucketPublicName},
cli.StringFlag{Name: "dockerfile", Usage: "DockerFile for service", Destination: &deployFlags.DockerFile},