
WIP: code migration complete

Lee Brown
2019-08-16 13:33:57 -08:00
parent 059900e4f8
commit dd4cf51c94
9 changed files with 112 additions and 1367 deletions

.gitignore

@@ -4,3 +4,4 @@ aws.*
.env_docker_compose
local.env
.DS_Store
tmp

go.mod

@@ -46,7 +46,7 @@ require (
github.com/ugorji/go v1.1.7 // indirect
github.com/urfave/cli v1.21.0
github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2
gitlab.com/geeks-accelerator/oss/devops v0.0.0-20190815180027-17c30c1f4c9e // indirect
gitlab.com/geeks-accelerator/oss/devops v0.0.0-20190815180027-17c30c1f4c9e
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
golang.org/x/text v0.3.2

go.sum

@@ -201,6 +201,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=


@@ -1,383 +0,0 @@
package cicd
import (
"encoding/json"
"fmt"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"io/ioutil"
"net/url"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/ecs"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/pkg/errors"
"gopkg.in/go-playground/validator.v9"
)
const (
defaultAwsRegistryMaxImages = 1000
awsTagNameProject = "project"
awsTagNameEnv = "env"
awsTagNameName = "Name"
)
// awsCredentials defines AWS credentials used for deployment. Roles cannot be used when
// deploying via the GitLab CI/CD pipeline.
type awsCredentials struct {
AccessKeyID string `validate:"required_without=UseRole"`
SecretAccessKey string `validate:"required_without=UseRole"`
Region string `validate:"required_without=UseRole"`
UseRole bool
}
// Session returns a new AWS Session used to access AWS services.
func (creds awsCredentials) Session() *session.Session {
if creds.UseRole {
// Get an AWS session from an implicit source if no explicit
// configuration is provided. This is useful for taking advantage of
// EC2/ECS instance roles.
sess := session.Must(session.NewSession())
if creds.Region != "" {
sess.Config.WithRegion(creds.Region)
}
return sess
}
return session.New(
&aws.Config{
Region: aws.String(creds.Region),
Credentials: credentials.NewStaticCredentials(creds.AccessKeyID, creds.SecretAccessKey, ""),
})
}
// IamPolicyDocument defines an AWS IAM policy used for defining access for IAM roles, users, and groups.
type IamPolicyDocument struct {
Version string `json:"Version"`
Statement []IamStatementEntry `json:"Statement"`
}
// IamStatementEntry defines a single statement for an IAM policy.
type IamStatementEntry struct {
Sid string `json:"Sid"`
Effect string `json:"Effect"`
Action []string `json:"Action"`
Resource interface{} `json:"Resource"`
}
// S3Bucket defines the details needed to create a bucket that includes additional configuration.
type S3Bucket struct {
Name string `validate:"omitempty"`
Input *s3.CreateBucketInput
LifecycleRules []*s3.LifecycleRule
CORSRules []*s3.CORSRule
PublicAccessBlock *s3.PublicAccessBlockConfiguration
Policy string
}
// DB mimics the general connection info needed by services and is used to define placeholders.
type DB struct {
Host string
User string
Pass string
Database string
Driver string
DisableTLS bool
}
// URL returns the URL to connect to a database.
func (db DB) URL() string {
// Query parameters.
q := url.Values{}
// Handle SSL Mode
if db.DisableTLS {
q.Set("sslmode", "disable")
} else {
q.Set("sslmode", "require")
}
// Construct url.
dbUrl := url.URL{
Scheme: db.Driver,
User: url.UserPassword(db.User, db.Pass),
Host: db.Host,
Path: db.Database,
RawQuery: q.Encode(),
}
return dbUrl.String()
}
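// For illustration (hypothetical values), URL renders a standard connection string with
// the sslmode query parameter driven by DisableTLS:
/*
db := DB{
Host: "localhost:5432",
User: "postgres",
Pass: "postgres",
Database: "shared",
Driver: "postgres",
DisableTLS: true,
}
db.URL() // "postgres://postgres:postgres@localhost:5432/shared?sslmode=disable"
*/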
// GetAwsCredentials loads the AWS Access Keys from env variables unless a role is used.
func GetAwsCredentials(targetEnv string) (awsCredentials, error) {
var creds awsCredentials
creds.Region = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_REGION"))
if v := getTargetEnv(targetEnv, "AWS_USE_ROLE"); v != "" {
creds.UseRole, _ = strconv.ParseBool(v)
sess, err := session.NewSession()
if err != nil {
return creds, errors.Wrap(err, "Failed to load AWS credentials from instance")
}
if sess.Config != nil && sess.Config.Region != nil && *sess.Config.Region != "" {
creds.Region = *sess.Config.Region
} else {
sm := ec2metadata.New(sess)
creds.Region, err = sm.Region()
if err != nil {
return creds, errors.Wrap(err, "Failed to get region from AWS session")
}
}
return creds, nil
}
creds.AccessKeyID = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_ACCESS_KEY_ID"))
creds.SecretAccessKey = strings.TrimSpace(getTargetEnv(targetEnv, "AWS_SECRET_ACCESS_KEY"))
errs := validator.New().Struct(creds)
if errs != nil {
return creds, errs
}
//os.Setenv("AWS_DEFAULT_REGION", creds.Region)
return creds, nil
}
// GetAwsSecretValue returns the string value for a secret stored in AWS Secrets Manager.
func GetAwsSecretValue(creds awsCredentials, secretId string) (string, error) {
svc := secretsmanager.New(creds.Session())
res, err := svc.GetSecretValue(&secretsmanager.GetSecretValueInput{
SecretId: aws.String(secretId),
})
if err != nil {
return "", errors.Wrapf(err, "failed to get value for secret id %s", secretId)
}
// Text secrets are returned in SecretString; SecretBinary is only set for binary secrets.
if res.SecretString != nil {
return *res.SecretString, nil
}
return string(res.SecretBinary), nil
}
// EcrPurgeImages deletes the oldest images once the repository exceeds its max image count (ECR caps a repository at 10,000 images), so the pipeline can keep pushing new images without requiring manual deletion.
func EcrPurgeImages(req *serviceBuildRequest) ([]*ecr.ImageIdentifier, error) {
svc := ecr.New(req.awsSession())
// First list all the image IDs for the repository.
var imgIds []*ecr.ImageIdentifier
err := svc.ListImagesPages(&ecr.ListImagesInput{
RepositoryName: aws.String(req.EcrRepositoryName),
}, func(res *ecr.ListImagesOutput, lastPage bool) bool {
imgIds = append(imgIds, res.ImageIds...)
return !lastPage
})
if err != nil {
return nil, errors.Wrapf(err, "failed to list images for repository '%s'", req.EcrRepositoryName)
}
var (
ts []int
tsImgIds = map[int][]*ecr.ImageIdentifier{}
)
// Describe all the image IDs to determine oldest.
err = svc.DescribeImagesPages(&ecr.DescribeImagesInput{
RepositoryName: aws.String(req.EcrRepositoryName),
ImageIds: imgIds,
}, func(res *ecr.DescribeImagesOutput, lastPage bool) bool {
for _, img := range res.ImageDetails {
imgTs := int(img.ImagePushedAt.Unix())
if _, ok := tsImgIds[imgTs]; !ok {
tsImgIds[imgTs] = []*ecr.ImageIdentifier{}
ts = append(ts, imgTs)
}
if img.ImageTags != nil {
tsImgIds[imgTs] = append(tsImgIds[imgTs], &ecr.ImageIdentifier{
ImageTag: img.ImageTags[0],
})
} else if img.ImageDigest != nil {
tsImgIds[imgTs] = append(tsImgIds[imgTs], &ecr.ImageIdentifier{
ImageDigest: img.ImageDigest,
})
}
}
return !lastPage
})
if err != nil {
return nil, errors.Wrapf(err, "failed to describe images for repository '%s'", req.EcrRepositoryName)
}
// Sort the image timestamps in reverse order.
sort.Sort(sort.Reverse(sort.IntSlice(ts)))
// Loop over all the timestamps, skip the newest images until count exceeds limit.
var imgCnt int
var delIds []*ecr.ImageIdentifier
for _, imgTs := range ts {
for _, imgId := range tsImgIds[imgTs] {
imgCnt = imgCnt + 1
if imgCnt <= req.EcrRepositoryMaxImages {
continue
}
delIds = append(delIds, imgId)
}
}
// If there are image IDs to delete, delete them.
if len(delIds) > 0 {
//log.Printf("\t\tECR has %d images for repository '%s' which exceeds limit of %d", imgCnt, creds.EcrRepositoryName, creds.EcrRepositoryMaxImages)
//for _, imgId := range delIds {
// log.Printf("\t\t\tDelete %s", *imgId.ImageTag)
//}
_, err = svc.BatchDeleteImage(&ecr.BatchDeleteImageInput{
ImageIds: delIds,
RepositoryName: aws.String(req.EcrRepositoryName),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to delete %d images for repository '%s'", len(delIds), req.EcrRepositoryName)
}
}
return delIds, nil
}
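// A standalone sketch of the retention rule above, assuming a max of 2 images: sort the
// push timestamps newest first, skip the newest entries, and mark the remainder for deletion.
/*
ts := []int{100, 300, 200, 400}
sort.Sort(sort.Reverse(sort.IntSlice(ts)))
var del []int
for i, t := range ts {
if i+1 <= 2 {
continue // keep the 2 newest images
}
del = append(del, t)
}
// del == []int{200, 100}
*/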
// SyncPublicS3Files copies the local files from the static directory to s3 with public-read enabled.
func SyncPublicS3Files(awsSession *session.Session, staticS3Bucket, staticS3Prefix, staticDir string) error {
uploader := s3manager.NewUploader(awsSession)
di := NewDirectoryIterator(staticS3Bucket, staticS3Prefix, staticDir, "public-read")
if err := uploader.UploadWithIterator(aws.BackgroundContext(), di); err != nil {
return err
}
return nil
}
// EcsReadTaskDefinition reads a task definition file and json decodes it.
func EcsReadTaskDefinition(serviceDir, targetEnv string) ([]byte, error) {
checkPaths := []string{
filepath.Join(serviceDir, fmt.Sprintf("ecs-task-definition-%s.json", targetEnv)),
filepath.Join(serviceDir, "ecs-task-definition.json"),
}
var defFile string
for _, tf := range checkPaths {
ok, _ := exists(tf)
if ok {
defFile = tf
break
}
}
if defFile == "" {
return nil, errors.Errorf("failed to locate task definition - checked %s", strings.Join(checkPaths, ", "))
}
dat, err := ioutil.ReadFile(defFile)
if err != nil {
return nil, errors.WithMessagef(err, "failed to read file %s", defFile)
}
return dat, nil
}
// LambdaReadFuncDefinition reads a lambda function definition file and json decodes it.
func LambdaReadFuncDefinition(serviceDir, targetEnv string) ([]byte, error) {
checkPaths := []string{
filepath.Join(serviceDir, fmt.Sprintf("lambda-func-definition-%s.json", targetEnv)),
filepath.Join(serviceDir, "lambda-func-definition.json"),
}
var defFile string
for _, tf := range checkPaths {
ok, _ := exists(tf)
if ok {
defFile = tf
break
}
}
if defFile == "" {
return nil, errors.Errorf("failed to locate function definition - checked %s", strings.Join(checkPaths, ", "))
}
dat, err := ioutil.ReadFile(defFile)
if err != nil {
return nil, errors.WithMessagef(err, "failed to read file %s", defFile)
}
return dat, nil
}
// LambdaS3KeyFromReleaseImage generates an S3 key from a release image.
func LambdaS3KeyFromReleaseImage(releaseImage string) string {
it := filepath.Base(releaseImage)
it = strings.Replace(it, ":", "/", -1)
return filepath.Join("src/aws/lambda/", it+".zip")
}
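// A worked example with a hypothetical ECR image URI:
//
// LambdaS3KeyFromReleaseImage("000000000000.dkr.ecr.us-west-2.amazonaws.com/example-project:dev-web-api")
// filepath.Base => "example-project:dev-web-api"
// replace ":"   => "example-project/dev-web-api"
// result        => "src/aws/lambda/example-project/dev-web-api.zip"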
// parseTaskDefinitionInput json decodes a task definition into an ecs.RegisterTaskDefinitionInput.
func parseTaskDefinitionInput(dat []byte) (*ecs.RegisterTaskDefinitionInput, error) {
dat = convertKeys(dat)
var taskDef *ecs.RegisterTaskDefinitionInput
if err := json.Unmarshal(dat, &taskDef); err != nil {
return nil, errors.WithMessagef(err, "failed to json decode task definition - %s", string(dat))
}
return taskDef, nil
}
// convertKeys fixes json keys so they can be unmarshaled into aws types. No AWS structs have json tags.
func convertKeys(j json.RawMessage) json.RawMessage {
m := make(map[string]json.RawMessage)
if err := json.Unmarshal([]byte(j), &m); err != nil {
// Not a JSON object
return j
}
for k, v := range m {
fixed := fixKey(k)
delete(m, k)
m[fixed] = convertKeys(v)
}
b, err := json.Marshal(m)
if err != nil {
return j
}
return json.RawMessage(b)
}
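// fixKey upper-cases a json key so it matches the exported field name on the AWS structs;
// json.Unmarshal matches field names case-insensitively, so "family" -> "FAMILY" still
// decodes into a field named Family.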
func fixKey(key string) string {
return strings.ToTitle(key)
}
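// A small sketch of the conversion (hypothetical task definition snippet). Note that array
// values fail the map unmarshal and are returned unchanged, so keys nested inside arrays
// keep their original case; case-insensitive json matching still binds them.
/*
in := json.RawMessage(`{"family":"web-api","containerDefinitions":[{"name":"web-api"}]}`)
out := convertKeys(in)
// out == {"CONTAINERDEFINITIONS":[{"name":"web-api"}],"FAMILY":"web-api"}
*/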
// jsonEncodeStringValue json encodes string values to be used in the ECS task definition.
func jsonEncodeStringValue(str string) string {
dat, _ := json.Marshal(str)
return strings.Trim(string(dat), "\"")
}


@@ -1,24 +0,0 @@
package cicd
import "github.com/aws/aws-sdk-go/service/ec2"
// deployRequest defines the details needed to execute a service deployment.
type deployRequest struct {
*serviceRequest
EcrRepositoryName string `validate:"required"`
Ec2SecurityGroupName string `validate:"required"`
Ec2SecurityGroup *ec2.CreateSecurityGroupInput
GitlabRunnerEc2SecurityGroupName string `validate:"required"`
S3BucketTempPrefix string `validate:"required_with=S3BucketPrivateName S3BucketPublicName"`
S3BucketPrivateName string `validate:"omitempty"`
S3Buckets []S3Bucket
EcsService *deployEcsServiceRequest
LambdaFunction *deployLambdaFuncRequest
}

View File

@@ -19,8 +19,28 @@ import (
"strings"
)
// DeployContext defines the flags for defining the deployment env.
type DeployContext struct {
/*
// Register informs the sqlxtrace package of the driver that we will be using in our program.
// It uses a default service name, in the below case "postgres.db". To use a custom service
// name use RegisterWithServiceName.
sqltrace.Register(db.Driver, &pq.Driver{}, sqltrace.WithServiceName("devops:migrate"))
masterDb, err := sqlxtrace.Open(db.Driver, db.URL())
if err != nil {
return errors.WithStack(err)
}
defer masterDb.Close()
// Start the database migrations.
log.Printf("\t\tStart migrations.")
if err = schema.Migrate(masterDb, log, false); err != nil {
return errors.WithStack(err)
}
log.Printf("\t\tFinished migrations.")
*/
// BuildContext defines the flags for defining the deployment env.
type BuildContext struct {
// Env is the target environment used for the deployment.
Env string `validate:"oneof=dev stage prod"`
@@ -28,66 +48,82 @@ type DeployContext struct {
AwsCredentials devdeploy.AwsCredentials `validate:"required,dive,required"`
}
// DefineDeploymentEnv handles defining all the information needed to setup the target env including RDS and cache.
func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.DeploymentEnv, error) {
// DefineBuildEnv defines the details to setup the target environment for the project to build services and functions.
func DefineBuildEnv(buildCtx BuildContext) (*devdeploy.BuildEnv, error) {
// If AWS Credentials are not set and use role is not enabled, try to load the credentials from env vars.
if ctx.AwsCredentials.UseRole == false && ctx.AwsCredentials.AccessKeyID == "" {
if buildCtx.AwsCredentials.UseRole == false && buildCtx.AwsCredentials.AccessKeyID == "" {
var err error
ctx.AwsCredentials, err = devdeploy.GetAwsCredentialsFromEnv(ctx.Env)
buildCtx.AwsCredentials, err = devdeploy.GetAwsCredentialsFromEnv(buildCtx.Env)
if err != nil {
return nil, err
}
}
targetEnv := &devdeploy.DeploymentEnv{
Env: ctx.Env,
AwsCredentials: ctx.AwsCredentials,
// Init a new build target environment for the project.
buildEnv := &devdeploy.BuildEnv{
Env: buildCtx.Env,
AwsCredentials: buildCtx.AwsCredentials,
}
// Get the current working directory. This should be somewhere contained within the project.
workDir, err := os.Getwd()
if err != nil {
return targetEnv, errors.WithMessage(err, "Failed to get current working directory.")
return buildEnv, errors.WithMessage(err, "Failed to get current working directory.")
}
// Set the project root directory and project name. This is currently set by finding the go.mod file for the project
// repo. Project name is the directory name.
modDetails, err := devdeploy.LoadModuleDetails(workDir)
if err != nil {
return targetEnv, err
return buildEnv, err
}
// ProjectRoot should be the root directory for the project.
targetEnv.ProjectRoot = modDetails.ProjectRoot
buildEnv.ProjectRoot = modDetails.ProjectRoot
// ProjectName will be used for prefixing AWS resources. This could be changed as needed or manually defined.
targetEnv.ProjectName = modDetails.ProjectName
buildEnv.ProjectName = modDetails.ProjectName
// Set default AWS ECR Repository Name.
targetEnv.AwsEcrRepository = &devdeploy.AwsEcrRepository{
RepositoryName: targetEnv.ProjectName,
buildEnv.AwsEcrRepository = &devdeploy.AwsEcrRepository{
RepositoryName: buildEnv.ProjectName,
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: buildEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: buildEnv.Env},
},
}
return buildEnv, nil
}
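// For illustration, a hedged sketch (hypothetical CLI wiring, using only names defined above)
// of how a build command might use DefineBuildEnv:
/*
buildCtx := BuildContext{Env: "dev"}
buildEnv, err := DefineBuildEnv(buildCtx)
if err != nil {
log.Fatalf("failed to define build env: %+v", err)
}
log.Printf("building project %s for env %s", buildEnv.ProjectName, buildEnv.Env)
*/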
// DefineDeploymentEnv handles defining all the information needed to setup the target env including RDS and cache.
func DefineDeploymentEnv(log *log.Logger, buildEnv *devdeploy.BuildEnv) (*devdeploy.DeploymentEnv, error) {
// Init a new deployment target environment for the project.
deployEnv := &devdeploy.DeploymentEnv{
BuildEnv: buildEnv,
}
// Set the deployment to use the default VPC for the region.
targetEnv.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{
deployEnv.AwsEc2Vpc = &devdeploy.AwsEc2Vpc{
IsDefault: true,
}
// Set the security group to use for the deployed services, database and cluster. This will use the VPC ID defined
// for the deployment.
targetEnv.AwsEc2SecurityGroup = &devdeploy.AwsEc2SecurityGroup{
GroupName: targetEnv.ProjectName + "-" + ctx.Env,
Description: fmt.Sprintf("Security group for %s services running on ECS", targetEnv.ProjectName),
deployEnv.AwsEc2SecurityGroup = &devdeploy.AwsEc2SecurityGroup{
GroupName: deployEnv.ProjectName + "-" + buildEnv.Env,
Description: fmt.Sprintf("Security group for %s services running on ECS", deployEnv.ProjectName),
Tags: []devdeploy.Tag{
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
// Set the name of the EC2 Security Group used by the gitlab runner. This is used to ensure the security
// group defined above has access to the RDS cluster/instance and can thus handle schema migrations.
targetEnv.GitlabRunnerEc2SecurityGroupName = "gitlab-runner"
deployEnv.GitlabRunnerEc2SecurityGroupName = "gitlab-runner"
// Set the s3 buckets used by the deployed services.
// S3 temp prefix used by services for short term storage. A lifecycle policy will be used for expiration.
@@ -116,11 +152,11 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
}
// Define the public S3 bucket used to serve static files for all the services.
targetEnv.AwsS3BucketPublic = &devdeploy.AwsS3Bucket{
BucketName: targetEnv.ProjectName + "-public",
deployEnv.AwsS3BucketPublic = &devdeploy.AwsS3Bucket{
BucketName: deployEnv.ProjectName + "-public",
IsPublic: true,
TempPrefix: s3BucketTempPrefix,
LocationConstraint: &ctx.AwsCredentials.Region,
LocationConstraint: &buildEnv.AwsCredentials.Region,
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
CORSRules: []*s3.CORSRule{
&s3.CORSRule{
@@ -154,13 +190,13 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
}
// The base s3 key prefix used to upload static files.
targetEnv.AwsS3BucketPublicKeyPrefix = "/public"
deployEnv.AwsS3BucketPublicKeyPrefix = "/public"
// For production, enable the CloudFront CDN for all static files to avoid serving them from the slower S3 option.
if ctx.Env == webcontext.Env_Prod {
targetEnv.AwsS3BucketPublic.CloudFront = &devdeploy.AwsS3BucketCloudFront{
if deployEnv.Env == webcontext.Env_Prod {
deployEnv.AwsS3BucketPublic.CloudFront = &devdeploy.AwsS3BucketCloudFront{
// S3 key prefix to request your content from a directory in your Amazon S3 bucket.
OriginPath: targetEnv.AwsS3BucketPublicKeyPrefix,
OriginPath: deployEnv.AwsS3BucketPublicKeyPrefix,
// A complex type that controls whether CloudFront caches the response to requests.
CachedMethods: []string{"HEAD", "GET"},
@@ -201,11 +237,11 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
// Define the private S3 bucket used for long term file storage including but not limited to: log exports,
// AWS Lambda code, application caching.
targetEnv.AwsS3BucketPrivate = &devdeploy.AwsS3Bucket{
BucketName: targetEnv.ProjectName + "-private",
deployEnv.AwsS3BucketPrivate = &devdeploy.AwsS3Bucket{
BucketName: deployEnv.ProjectName + "-private",
IsPublic: false,
TempPrefix: s3BucketTempPrefix,
LocationConstraint: &ctx.AwsCredentials.Region,
LocationConstraint: &buildEnv.AwsCredentials.Region,
LifecycleRules: []*s3.LifecycleRule{bucketLifecycleTempRule},
PublicAccessBlock: &s3.PublicAccessBlockConfiguration{
// Specifies whether Amazon S3 should block public access control lists (ACLs)
@@ -248,8 +284,8 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
}
// Add a bucket policy to enable exports from Cloudwatch Logs for the private S3 bucket.
targetEnv.AwsS3BucketPrivate.Policy = func() string {
policyResource := strings.Trim(filepath.Join(targetEnv.AwsS3BucketPrivate.BucketName, targetEnv.AwsS3BucketPrivate.TempPrefix), "/")
deployEnv.AwsS3BucketPrivate.Policy = func() string {
policyResource := strings.Trim(filepath.Join(deployEnv.AwsS3BucketPrivate.BucketName, deployEnv.AwsS3BucketPrivate.TempPrefix), "/")
return fmt.Sprintf(`{
"Version": "2012-10-17",
"Statement": [
@@ -267,12 +303,12 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
"Principal": { "Service": "logs.%s.amazonaws.com" }
}
]
}`, targetEnv.AwsS3BucketPrivate.BucketName, ctx.AwsCredentials.Region, policyResource, ctx.AwsCredentials.Region)
}`, deployEnv.AwsS3BucketPrivate.BucketName, buildEnv.AwsCredentials.Region, policyResource, buildEnv.AwsCredentials.Region)
}()
// Define the Redis Cache cluster used for ephemeral storage.
targetEnv.AwsElasticCacheCluster = &devdeploy.AwsElasticCacheCluster{
CacheClusterId: targetEnv.ProjectName + "-" + ctx.Env,
deployEnv.AwsElasticCacheCluster = &devdeploy.AwsElasticCacheCluster{
CacheClusterId: deployEnv.ProjectName + "-" + buildEnv.Env,
CacheNodeType: "cache.t2.micro",
CacheSubnetGroupName: "default",
Engine: "redis",
@@ -290,8 +326,8 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
}
// Define the RDS Database instance for transactional data. A random one will be generated for any created instance.
targetEnv.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
DBInstanceIdentifier: targetEnv.ProjectName + "-" + ctx.Env,
deployEnv.AwsRdsDBInstance = &devdeploy.AwsRdsDBInstance{
DBInstanceIdentifier: deployEnv.ProjectName + "-" + buildEnv.Env,
DBName: "shared",
Engine: "postgres",
MasterUsername: "god",
@@ -304,12 +340,12 @@ func DefineDeploymentEnv(log *log.Logger, ctx DeployContext) (*devdeploy.Deploym
AutoMinorVersionUpgrade: true,
CopyTagsToSnapshot: aws.Bool(true),
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
return targetEnv, nil
return deployEnv, nil
}
// ServiceContext defines the flags for deploying a service.
@@ -333,13 +369,13 @@ type ServiceContext struct {
}
// DefineDeployService handles defining all the information needed to deploy a service to AWS ECS.
func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdeploy.DeploymentEnv) (*devdeploy.DeployService, error) {
func DefineDeployService(log *log.Logger, ctx ServiceContext, deployEnv *devdeploy.DeploymentEnv) (*devdeploy.DeployService, error) {
log.Printf("\tDefine deploy for service '%s'.", ctx.ServiceName)
// Start to define all the information for the service from the service context.
srv := &devdeploy.DeployService{
DeploymentEnv: targetEnv,
//DeploymentEnv: deployEnv,
ServiceName: ctx.ServiceName,
EnableHTTPS: ctx.EnableHTTPS,
ServiceHostPrimary: ctx.ServiceHostPrimary,
@@ -354,12 +390,12 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
}
// Set the release tag for the image to use include env + service name + commit hash/tag.
srv.ReleaseTag = devdeploy.GitLabCiReleaseTag(targetEnv.Env, srv.ServiceName)
srv.ReleaseTag = devdeploy.GitLabCiReleaseTag(deployEnv.Env, srv.ServiceName)
log.Printf("\t\tSet ReleaseTag '%s'.", srv.ReleaseTag)
// The S3 prefix used to upload static files served to public.
if ctx.StaticFilesS3Enable {
srv.StaticFilesS3Prefix = filepath.Join(targetEnv.AwsS3BucketPublicKeyPrefix, srv.ReleaseTag, "static")
srv.StaticFilesS3Prefix = filepath.Join(deployEnv.AwsS3BucketPublicKeyPrefix, srv.ReleaseTag, "static")
}
// Determine the Dockerfile for the service.
@@ -368,7 +404,7 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
log.Printf("\t\tUsing docker file '%s'.", srv.Dockerfile)
} else {
var err error
srv.Dockerfile, err = devdeploy.FindServiceDockerFile(targetEnv.ProjectRoot, srv.ServiceName)
srv.Dockerfile, err = devdeploy.FindServiceDockerFile(deployEnv.ProjectRoot, srv.ServiceName)
if err != nil {
return nil, err
}
@@ -383,22 +419,22 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
// Define the ECS Cluster used to host the serverless fargate tasks.
srv.AwsEcsCluster = &devdeploy.AwsEcsCluster{
ClusterName: targetEnv.ProjectName + "-" + targetEnv.Env,
ClusterName: deployEnv.ProjectName + "-" + deployEnv.Env,
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
// Define the ECS task execution role. This role executes ECS actions such as pulling the image and storing the
// application logs in cloudwatch.
srv.AwsEcsExecutionRole = &devdeploy.AwsIamRole{
RoleName: fmt.Sprintf("ecsExecutionRole%s%s", targetEnv.ProjectNameCamel(), strcase.ToCamel(targetEnv.Env)),
Description: fmt.Sprintf("Provides access to other AWS service resources that are required to run Amazon ECS tasks for %s. ", targetEnv.ProjectName),
RoleName: fmt.Sprintf("ecsExecutionRole%s%s", deployEnv.ProjectNameCamel(), strcase.ToCamel(deployEnv.Env)),
Description: fmt.Sprintf("Provides access to other AWS service resources that are required to run Amazon ECS tasks for %s. ", deployEnv.ProjectName),
AssumeRolePolicyDocument: "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
AttachRolePolicyArns: []string{"arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"},
}
@@ -406,12 +442,12 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
// Define the ECS task role. This role is used by the task itself for calling other AWS services.
srv.AwsEcsTaskRole = &devdeploy.AwsIamRole{
RoleName: fmt.Sprintf("ecsTaskRole%s%s", targetEnv.ProjectNameCamel(), strcase.ToCamel(targetEnv.Env)),
Description: fmt.Sprintf("Allows ECS tasks for %s to call AWS services on your behalf.", targetEnv.ProjectName),
RoleName: fmt.Sprintf("ecsTaskRole%s%s", deployEnv.ProjectNameCamel(), strcase.ToCamel(deployEnv.Env)),
Description: fmt.Sprintf("Allows ECS tasks for %s to call AWS services on your behalf.", deployEnv.ProjectName),
AssumeRolePolicyDocument:"{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ecs-tasks.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}",
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
log.Printf("\t\tSet ECS Task Role Name to '%s'.", srv.AwsEcsTaskRole.RoleName)
@@ -420,8 +456,8 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
// the permissions required for deployed services to access AWS services. If the policy already exists, the
// statements will be used to add new required actions, but not for removal.
srv.AwsEcsTaskPolicy = &devdeploy.AwsIamPolicy{
PolicyName: fmt.Sprintf("%s%sServices", targetEnv.ProjectNameCamel(), strcase.ToCamel(targetEnv.Env)),
Description: fmt.Sprintf("Defines access for %s services. ", targetEnv.ProjectName),
PolicyName: fmt.Sprintf("%s%sServices", deployEnv.ProjectNameCamel(), strcase.ToCamel(deployEnv.Env)),
Description: fmt.Sprintf("Defines access for %s services. ", deployEnv.ProjectName),
PolicyDocument: devdeploy.AwsIamPolicyDocument{
Version: "2012-10-17",
Statement: []devdeploy.AwsIamStatementEntry{
@@ -495,10 +531,10 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
// AwsCloudWatchLogGroup defines the name of the cloudwatch log group that will be used to store logs for the ECS tasks.
srv.AwsCloudWatchLogGroup = &devdeploy.AwsCloudWatchLogGroup {
LogGroupName: fmt.Sprintf("logs/env_%s/aws/ecs/cluster_%s/service_%s", targetEnv.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
LogGroupName: fmt.Sprintf("logs/env_%s/aws/ecs/cluster_%s/service_%s", deployEnv.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
log.Printf("\t\tSet AWS Log Group Name to '%s'.", srv.AwsCloudWatchLogGroup.LogGroupName)
@@ -520,13 +556,13 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
if ctx.EnableElb {
// AwsElbLoadBalancer defines if the service should use an elastic load balancer.
srv.AwsElbLoadBalancer = &devdeploy.AwsElbLoadBalancer{
Name: fmt.Sprintf("%s-%s-%s", targetEnv.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
Name: fmt.Sprintf("%s-%s-%s", deployEnv.Env, srv.AwsEcsCluster.ClusterName, srv.ServiceName),
IpAddressType: "ipv4",
Scheme: "internet-facing",
Type: "application",
Tags: []devdeploy.Tag{
{Key: awsTagNameProject, Value: targetEnv.ProjectName},
{Key: awsTagNameEnv, Value: targetEnv.Env},
{Key: devdeploy.AwsTagNameProject, Value: deployEnv.ProjectName},
{Key: devdeploy.AwsTagNameEnv, Value: deployEnv.Env},
},
}
log.Printf("\t\tSet ELB Name to '%s'.", srv.AwsElbLoadBalancer.Name)
@@ -551,7 +587,7 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
srv.AwsElbLoadBalancer.TargetGroup.Name)
// Set ECS configs based on specified env.
if targetEnv.Env == "prod" {
if deployEnv.Env == "prod" {
srv.AwsElbLoadBalancer.EcsTaskDeregistrationDelay = 300
} else {
// Force staging to deploy immediately without waiting for connections to drain
@@ -574,7 +610,7 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
}
// Set ECS configs based on specified env.
if targetEnv.Env == "prod" {
if deployEnv.Env == "prod" {
srv.AwsEcsService.DeploymentMinimumHealthyPercent = 100
srv.AwsEcsService.DeploymentMaximumPercent = 200
} else {
@@ -582,9 +618,8 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
srv.AwsEcsService.DeploymentMaximumPercent = 200
}
// Read the defined json task definition for the service.
dat, err := devdeploy.EcsReadTaskDefinition(ctx.ServiceDir, targetEnv.Env)
dat, err := devdeploy.EcsReadTaskDefinition(ctx.ServiceDir, deployEnv.Env)
if err != nil {
return srv, err
}
@@ -605,13 +640,13 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
{
// Load Datadog API key which can be either stored in an environment variable or in AWS Secrets Manager.
// 1. Check env vars for [DEV|STAGE|PROD]_DD_API_KEY and DD_API_KEY
datadogApiKey := devdeploy.GetTargetEnv(targetEnv.Env, "DD_API_KEY")
datadogApiKey := devdeploy.GetTargetEnv(deployEnv.Env, "DD_API_KEY")
// 2. Check AWS Secrets Manager for datadog entry prefixed with target environment.
if datadogApiKey == "" {
prefixedSecretId := secretID(targetEnv.ProjectName, targetEnv.Env, "datadog")
prefixedSecretId := deployEnv.SecretID("datadog/api-key")
var err error
datadogApiKey, err = devdeploy.GetAwsSecretValue(targetEnv.AwsCredentials, prefixedSecretId)
datadogApiKey, err = devdeploy.GetAwsSecretValue(deployEnv.AwsCredentials, prefixedSecretId)
if err != nil {
if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
return err
@@ -623,7 +658,7 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
if datadogApiKey == "" {
secretId := "DATADOG"
var err error
datadogApiKey, err = devdeploy.GetAwsSecretValue(targetEnv.AwsCredentials, secretId)
datadogApiKey, err = devdeploy.GetAwsSecretValue(deployEnv.AwsCredentials, secretId)
if err != nil {
if aerr, ok := errors.Cause(err).(awserr.Error); !ok || aerr.Code() != secretsmanager.ErrCodeResourceNotFoundException {
return err
@@ -639,8 +674,6 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
placeholders["{DATADOG_APIKEY}"] = datadogApiKey
// When the datadog API key is empty, don't force the container to be essential and have the whole task fail.
if datadogApiKey != "" {
placeholders["{DATADOG_ESSENTIAL}"] = "true"
@@ -649,7 +682,6 @@ func DefineDeployService(log *log.Logger, ctx ServiceContext, targetEnv *devdepl
}
}
return nil
},
}


@@ -1,119 +0,0 @@
package cicd
import (
"io"
"bytes"
"mime"
"os"
"path/filepath"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pkg/errors"
)
// DirectoryIterator represents an iterator of a specified directory
type DirectoryIterator struct {
dir string
filePaths []string
bucket string
keyPrefix string
acl string
next struct {
path string
f *os.File
}
err error
}
// NewDirectoryIterator builds a new DirectoryIterator
func NewDirectoryIterator(bucket, keyPrefix, dir, acl string) s3manager.BatchUploadIterator {
var paths []string
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
// Propagate walk errors; info is nil when err is non-nil.
if err != nil {
return err
}
if !info.IsDir() {
paths = append(paths, path)
}
return nil
})
return &DirectoryIterator{
dir: dir,
filePaths: paths,
bucket: bucket,
keyPrefix: keyPrefix,
acl: acl,
}
}
// Next returns whether the next file exists.
func (di *DirectoryIterator) Next() bool {
if len(di.filePaths) == 0 {
di.next.f = nil
return false
}
f, err := os.Open(di.filePaths[0])
di.err = err
di.next.f = f
di.next.path = di.filePaths[0]
di.filePaths = di.filePaths[1:]
return di.Err() == nil
}
// Err returns error of DirectoryIterator
func (di *DirectoryIterator) Err() error {
return errors.WithStack(di.err)
}
// UploadObject uploads a file
func (di *DirectoryIterator) UploadObject() s3manager.BatchUploadObject {
f := di.next.f
var acl *string
if di.acl != "" {
acl = aws.String(di.acl)
}
buffer, contentType, rerr := readFile(f)
nextPath, _ := filepath.Rel(di.dir, di.next.path)
return s3manager.BatchUploadObject{
Object: &s3manager.UploadInput{
Bucket: aws.String(di.bucket),
Key: aws.String(filepath.Join(di.keyPrefix, nextPath)),
Body: bytes.NewReader(buffer),
ContentType: aws.String(contentType),
ACL: acl,
},
After: func() error {
if rerr != nil {
return rerr
}
return f.Close()
},
}
}
func readFile(f *os.File) ([]byte, string, error) {
// Get the file size and read the file content into a buffer.
fileInfo, err := f.Stat()
if err != nil {
return nil, "", err
}
buffer := make([]byte, fileInfo.Size())
if _, err := io.ReadFull(f, buffer); err != nil {
return nil, "", err
}
ext := filepath.Ext(f.Name())
contentType := mime.TypeByExtension(ext)
//f.Seek(0, io.SeekStart)
//ctBuf := make([]byte, 512)
//f.Read(ctBuf)
//contentType = http.DetectContentType(ctBuf)
return buffer, contentType, nil
}


@@ -1,314 +0,0 @@
package cicd
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/iancoleman/strcase"
"github.com/pkg/errors"
)
// serviceRequest defines the details needed to execute both a service build and a service deployment.
type serviceRequest struct {
ServiceName string `validate:"required"`
ServiceDir string `validate:"required"`
Env string `validate:"oneof=dev stage prod"`
ProjectRoot string `validate:"required"`
ProjectName string `validate:"required"`
DockerFile string `validate:"required"`
GoModFile string `validate:"required"`
GoModName string `validate:"required"`
AwsCreds awsCredentials `validate:"required,dive,required"`
_awsSession *session.Session
ReleaseImage string
}
// ProjectNameCamel takes a project name and returns the camel cased version.
func (r *serviceRequest) ProjectNameCamel() string {
s := strings.Replace(r.ProjectName, "_", " ", -1)
s = strings.Replace(s, "-", " ", -1)
s = strcase.ToCamel(s)
return s
}
// awsSession returns the current AWS session for the serviceDeployRequest.
func (r *serviceRequest) awsSession() *session.Session {
if r._awsSession == nil {
r._awsSession = r.AwsCreds.Session()
}
return r._awsSession
}
// init sets the basic details needed for both build and deploy for serviceRequest.
func (req *serviceRequest) init(log *log.Logger) error {
// When project root directory is empty or set to current working path, then search for the project root by locating
// the go.mod file.
log.Println("\tDetermining the project root directory.")
{
if req.ProjectRoot == "" || req.ProjectRoot == "." {
log.Println("\tAttempting to locate project root directory from current working directory.")
var err error
req.GoModFile, err = findProjectGoModFile()
if err != nil {
return err
}
req.ProjectRoot = filepath.Dir(req.GoModFile)
} else {
log.Printf("\t\tUsing supplied project root directory '%s'.\n", req.ProjectRoot)
req.GoModFile = filepath.Join(req.ProjectRoot, "go.mod")
}
log.Printf("\t\t\tproject root: %s", req.ProjectRoot)
log.Printf("\t\t\tgo.mod: %s", req.GoModFile)
}
log.Println("\tExtracting go module name from go.mod.")
{
var err error
req.GoModName, err = loadGoModName(req.GoModFile)
if err != nil {
return err
}
log.Printf("\t\t\tmodule name: %s", req.GoModName)
}
log.Println("\tDetermining the project name.")
{
if req.ProjectName != "" {
log.Printf("\t\tUse provided value.")
} else {
req.ProjectName = filepath.Base(req.GoModName)
log.Printf("\t\tSet from go module.")
}
log.Printf("\t\t\tproject name: %s", req.ProjectName)
}
log.Println("\tAttempting to locate service directory from project root directory.")
{
if req.DockerFile != "" {
log.Printf("\t\tUse provided value.")
} else {
log.Printf("\t\tFind from project root looking for Dockerfile.")
var err error
req.DockerFile, err = findServiceDockerFile(req.ProjectRoot, req.ServiceName)
if err != nil {
return err
}
}
req.ServiceDir = filepath.Dir(req.DockerFile)
log.Printf("\t\t\tservice directory: %s", req.ServiceDir)
log.Printf("\t\t\tdockerfile: %s", req.DockerFile)
}
// Verifies AWS credentials specified as environment variables.
log.Println("\tVerify AWS credentials.")
{
var err error
req.AwsCreds, err = GetAwsCredentials(req.Env)
if err != nil {
return err
}
if req.AwsCreds.UseRole {
log.Printf("\t\t\tUsing role")
} else {
log.Printf("\t\t\tAccessKeyID: '%s'", req.AwsCreds.AccessKeyID)
}
log.Printf("\t\t\tRegion: '%s'", req.AwsCreds.Region)
log.Printf("\t%s\tAWS credentials valid.", tests.Success)
}
return nil
}
// ecrRepositoryName returns the name used for the AWS ECR Repository.
func ecrRepositoryName(projectName string) string {
return projectName
}
// releaseTag returns the tag used for a release image. It always includes the environment and
// service name; if a CI commit SHA or ref name env var is set, it is appended.
func releaseTag(env, serviceName string) string {
tag1 := env + "-" + serviceName
// Generate tags for the release image.
var releaseTag string
if v := os.Getenv("BUILDINFO_CI_COMMIT_SHA"); v != "" {
tag2 := tag1 + "-" + v[0:8]
releaseTag = tag2
} else if v := os.Getenv("CI_COMMIT_SHA"); v != "" {
tag2 := tag1 + "-" + v[0:8]
releaseTag = tag2
} else if v := os.Getenv("BUILDINFO_CI_COMMIT_REF_NAME"); v != "" {
tag2 := tag1 + "-" + v
releaseTag = tag2
} else if v := os.Getenv("CI_COMMIT_REF_NAME"); v != "" {
tag2 := tag1 + "-" + v
releaseTag = tag2
} else {
releaseTag = tag1
}
return releaseTag
}
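// A sketch of the resulting tags (hypothetical values):
//
// no CI env vars set:            "dev-web-api"
// CI_COMMIT_SHA=1ecfd275ab...:   "dev-web-api-1ecfd275"
// CI_COMMIT_REF_NAME=master:     "dev-web-api-master"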
// releaseImage returns the fully qualified name used for tagging a release image, combining the
// repository URI with the release tag.
func releaseImage(env, serviceName, repositoryUri string) string {
return repositoryUri + ":" + releaseTag(env, serviceName)
}
// dBInstanceIdentifier returns the database name.
func dBInstanceIdentifier(projectName, env string) string {
return projectName + "-" + env
}
// secretID returns the secret name with a standard prefix.
func secretID(projectName, env, secretName string) string {
return filepath.Join(projectName, env, secretName)
}
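// For example (hypothetical values), secretID("example-project", "dev", "datadog") returns
// "example-project/dev/datadog", the prefixed name used to look up the secret in AWS Secrets Manager.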
// findProjectGoModFile finds the project root directory from the current working directory.
func findProjectGoModFile() (string, error) {
var err error
projectRoot, err := os.Getwd()
if err != nil {
return "", errors.WithMessage(err, "failed to get current working directory")
}
// Try to find the project root by looking for the go.mod file in a parent directory.
var goModFile string
testDir := projectRoot
for i := 0; i < 3; i++ {
if goModFile != "" {
testDir = filepath.Join(testDir, "../")
}
goModFile = filepath.Join(testDir, "go.mod")
ok, _ := exists(goModFile)
if ok {
projectRoot = testDir
break
}
}
// Verify the go.mod file was found.
ok, err := exists(goModFile)
if err != nil {
return "", errors.WithMessagef(err, "failed to load go.mod for project using project root %s", projectRoot)
} else if !ok {
return "", errors.Errorf("failed to locate project go.mod in project root %s", projectRoot)
}
return goModFile, nil
}
// findServiceDockerFile finds the service directory.
func findServiceDockerFile(projectRoot, targetService string) (string, error) {
checkDirs := []string{
filepath.Join(projectRoot, "cmd", targetService),
filepath.Join(projectRoot, "tools", targetService),
}
var dockerFile string
for _, cd := range checkDirs {
// Check to see if directory contains Dockerfile.
tf := filepath.Join(cd, "Dockerfile")
ok, _ := exists(tf)
if ok {
dockerFile = tf
break
}
}
if dockerFile == "" {
return "", errors.Errorf("failed to locate Dockerfile for service %s", targetService)
}
return dockerFile, nil
}
// getTargetEnv checks for an env var that is prefixed with the current target env.
func getTargetEnv(targetEnv, envName string) string {
k := fmt.Sprintf("%s_%s", strings.ToUpper(targetEnv), envName)
if v := os.Getenv(k); v != "" {
// Set the non prefixed env var with the prefixed value.
os.Setenv(envName, v)
return v
}
return os.Getenv(envName)
}
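// A minimal sketch (hypothetical values): with DEV_AWS_REGION=us-west-2 set, the prefixed
// value wins and is copied to the unprefixed variable as a side effect.
/*
os.Setenv("DEV_AWS_REGION", "us-west-2")
getTargetEnv("dev", "AWS_REGION") // "us-west-2"
os.Getenv("AWS_REGION")           // now also "us-west-2"
*/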
// loadGoModName parses out the module name from go.mod.
func loadGoModName(goModFile string) (string, error) {
ok, err := exists(goModFile)
if err != nil {
return "", errors.WithMessage(err, "Failed to load go.mod for project")
} else if !ok {
return "", errors.Errorf("Failed to locate project go.mod at %s", goModFile)
}
b, err := ioutil.ReadFile(goModFile)
if err != nil {
return "", errors.WithMessagef(err, "Failed to read go.mod at %s", goModFile)
}
var name string
lines := strings.Split(string(b), "\n")
for _, l := range lines {
if strings.HasPrefix(l, "module ") {
name = strings.TrimSpace(strings.Split(l, " ")[1])
break
}
}
return name, nil
}
// exists returns a bool as to whether a file path exists.
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
// execCmds executes a set of commands using the current env variables.
func execCmds(log *log.Logger, workDir string, cmds ...[]string) error {
for _, cmdVals := range cmds {
cmd := exec.Command(cmdVals[0], cmdVals[1:]...)
cmd.Dir = workDir
cmd.Env = os.Environ()
cmd.Stderr = log.Writer()
cmd.Stdout = log.Writer()
err := cmd.Run()
if err != nil {
return errors.WithMessagef(err, "failed to execute %s", strings.Join(cmdVals, " "))
}
}
return nil
}


@@ -1,449 +0,0 @@
package cicd
import (
"bufio"
"crypto/md5"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"geeks-accelerator/oss/saas-starter-kit/internal/platform/tests"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"gopkg.in/go-playground/validator.v9"
)
// ServiceBuildFlags defines the flags used for executing a service build.
type ServiceBuildFlags struct {
// Required flags.
ServiceName string `validate:"required" example:"web-api"`
Env string `validate:"oneof=dev stage prod" example:"dev"`
// Optional flags.
ProjectRoot string `validate:"omitempty" example:"."`
ProjectName string `validate:"omitempty" example:"example-project"`
DockerFile string `validate:"omitempty" example:"./cmd/web-api/Dockerfile"`
CommitRef string `validate:"omitempty" example:"master@1ecfd275"`
S3BucketPrivateName string `validate:"omitempty" example:"saas-example-project-private"`
BuildDir string `validate:"omitempty" example:"."`
NoCache bool `validate:"omitempty" example:"false"`
NoPush bool `validate:"omitempty" example:"false"`
IsLambda bool `validate:"omitempty" example:"false"`
}
// serviceBuildRequest defines the details needed to execute a service build.
type serviceBuildRequest struct {
*serviceRequest
EcrRepositoryName string `validate:"required"`
EcrRepository *ecr.CreateRepositoryInput
EcrRepositoryMaxImages int `validate:"omitempty"`
BuildDir string `validate:"omitempty" example:""`
CommitRef string `validate:"omitempty"`
S3BucketPrivateName string `validate:"omitempty"`
NoCache bool `validate:"omitempty"`
NoPush bool `validate:"omitempty"`
IsLambda bool `validate:"omitempty"`
flags ServiceBuildFlags
}
// NewServiceBuildRequest generates a new request for executing a build of a single service for a given set of CLI flags.
func NewServiceBuildRequest(log *log.Logger, flags ServiceBuildFlags) (*serviceBuildRequest, error) {
// Validates specified CLI flags map to struct successfully.
log.Println("Validate flags.")
{
errs := validator.New().Struct(flags)
if errs != nil {
return nil, errs
}
log.Printf("\t%s\tFlags ok.", tests.Success)
}
// Generate a deploy request using CLI flags and AWS credentials.
log.Println("Generate deploy request.")
var req serviceBuildRequest
{
// Define new service request.
sr := &serviceRequest{
ServiceName: flags.ServiceName,
Env: flags.Env,
ProjectRoot: flags.ProjectRoot,
ProjectName: flags.ProjectName,
DockerFile: flags.DockerFile,
}
if err := sr.init(log); err != nil {
return nil, err
}
req = serviceBuildRequest{
serviceRequest: sr,
CommitRef: flags.CommitRef,
S3BucketPrivateName: flags.S3BucketPrivateName,
BuildDir: flags.BuildDir,
NoCache: flags.NoCache,
NoPush: flags.NoPush,
IsLambda: flags.IsLambda,
flags: flags,
}
if req.BuildDir == "" {
req.BuildDir = req.ProjectRoot
}
// Set default AWS ECR Repository Name.
req.EcrRepositoryName = ecrRepositoryName(req.ProjectName)
req.EcrRepository = &ecr.CreateRepositoryInput{
RepositoryName: aws.String(req.EcrRepositoryName),
Tags: []*ecr.Tag{
&ecr.Tag{Key: aws.String(awsTagNameProject), Value: aws.String(req.ProjectName)},
&ecr.Tag{Key: aws.String(awsTagNameEnv), Value: aws.String(req.Env)},
},
}
log.Printf("\t\t\tSet ECR Repository Name to '%s'.", req.EcrRepositoryName)
// Set default AWS ECR Registry Max Images.
req.EcrRepositoryMaxImages = defaultAwsRegistryMaxImages
log.Printf("\t\t\tSet ECR Registry Max Images to '%d'.", req.EcrRepositoryMaxImages)
// Get the default commit ref.
if req.CommitRef == "" {
if ev := os.Getenv("CI_COMMIT_TAG"); ev != "" {
req.CommitRef = "tag-" + ev
} else if ev := os.Getenv("CI_COMMIT_REF_NAME"); ev != "" {
req.CommitRef = "branch-" + ev
}
if ev := os.Getenv("CI_COMMIT_SHORT_SHA"); ev != "" {
req.CommitRef = req.CommitRef + "@" + ev
}
}
}
return &req, nil
}
// ServiceBuild is the main entrypoint for building a service for a given target environment.
func ServiceBuild(log *log.Logger, req *serviceBuildRequest) error {
// Load the AWS ECR repository. Try to find by name else create new one.
var dockerLoginCmd []string
{
log.Println("ECR - Get or create repository.")
svc := ecr.New(req.awsSession())
// First try to find ECR repository by name.
var awsRepo *ecr.Repository
descRes, err := svc.DescribeRepositories(&ecr.DescribeRepositoriesInput{
RepositoryNames: []*string{aws.String(req.EcrRepositoryName)},
})
if err != nil {
if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != ecr.ErrCodeRepositoryNotFoundException {
return errors.Wrapf(err, "failed to describe repository '%s'", req.EcrRepositoryName)
}
} else if len(descRes.Repositories) > 0 {
awsRepo = descRes.Repositories[0]
}
if awsRepo == nil {
// If no repository was found, create one.
createRes, err := svc.CreateRepository(req.EcrRepository)
if err != nil {
return errors.Wrapf(err, "failed to create repository '%s'", req.EcrRepositoryName)
}
awsRepo = createRes.Repository
log.Printf("\t\tCreated: %s.", *awsRepo.RepositoryArn)
} else {
log.Printf("\t\tFound: %s.", *awsRepo.RepositoryArn)
log.Println("\t\tChecking old ECR images.")
delIds, err := EcrPurgeImages(req)
if err != nil {
return err
}
// Since ECR has a max number of repository images, old ones need to be deleted to stay under the limit.
// If there are image IDs to delete, delete them.
if len(delIds) > 0 {
log.Printf("\t\tDeleted %d images that exceeded limit of %d", len(delIds), req.EcrRepositoryMaxImages)
for _, imgId := range delIds {
log.Printf("\t\t\t%s", *imgId.ImageTag)
}
}
}
req.ReleaseImage = releaseImage(req.Env, req.ServiceName, *awsRepo.RepositoryUri)
log.Printf("\t\trelease image: %s", req.ReleaseImage)
log.Printf("\t%s\tRelease image valid.", tests.Success)
log.Println("ECR - Retrieve authorization token used for docker login.")
// Get the credentials necessary for logging into the AWS Elastic Container Registry
// made available with the AWS access key and AWS secret access keys.
res, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
if err != nil {
return errors.Wrap(err, "failed to get ecr authorization token")
}
authToken, err := base64.StdEncoding.DecodeString(*res.AuthorizationData[0].AuthorizationToken)
if err != nil {
return errors.Wrap(err, "failed to base64 decode ecr authorization token")
}
pts := strings.Split(string(authToken), ":")
user := pts[0]
pass := pts[1]
dockerLoginCmd = []string{
"docker",
"login",
"-u", user,
"-p", pass,
*res.AuthorizationData[0].ProxyEndpoint,
}
log.Printf("\t%s\tdocker login ok.", tests.Success)
}
// Once we can access the repository in ECR, do the docker build.
{
log.Printf("Starting docker build %s\n", req.ReleaseImage)
var dockerFile string
dockerPath := filepath.Join(req.BuildDir, req.DockerFile)
if _, err := os.Stat(dockerPath); err == nil {
dockerFile = req.DockerFile
} else {
dockerPath = req.DockerFile
dockerFile, err = filepath.Rel(req.BuildDir, dockerPath)
if err != nil {
return errors.Wrapf(err, "Failed to parse relative path for %s from %s", dockerPath, req.BuildDir)
}
}
// Name of the first build stage declared in the dockerFile.
var buildStageName string
// When the dockerFile is multistage, caching can be applied. Scan the dockerFile for the first stage.
// FROM golang:1.12.6-alpine3.9 AS build_base
var buildBaseImageTag string
{
file, err := os.Open(dockerPath)
if err != nil {
log.Fatal(err)
}
defer file.Close()
// List of lines in the dockerfile for the first stage. This will be used to tag the image to help ensure
// any changes to the lines associated with the first stage force cache to be reset.
var stageLines []string
// Loop through all the lines in the Dockerfile searching for the lines associated with the first build stage.
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
lineLower := strings.ToLower(line)
if strings.HasPrefix(lineLower, "from ") {
if buildStageName != "" {
// Only need to scan all the lines for the first build stage. Break when reach next FROM.
break
} else if !strings.Contains(lineLower, " as ") {
// Caching is only supported if the first FROM has a name.
log.Printf("\t\t\tSkipping stage cache, build stage not detected.\n")
break
}
buildStageName = strings.TrimSpace(strings.Split(lineLower, " as ")[1])
stageLines = append(stageLines, line)
} else if buildStageName != "" {
stageLines = append(stageLines, line)
}
}
if err := scanner.Err(); err != nil {
return errors.WithStack(err)
}
// If we have detected a build stage, then generate the appropriate tag.
if buildStageName != "" {
log.Printf("\t\tFound build stage %s for caching.\n", buildStageName)
// Generate a checksum for the lines associated with the build stage.
buildBaseHashPts := []string{
fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(stageLines, "\n")))),
}
switch buildStageName {
case "build_base_golang":
// Compute the checksum for the go.mod file.
goSumPath := filepath.Join(req.ProjectRoot, "go.sum")
goSumDat, err := ioutil.ReadFile(goSumPath)
if err != nil {
return errors.Wrapf(err, "Failed to read go.sum at %s", goSumPath)
}
buildBaseHashPts = append(buildBaseHashPts, fmt.Sprintf("%x", md5.Sum(goSumDat)))
}
// Combine all the checksums to be used to tag the target build stage.
buildBaseHash := fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(buildBaseHashPts, "|"))))
// New stage image tag.
buildBaseImageTag = buildStageName + "-" + buildBaseHash[0:8]
}
}
var cmds [][]string
// Enabling caching of the first build stage defined in the dockerFile.
var buildBaseImage string
if !req.NoCache && buildBaseImageTag != "" {
var pushTargetImg bool
if ciReg := os.Getenv("CI_REGISTRY"); ciReg != "" {
cmds = append(cmds, []string{
"docker", "login",
"-u", os.Getenv("CI_REGISTRY_USER"),
"-p", os.Getenv("CI_REGISTRY_PASSWORD"),
ciReg})
buildBaseImage = os.Getenv("CI_REGISTRY_IMAGE") + ":" + buildBaseImageTag
pushTargetImg = true
} else {
buildBaseImage = req.ProjectName + ":" + req.Env + "-" + req.ServiceName + "-" + buildBaseImageTag
}
cmds = append(cmds, []string{"docker", "pull", buildBaseImage})
cmds = append(cmds, []string{
"docker", "build",
"--file=" + dockerFile,
"--cache-from", buildBaseImage,
"--build-arg", "service=" + req.ServiceName,
"--build-arg", "env=" + req.Env,
"-t", buildBaseImage,
"--target", buildStageName,
".",
})
if pushTargetImg {
cmds = append(cmds, []string{"docker", "push", buildBaseImage})
}
}
// The initial build command slice.
buildCmd := []string{
"docker", "build",
"--file=" + dockerFile,
"--build-arg", "service=" + req.ServiceName,
"--build-arg", "env=" + req.Env,
"--build-arg", "commit_ref=" + req.CommitRef,
"--build-arg", "swagInit=1",
"-t", req.ReleaseImage,
}
// Append additional build flags.
if req.NoCache {
buildCmd = append(buildCmd, "--no-cache")
} else if buildBaseImage != "" {
buildCmd = append(buildCmd, "--cache-from", buildBaseImage)
}
// Finally append the build context as the current directory since os.Exec will use the project root as
// the working directory.
buildCmd = append(buildCmd, ".")
cmds = append(cmds, buildCmd)
s3Files := make(map[string]*s3manager.UploadInput)
if req.NoPush == false {
if req.IsLambda {
lambdaS3Key := LambdaS3KeyFromReleaseImage(req.ReleaseImage)
tmpDir := os.TempDir()
lambdaZip := filepath.Join(tmpDir, filepath.Base(lambdaS3Key))
containerName := uuid.NewRandom().String()
cmds = append(cmds, []string{"docker", "create", "-ti", "--name", containerName, req.ReleaseImage, "bash"})
cmds = append(cmds, []string{"docker", "cp", containerName + ":/var/task", tmpDir})
cmds = append(cmds, []string{"docker", "rm", containerName})
cmds = append(cmds, []string{"cd", tmpDir + "/task"})
cmds = append(cmds, []string{"zip", "-r", lambdaZip, "."})
s3Files[lambdaZip] = &s3manager.UploadInput{
Bucket: &req.S3BucketPrivateName,
Key: &lambdaS3Key,
}
} else {
cmds = append(cmds, dockerLoginCmd)
cmds = append(cmds, []string{"docker", "push", req.ReleaseImage})
}
}
for _, cmd := range cmds {
var logCmd string
if len(cmd) >= 2 && cmd[1] == "login" {
logCmd = strings.Join(cmd[0:2], " ")
} else {
logCmd = strings.Join(cmd, " ")
}
log.Printf("\t\t%s\n", logCmd)
err := execCmds(log, req.BuildDir, cmd)
if err != nil {
if len(cmd) > 2 && cmd[1] == "pull" {
log.Printf("\t\t\tSkipping pull - %s\n", err.Error())
} else {
return errors.Wrapf(err, "Failed to exec %s", strings.Join(cmd, " "))
}
}
}
if len(s3Files) > 0 {
// Create an uploader with the session and default options
uploader := s3manager.NewUploader(req.awsSession())
// Perform an upload.
for lf, upParams := range s3Files {
f, err := os.Open(lf)
if err != nil {
return errors.Wrapf(err, "Failed to open file %s", lf)
}
upParams.Body = f
_, err = uploader.Upload(upParams)
if err != nil {
return errors.Wrapf(err, "Failed to upload file to %s", *upParams.Key)
}
log.Printf("\t\tUploaded %s to s3://%s/%s\n", lf, *upParams.Bucket, *upParams.Key)
}
}
log.Printf("\t%s\tbuild complete.\n", tests.Success)
}
return nil
}